diff --git a/README.md b/README.md
index 61c9054..773e819 100644
--- a/README.md
+++ b/README.md
@@ -32,6 +32,8 @@ DataJoint Elements ([element-lab](https://github.com/datajoint/element-lab),
[element-miniscope](https://github.com/datajoint/element-miniscope)) assembled
together to form a fully functional workflow.
+![element miniscope diagram](images/attached_miniscope_element.svg)
+
## Installation instructions
+ The installation instructions can be found at the
diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev
index 754c196..17cef9f 100644
--- a/docker/Dockerfile.dev
+++ b/docker/Dockerfile.dev
@@ -1,18 +1,17 @@
-FROM datajoint/djbase:py3.8-debian-fcd8909
+FROM datajoint/djbase:py3.9-debian-8eb1715
USER anaconda:anaconda
COPY ./workflow-miniscope/docker/apt_requirements.txt /tmp/
RUN /entrypoint.sh echo "Installed dependencies."
-# Install Caiman
-RUN git clone --branch master https://github.com/datajoint-company/CaImAn
+# Install CaImAn
+RUN git clone --branch master https://github.com/kabilar/CaImAn
WORKDIR /main/CaImAn
RUN conda install -n base -c conda-forge -y mamba
-RUN /bin/bash -c 'mamba env update --n base --file environment.yml'
-# Suite2p requires np.__version__ == 1.21
-RUN pip install numpy==1.21
+RUN /bin/bash -c 'mamba env update -n base --file environment.yml'
RUN pip install .
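+# Set up CaImAn's data directory inside the cloned repository (first-run setup)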
+RUN python caimanmanager.py install --inplace
WORKDIR /main
diff --git a/docker/Dockerfile.test b/docker/Dockerfile.test
index 5d1eb33..6d04352 100644
--- a/docker/Dockerfile.test
+++ b/docker/Dockerfile.test
@@ -1,10 +1,18 @@
-FROM datajoint/djbase:py3.9-debian-fcd8909
+FROM datajoint/djbase:py3.9-debian-8eb1715
USER anaconda:anaconda
COPY ./workflow-miniscope/docker/apt_requirements.txt /tmp/
RUN /entrypoint.sh echo "Installed dependencies."
+# Install CaImAn
+RUN git clone --branch master https://github.com/kabilar/CaImAn
+WORKDIR /main/CaImAn
+RUN conda install -n base -c conda-forge -y mamba
+RUN /bin/bash -c 'mamba env update -n base --file environment.yml'
+RUN pip install .
+RUN python caimanmanager.py install --inplace
+
WORKDIR /main
# Option 1 - Install DataJoint's remote fork of the workflow and elements
@@ -45,4 +53,6 @@ RUN rm -f /main/workflow-miniscope/dj_local_conf.json
RUN pip install /main/workflow-miniscope
RUN pip install -r /main/workflow-miniscope/requirements_test.txt
+RUN pip uninstall -y datajoint
+RUN pip install git+
WORKDIR /main/workflow-miniscope
diff --git a/docker/docker-compose-dev.yaml b/docker/docker-compose-dev.yaml
index 56d041b..1fb5cde 100644
--- a/docker/docker-compose-dev.yaml
+++ b/docker/docker-compose-dev.yaml
@@ -18,7 +18,7 @@ services:
context: ../../
dockerfile: ./workflow-miniscope/docker/Dockerfile.dev
env_file: .env
- image: workflow-miniscope-dev:0.1.0a2
+ image: workflow-miniscope-dev:0.1.0
container_name: workflow-miniscope-dev
environment:
- MINISCOPE_ROOT_DATA_DIR=/main/test_data/workflow_miniscope/
diff --git a/docker/docker-compose-test.yaml b/docker/docker-compose-test.yaml
index 8f291b1..847e1b9 100644
--- a/docker/docker-compose-test.yaml
+++ b/docker/docker-compose-test.yaml
@@ -17,7 +17,8 @@ services:
build:
context: ../../
dockerfile: ./workflow-miniscope/docker/Dockerfile.test
- image: workflow-miniscope-test:0.1.0a2
+ env_file: .env
+ image: workflow-miniscope-test:0.1.0
container_name: workflow-miniscope-test
environment:
- DJ_HOST=db
@@ -30,7 +31,7 @@ services:
- -c
- |
echo "------ INTEGRATION TESTS ------"
- pytest -sv --cov-report term-missing --cov=workflow_miniscope -p no:warnings
+ pytest -sv --cov-report term-missing --cov=workflow_miniscope -p no:warnings tests/
tail -f /dev/null
volumes:
- ${TEST_DATA_DIR}:/main/test_data
diff --git a/images/attached_miniscope_element.svg b/images/attached_miniscope_element.svg
new file mode 100644
index 0000000..3385e7c
--- /dev/null
+++ b/images/attached_miniscope_element.svg
@@ -0,0 +1,1622 @@
+
+
diff --git a/notebooks/00-data-download-optional.ipynb b/notebooks/00-data-download-optional.ipynb
new file mode 100644
index 0000000..3c0f38c
--- /dev/null
+++ b/notebooks/00-data-download-optional.ipynb
@@ -0,0 +1,167 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Download example dataset\n",
+ "\n",
+ "+ This workflow will need miniscope calcium imaging data collected from the UCLA Miniscope and processed with CaImAn. We provide an example dataset to be downloaded to run through the workflow. This notebook walks you through the process to download the dataset.\n",
+ "\n",
+ "## Install `djarchive-client`\n",
+ "\n",
+ "+ The example dataset is hosted on `djarchive`, an AWS storage.\n",
+ "\n",
+ "+ We provide a client package, [djarchive-client](https://github.com/datajoint/djarchive-client), to download the data which can be installed with pip:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pip install git+https://github.com/datajoint/djarchive-client.git"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Download example datasets using `djarchive-client`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import djarchive_client\n",
+ "client = djarchive_client.client()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Browse the datasets that are available on `djarchive`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "list(client.datasets())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Browse the different versions of each dataset:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "list(client.revisions())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To download the dataset, let's prepare a directory, for example in `/tmp`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "os.mkdir('/tmp/example_data')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Run download for a given dataset and revision:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "client.download('workflow-miniscope-test-set', target_directory='/tmp/example_data', revision='v1')"
+ ]
+ },
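+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To confirm the download succeeded, you can list the files under the target directory. This is a minimal sanity check; the exact listing depends on the dataset revision."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pathlib\n",
+ "# Recursively list everything under the download target directory\n",
+ "for path in sorted(pathlib.Path('/tmp/example_data').rglob('*')):\n",
+ "    print(path)"
+ ]
+ },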
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Directory structure\n",
+ "\n",
+ "+ After downloading, the directory will be organized as follows:\n",
+ "\n",
+ " ```\n",
+ " /tmp/example_data/\n",
+ " - subject1/\n",
+ " - session1/\n",
+ " - 0.avi\n",
+ " - metaData.json\n",
+ " - timeStamps.csv\n",
+ " - caiman/\n",
+ " - subject1_session1.hdf5\n",
+ " ```\n",
+ "\n",
+ "+ subject 1 data is recorded with the UCLA Miniscope and Miniscope-DAQ-V4 acquisition software, and processed with CaImAn.\n",
+ "\n",
+ "+ We will use the dataset for subject 1 as an example for the rest of the notebooks. If you use your own dataset for the workflow, change the path accordingly.\n",
+ "\n",
+ "## Next step\n",
+ "\n",
+ "+ In the next notebook ([01-configure](01-configure.ipynb)) we will set up the configuration file for the workflow."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": []
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "formats": "ipynb,scripts//py",
+ "main_language": "python"
+ },
+ "kernelspec": {
+ "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)",
+ "name": "python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/notebooks/00-parameters.py b/notebooks/00-parameters.py
deleted file mode 100644
index 479e154..0000000
--- a/notebooks/00-parameters.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# ---
-# jupyter:
-# jupytext:
-# formats: ipynb,py
-# text_representation:
-# extension: .py
-# format_name: light
-# format_version: '1.5'
-# jupytext_version: 1.11.1
-# kernelspec:
-# display_name: 'Python 3.7.10 64-bit (''workflow-miniscope'': conda)'
-# metadata:
-# interpreter:
-# hash: fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c
-# name: python3
-# ---
-
-# # Insert an entry into `imaging.ProcessingParamSet`
-#
-# + The entry will comprise the parameters used for processing with the analysis package.
-#
-# + If the same parameters are used to analyze multiple datasets, the parameters only need to be inserted once.
-#
-# + This step is in a separate Jupyter Notebook because the parameters would otherwise clutter the next main notebook (`01ingest.ipynb`).
-
-# Change into the parent directory to find the `dj_local_conf.json` file.
-# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.
-import os
-os.chdir('..')
-
-import numpy as np
-from workflow_miniscope.pipeline import *
-
-# ## Define the `MiniscopeAnalysis` parameters
-
-params = dict(pars_envs = ['memory_size_to_use', 12, 'memory_size_per_patch', 0.6, 'patch_dims', [64, 64]],
- include_residual = False,
- gSig = 3,
- gSiz = 15,
- ssub = 1,
- with_dendrites = True,
- updateA_search_method = 'dilate',
- updateA_bSiz = 5,
- updateA_dist = None,
- spatial_constraints = ['connected', True, 'circular', False],
- spatial_algorithm = 'hals_thresh',
- Fs = 30,
- tsub = 5,
- deconv_flag = True,
- deconv_options = ['type', 'ar1', 'method', 'constrained', 'smin', -5, 'optimize_pars', True, 'optimize_b', True, 'max_tau', 100],
- nk = 3,
- detrend_method = 'spline',
- bg_model = 'ring',
- nb = 1,
- ring_radius = 23,
- num_neighbors = [],
- show_merge = False,
- merge_thr = 0.65,
- method_dist = 'max',
- dmin = 5,
- dmin_only = 2,
- merge_thr_spatial = [0.8, 0.4, -float('inf')],
- K = [],
- min_corr = 0.9,
- min_pnr = 15,
- min_pixel = None,
- bd = 0,
- frame_range = [],
- save_initialization = False,
- use_parallel = True,
- show_init = False,
- choose_params = False,
- center_psf = True,
- min_corr_res = 0.7,
- min_pnr_res = 8,
- seed_method_res = 'auto',
- update_sn = True,
- with_manual_intervention = False)
-
-# ## Insert the `MiniscopeAnalysis` parameters
-#
-# + The `insert_new_params` is a utility function as part of the `imaging.ProcessingParamSet` table that is used to verify the parameter set does not already exist in the table.
-
-imaging.ProcessingParamSet.insert_new_params(
- processing_method='mcgill_miniscope_analysis',
- paramset_idx=0,
- paramset_desc='Calcium imaging analysis with Miniscope Analysis using default parameters',
- params=params)
-
-# ## Proceed to the `01ingest.ipynb` Jupyter Notebook
-#
-# + This notebook describes the steps to ingest the imaging metadata and processed data.
diff --git a/notebooks/01-configure.ipynb b/notebooks/01-configure.ipynb
new file mode 100644
index 0000000..65c7f52
--- /dev/null
+++ b/notebooks/01-configure.ipynb
@@ -0,0 +1,221 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Configure DataJoint connection to the database\n",
+ "\n",
+ "+ To run `workflow-miniscope`, we need to properly set up the DataJoint configuration. The configuration will be saved in a file called `dj_local_conf.json` on each machine and this notebook walks you through the process.\n",
+ "\n",
+ "+ The configuration only needs to be set up once. If you have gone through the configuration before, directly go to [02-workflow-structure](02-workflow-structure-optional.ipynb).\n",
+ "\n",
+ "## Set up configuration in root directory of this package\n",
+ "\n",
+ "+ As a convention, we set the configuration up in the root directory of the `workflow-miniscope` package and always start importing DataJoint and pipeline modules from there."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import datajoint as dj"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Configure database host address and credentials\n",
+ "\n",
+ "Now let's set up the host, user and password in the `dj.config` global variable"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import getpass\n",
+ "dj.config['database.host'] = '{YOUR_HOST}'\n",
+ "dj.config['database.user'] = '{YOUR_USERNAME}'\n",
+ "dj.config['database.password'] = getpass.getpass() # enter the password securily"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You should be able to connect to the database at this stage."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dj.conn()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Configure the `custom` field in `dj.config` for element-miniscope\n",
+ "\n",
+ "+ The major component of the current workflow is the [DataJoint element-miniscope](https://github.com/datajoint/element-miniscope). `element-miniscope` requires configurations in the field `custom` in `dj.config`:\n",
+ "\n",
+ "### Database prefix\n",
+ "\n",
+ "+ Giving a prefix to schema could help on the configuration of privilege settings. For example, if we set prefix `neuro_`, every schema created with the current workflow will start with `neuro_`, e.g. `neuro_lab`, `neuro_subject`, `neuro_session`, and `neuro_miniscope`.\n",
+ "\n",
+ "+ The prefix could be configurated as follows in `dj.config`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dj.config['custom'] = {'database.prefix': 'neuro_'}"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Root directories for miniscope calcium imaging raw data and processed results\n",
+ "\n",
+ "+ `miniscope_root_data_dir` field indicates the root directory for the miniscope raw data from the Miniscope-DAQ acquisition software (e.g. `*.avi`) or the processed results from CaImAn (e.g. `*.hdf5`). The root path typically do not contain information of subjects or sessions, all data from subjects/sessions should be subdirectories in the root path.\n",
+ "\n",
+ "+ In the database, every path for the raw miniscope data is relative to this root path. The benefit is that the absolute path could be configured for each machine, and when data transfer happens, we just need to change the root directory in the config file.\n",
+ "\n",
+ "+ The workflow supports multiple root directories. If there are multiple possible root directories, specify the `miniscope_root_data_dir` as a list.\n",
+ "\n",
+ "+ The root path(s) are specific to each machine, as the name of drive mount could be different for different operating systems or machines.\n",
+ "\n",
+ "+ In the context of the workflow, all the paths saved into the database or saved in the config file need to be in the POSIX standards (Unix/Linux), with `/`. The path conversion for machines of any operating system is taken care of inside the elements."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If using our example dataset, downloaded with this notebook [00-data-download](00-data-download-optional.ipynb), the root directory will be:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# If there is only one root path:\n",
+ "dj.config['custom']['miniscope_root_data_dir'] = '/tmp/example_data'\n",
+ "# If there are multiple possible root paths:\n",
+ "dj.config['custom']['miniscope_root_data_dir'] = ['/tmp/example_data']"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dj.config"
+ ]
+ },
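+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before saving, it is worth verifying that the configured root path(s) exist on this machine. The check below is a minimal sketch that handles both the string and list forms of `miniscope_root_data_dir`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "roots = dj.config['custom']['miniscope_root_data_dir']\n",
+ "# Normalize to a list, since a single root may be given as a string\n",
+ "roots = roots if isinstance(roots, list) else [roots]\n",
+ "for root in roots:\n",
+ "    print(root, '->', 'found' if os.path.isdir(root) else 'missing')"
+ ]
+ },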
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Save the configuration as a json file\n",
+ "\n",
+ "With the proper configurations, we could save this as a file, either as a local json file, or a global file."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dj.config.save_local()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ls"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Local configuration file is saved as `dj_local_conf.json` in the root directory of this package `workflow-miniscope`. Next time if you change your directory to `workflow-miniscope` before importing DataJoint and the pipeline packages, the configurations will get properly loaded.\n",
+ "\n",
+ "If saved globally, there will be a hidden configuration file saved in your root directory. The configuration will be loaded no matter where the directory is."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# dj.config.save_global()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Next Step\n",
+ "\n",
+ "After the configuration, we will be able to run through the workflow with the [02-workflow-structure](02-workflow-structure-optional.ipynb) notebook."
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "formats": "ipynb,scripts//py"
+ },
+ "kernelspec": {
+ "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)",
+ "name": "python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/notebooks/01-ingest.ipynb b/notebooks/01-ingest.ipynb
deleted file mode 100644
index 87a862a..0000000
--- a/notebooks/01-ingest.ipynb
+++ /dev/null
@@ -1,484 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Ingestion routine\n",
- "\n",
- "+ The following script outlines the steps to ingest UCLA Miniscope data (acquired metadata and processed data) into the DataJoint `workflow-miniscope`.\n",
- "\n",
- "+ To ingest with a completely automated workflow, see `03automate.ipynb`."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Change into the parent directory to find the `dj_local_conf.json` file. \n",
- "# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.\n",
- "import os\n",
- "os.chdir('..')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import numpy as np\n",
- "from workflow_miniscope.pipeline import *"
- ]
- },
- {
- "source": [
- "## Schema diagrams\n",
- "\n",
- "+ The following outputs are the diagrams of the schemas comprising this workflow.\n",
- "\n",
- "+ Please refer back to these diagrams to visualize the relationships of different tables."
- ],
- "cell_type": "markdown",
- "metadata": {}
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [
- {
- "output_type": "execute_result",
- "data": {
- "text/plain": [
- ""
- ],
- "image/svg+xml": ""
- },
- "metadata": {},
- "execution_count": 4
- }
- ],
- "source": [
- "dj.Diagram(lab)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "metadata": {},
- "outputs": [
- {
- "output_type": "execute_result",
- "data": {
- "text/plain": [
- ""
- ],
- "image/svg+xml": ""
- },
- "metadata": {},
- "execution_count": 5
- }
- ],
- "source": [
- "dj.Diagram(subject)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "metadata": {},
- "outputs": [
- {
- "output_type": "execute_result",
- "data": {
- "text/plain": [
- ""
- ],
- "image/svg+xml": ""
- },
- "metadata": {},
- "execution_count": 6
- }
- ],
- "source": [
- "dj.Diagram(session)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {},
- "outputs": [
- {
- "output_type": "execute_result",
- "data": {
- "text/plain": [
- ""
- ],
- "image/svg+xml": ""
- },
- "metadata": {},
- "execution_count": 7
- }
- ],
- "source": [
- "dj.Diagram(scan)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "metadata": {},
- "outputs": [
- {
- "output_type": "execute_result",
- "data": {
- "text/plain": [
- ""
- ],
- "image/svg+xml": ""
- },
- "metadata": {},
- "execution_count": 8
- }
- ],
- "source": [
- "dj.Diagram(imaging)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Insert an entry into `subject.Subject`"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {},
- "outputs": [],
- "source": [
- "subject.Subject.insert1(dict(subject='subject1', \n",
- " sex='F', \n",
- " subject_birth_date='2019-01-01 00:00:01', \n",
- " subject_description='no description'))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Insert an entry into `lab.Equipment`"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "metadata": {},
- "outputs": [],
- "source": [
- "Equipment.insert1(dict(scanner='Miniscope-DAQ-V3'))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Insert an entry into `session.Session`"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "metadata": {},
- "outputs": [],
- "source": [
- "session.Session.insert1(dict(subject='subject1', \n",
- " session_datetime='2021-01-01 00:00:01'))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Insert an entry into `session.SessionDirectory`"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {},
- "outputs": [],
- "source": [
- "session.SessionDirectory.insert1(dict(subject='subject1', \n",
- " session_datetime='2021-01-01 00:00:01', \n",
- " session_dir='/subject1/session0'))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Insert an entry into `scan.Scan`"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "scan.Scan.insert1(dict(subject='subject1', \n",
- " session_datetime='2021-01-01 00:00:01', \n",
- " scan_id=0, \n",
- " scanner='Miniscope-DAQ-V3', \n",
- " acq_software='Miniscope-DAQ-V3',\n",
- " scan_notes=''))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Populate `scan.ScanInfo`\n",
- "\n",
- "+ This imported table stores information about the acquired image (e.g. image dimensions, file paths, etc.).\n",
- "+ `populate` automatically calls `make` for every key for which the auto-populated table is missing data.\n",
- "+ `populate_settings` passes arguments to the `populate` method.\n",
- "+ `display_progress=True` reports the progress bar"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "populate_settings = {'display_progress': True}"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "scan.ScanInfo.populate(**populate_settings)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "collapsed": false
- },
- "source": [
- "## Insert an entry into `imaging.ProcessingTask`\n",
- "\n",
- "+ This entry will trigger ingestion of the processed results (i.e. motion correction, segmentation, and traces)\n",
- "\n",
- "+ The `paramset_idx` is the parameter set stored in `imaging.ProcessingParamSet` that is used for the image processing.\n",
- "\n",
- "+ The `processing_output_dir` attribute contains the output directory of the processed results (relative the the imaging root data directory)."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {
- "collapsed": false,
- "pycharm": {
- "name": "#%%\n"
- }
- },
- "outputs": [],
- "source": [
- "imaging.ProcessingTask.insert1(dict(subject='subject1', \n",
- " session_datetime='2021-01-01 00:00:01', \n",
- " scan_id=0,\n",
- " paramset_idx=0,\n",
- " processing_output_dir='/subject1/session0/miniscope_analysis',\n",
- " task_mode='load'))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Populate `imaging.Processing`\n",
- "\n",
- "+ For the `task_mode=load` specified above in `imaging.ProcessingTask`, this step ensures that the output directory contains the valid processed outputs.\n",
- "\n",
- "+ In the future, this step will provide for the option to `trigger` the analysis within this workflow (if the `task_mode=trigger`)."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "imaging.Processing.populate(**populate_settings)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Populate `imaging.MotionCorrection`\n",
- "\n",
- "+ This table contains the rigid or non-rigid motion correction data including the shifts and summary images.\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "imaging.MotionCorrection.populate(**populate_settings)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "collapsed": false
- },
- "source": [
- "## Insert an entry into `imaging.Curation`\n",
- "\n",
- "+ The next step in the pipeline is the curation of segmentation results. If a manual curation was implemented, an entry needs to be manually inserted into the table Curation, which specifies the directory to the curated results in curation_output_dir. If we would like to process the processed outcome directly, an entry is also needed in Curation. A method create1_from_processing_task was provided to help this insertion. It copies the processing_output_dir in ProcessingTask to the field curation_output_dir in the table Curation with a new curation_id.\n",
- "\n",
- "+ In this example, we create/insert one `imaging.Curation` for each `imaging.ProcessingTask`, specifying the same output directory."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "imaging.Curation(dict(subject='subject1', \n",
- " session_datetime='2021-01-01 00:00:01', \n",
- " scan_id=0,\n",
- " paramset_idx=0,\n",
- " curation_id=0,\n",
- " curation_time='2021-01-01 00:00:01', \n",
- " curation_output_dir='/subject1/session0/miniscope_analysis',\n",
- " manual_curation=False,\n",
- " curation_note=''})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Populate `imaging.Segmentation`\n",
- "\n",
- "+ This table contains the mask coordinates, weights, and centers."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "imaging.Segmentation.populate(**populate_settings)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Populate `imaging.MaskClassification`\n",
- "\n",
- "+ This table is currently not implemented."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "imaging.MaskClassification.populate(**populate_settings)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Populate `imaging.Fluorescence`\n",
- "\n",
- "+ This table contains the fluorescence traces prior filtering and spike extraction"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "imaging.Fluorescence.populate(**populate_settings)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Populate `imaging.Activity`\n",
- "+ This table contains the inferred neural activity from the fluorescence traces."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "imaging.Activity.populate(**populate_settings)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Proceed to the `02explore.ipynb` Jupyter Notebook\n",
- "\n",
- "+ This notebook describes the steps to query, fetch, and visualize the imaging data."
- ]
- }
- ],
- "metadata": {
- "jupytext": {
- "formats": "ipynb,py"
- },
- "kernelspec": {
- "name": "python3",
- "display_name": "Python 3.7.10 64-bit ('workflow-miniscope': conda)",
- "metadata": {
- "interpreter": {
- "hash": "fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c"
- }
- }
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.7.10"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
\ No newline at end of file
diff --git a/notebooks/01-ingest.py b/notebooks/01-ingest.py
deleted file mode 100644
index 75c1f4b..0000000
--- a/notebooks/01-ingest.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# ---
-# jupyter:
-# jupytext:
-# formats: ipynb,py
-# text_representation:
-# extension: .py
-# format_name: light
-# format_version: '1.5'
-# jupytext_version: 1.11.1
-# kernelspec:
-# display_name: 'Python 3.7.10 64-bit (''workflow-miniscope'': conda)'
-# metadata:
-# interpreter:
-# hash: fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c
-# name: python3
-# ---
-
-# # Ingestion routine
-#
-# + The following script outlines the steps to ingest UCLA Miniscope data (acquired metadata and processed data) into the DataJoint `workflow-miniscope`.
-#
-# + To ingest with a completely automated workflow, see `03automate.ipynb`.
-
-# Change into the parent directory to find the `dj_local_conf.json` file.
-# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.
-import os
-os.chdir('..')
-
-import numpy as np
-from workflow_miniscope.pipeline import *
-
-# ## Schema diagrams
-#
-# + The following outputs are the diagrams of the schemas comprising this workflow.
-#
-# + Please refer back to these diagrams to visualize the relationships of different tables.
-
-dj.Diagram(lab)
-
-dj.Diagram(subject)
-
-dj.Diagram(session)
-
-dj.Diagram(scan)
-
-dj.Diagram(imaging)
-
-# ## Insert an entry into `subject.Subject`
-
-subject.Subject.insert1(dict(subject='subject1',
- sex='F',
- subject_birth_date='2019-01-01 00:00:01',
- subject_description='no description'))
-
-# ## Insert an entry into `lab.Equipment`
-
-Equipment.insert1(dict(scanner='Miniscope-DAQ-V3'))
-
-# ## Insert an entry into `session.Session`
-
-session.Session.insert1(dict(subject='subject1',
- session_datetime='2021-01-01 00:00:01'))
-
-# ## Insert an entry into `session.SessionDirectory`
-
-session.SessionDirectory.insert1(dict(subject='subject1',
- session_datetime='2021-01-01 00:00:01',
- session_dir='/subject1/session0'))
-
-# ## Insert an entry into `scan.Scan`
-
-scan.Scan.insert1(dict(subject='subject1',
- session_datetime='2021-01-01 00:00:01',
- scan_id=0,
- scanner='Miniscope-DAQ-V3',
- acq_software='Miniscope-DAQ-V3',
- scan_notes=''))
-
-# ## Populate `scan.ScanInfo`
-#
-# + This imported table stores information about the acquired image (e.g. image dimensions, file paths, etc.).
-# + `populate` automatically calls `make` for every key for which the auto-populated table is missing data.
-# + `populate_settings` passes arguments to the `populate` method.
-# + `display_progress=True` reports the progress bar
-
-populate_settings = {'display_progress': True}
-
-scan.ScanInfo.populate(**populate_settings)
-
-# ## Insert an entry into `imaging.ProcessingTask`
-#
-# + This entry will trigger ingestion of the processed results (i.e. motion correction, segmentation, and traces)
-#
-# + The `paramset_idx` is the parameter set stored in `imaging.ProcessingParamSet` that is used for the image processing.
-#
-# + The `processing_output_dir` attribute contains the output directory of the processed results (relative the the imaging root data directory).
-
-# + pycharm={"name": "#%%\n"}
-imaging.ProcessingTask.insert1(dict(subject='subject1',
- session_datetime='2021-01-01 00:00:01',
- scan_id=0,
- paramset_idx=0,
- processing_output_dir='/subject1/session0/miniscope_analysis',
- task_mode='load'))
-# -
-
-# ## Populate `imaging.Processing`
-#
-# + For the `task_mode=load` specified above in `imaging.ProcessingTask`, this step ensures that the output directory contains the valid processed outputs.
-#
-# + In the future, this step will provide for the option to `trigger` the analysis within this workflow (if the `task_mode=trigger`).
-
-imaging.Processing.populate(**populate_settings)
-
-# ## Populate `imaging.MotionCorrection`
-#
-# + This table contains the rigid or non-rigid motion correction data including the shifts and summary images.
-#
-
-imaging.MotionCorrection.populate(**populate_settings)
-
-# ## Insert an entry into `imaging.Curation`
-#
-# + The next step in the pipeline is the curation of segmentation results. If a manual curation was implemented, an entry needs to be manually inserted into the table Curation, which specifies the directory to the curated results in curation_output_dir. If we would like to process the processed outcome directly, an entry is also needed in Curation. A method create1_from_processing_task was provided to help this insertion. It copies the processing_output_dir in ProcessingTask to the field curation_output_dir in the table Curation with a new curation_id.
-#
-# + In this example, we create/insert one `imaging.Curation` for each `imaging.ProcessingTask`, specifying the same output directory.
-
-imaging.Curation(dict(subject='subject1',
- session_datetime='2021-01-01 00:00:01',
- scan_id=0,
- paramset_idx=0,
- curation_id=0,
- curation_time='2021-01-01 00:00:01',
- curation_output_dir='/subject1/session0/miniscope_analysis',
- manual_curation=False,
- curation_note=''})
-
-# ## Populate `imaging.Segmentation`
-#
-# + This table contains the mask coordinates, weights, and centers.
-
-imaging.Segmentation.populate(**populate_settings)
-
-# ## Populate `imaging.MaskClassification`
-#
-# + This table is currently not implemented.
-
-imaging.MaskClassification.populate(**populate_settings)
-
-# ## Populate `imaging.Fluorescence`
-#
-# + This table contains the fluorescence traces prior filtering and spike extraction
-
-imaging.Fluorescence.populate(**populate_settings)
-
-# ## Populate `imaging.Activity`
-# + This table contains the inferred neural activity from the fluorescence traces.
-
-imaging.Activity.populate(**populate_settings)
-
-# ## Proceed to the `02explore.ipynb` Jupyter Notebook
-#
-# + This notebook describes the steps to query, fetch, and visualize the imaging data.
diff --git a/notebooks/02-explore.ipynb b/notebooks/02-explore.ipynb
deleted file mode 100644
index 1a14a31..0000000
--- a/notebooks/02-explore.ipynb
+++ /dev/null
@@ -1,1401 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# [WIP] DataJoint U24 Workflow Imaging\n",
- "This notebook will describe the steps for interacting with the data ingested into `workflow-miniscope`. \n",
- "\n",
- "Prior to using this notebook, please refer to the [README](https://github.com/datajoint/workflow-imaging) for the topics listed below. \n",
- " + Installation instructions \n",
- " + Directory structure and file naming convention \n",
- " + Running the workflow "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Change into the parent directory to find the `dj_local_conf.json` file. \n",
- "# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.\n",
- "import os\n",
- "os.chdir('..')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from workflow_imaging.pipeline import *"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Workflow architecture"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "image/svg+xml": [
- ""
- ],
- "text/plain": [
- ""
- ]
- },
- "execution_count": 10,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "dj.Diagram(lab)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 14,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "image/svg+xml": [
- ""
- ],
- "text/plain": [
- ""
- ]
- },
- "execution_count": 14,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "dj.Diagram(subject)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "image/svg+xml": [
- ""
- ],
- "text/plain": [
- ""
- ]
- },
- "execution_count": 12,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "dj.Diagram(scan)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "image/svg+xml": [
- ""
- ],
- "text/plain": [
- ""
- ]
- },
- "execution_count": 11,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "dj.Diagram(imaging)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [],
- "source": [
- "subject.Subject()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [],
- "source": [
- "Session()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [],
- "source": [
- "scan.ScanInfo()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [],
- "source": [
- "scan.ScanInfo.Field()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "metadata": {},
- "outputs": [],
- "source": [
- "imaging.ProcessingParamSet()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "metadata": {},
- "outputs": [],
- "source": [
- "imaging.ProcessingTask()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "jupytext": {
- "formats": "ipynb,py"
- },
- "kernelspec": {
- "display_name": "Python 3.7.9 64-bit ('workflow-imaging': conda)",
- "metadata": {
- "interpreter": {
- "hash": "134d995680d44ce2483a761d95a16e9ce77f34191f18929365aa0ab3279667a1"
- }
- },
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.7.9-final"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
\ No newline at end of file
diff --git a/notebooks/02-explore.py b/notebooks/02-explore.py
deleted file mode 100644
index c80f6f1..0000000
--- a/notebooks/02-explore.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# ---
-# jupyter:
-# jupytext:
-# formats: ipynb,py
-# text_representation:
-# extension: .py
-# format_name: light
-# format_version: '1.5'
-# jupytext_version: 1.11.1
-# kernelspec:
-# display_name: 'Python 3.7.9 64-bit (''workflow-imaging'': conda)'
-# metadata:
-# interpreter:
-# hash: 134d995680d44ce2483a761d95a16e9ce77f34191f18929365aa0ab3279667a1
-# name: python3
-# ---
-
-# # [WIP] DataJoint U24 Workflow Imaging
-# This notebook will describe the steps for interacting with the data ingested into `workflow-miniscope`.
-#
-# Prior to using this notebook, please refer to the [README](https://github.com/datajoint/workflow-imaging) for the topics listed below.
-# + Installation instructions
-# + Directory structure and file naming convention
-# + Running the workflow
-
-# Change into the parent directory to find the `dj_local_conf.json` file.
-# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.
-import os
-os.chdir('..')
-
-from workflow_imaging.pipeline import *
-
-# ## Workflow architecture
-
-dj.Diagram(lab)
-
-dj.Diagram(subject)
-
-dj.Diagram(scan)
-
-dj.Diagram(imaging)
-
-subject.Subject()
-
-Session()
-
-scan.ScanInfo()
-
-scan.ScanInfo.Field()
-
-imaging.ProcessingParamSet()
-
-imaging.ProcessingTask()
-
-
diff --git a/notebooks/02-workflow-structure-optional.ipynb b/notebooks/02-workflow-structure-optional.ipynb
new file mode 100644
index 0000000..b93d50c
--- /dev/null
+++ b/notebooks/02-workflow-structure-optional.ipynb
@@ -0,0 +1,366 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Introduction to the workflow structure\n",
+ "\n",
+ "This notebook gives a brief overview of the workflow structure and introduces some useful DataJoint tools to facilitate the exploration.\n",
+ "\n",
+ "+ DataJoint needs to be pre-configured before running this notebook, if you haven't set up the configuration, refer to notebook [01-configure](01-configure.ipynb).\n",
+ "\n",
+ "+ If you are familiar with DataJoint and the workflow structure, proceed directly to the next notebook [03-process](03-process.ipynb) to run the workflow.\n",
+ "\n",
+ "+ For a more thorough introduction of DataJoint functions, please visit our general tutorial site - [DataJoint CodeBook](https://codebook.datajoint.io).\n",
+ "\n",
+ "To load the local configuration, we will change the directory to the package root."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Schemas and tables\n",
+ "\n",
+ "+ The current workflow is composed of multiple database schemas, each of them corresponds to a module within `workflow_miniscope.pipeline`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import datajoint as dj\n",
+ "from workflow_miniscope.pipeline import lab, subject, session, miniscope"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "+ Each module contains a schema object that enables interaction with the schema in the database."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "title": "Each module imported above corresponds to one schema inside the database. For example, `ephys` corresponds to `neuro_ephys` schema in the database."
+ },
+ "outputs": [],
+ "source": [
+ "miniscope.schema"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "+ The table classes in the module corresponds to a table in the schema in the database."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "title": "Each datajoint table class inside the module corresponds to a table inside the schema. For example, the class `ephys.EphysRecording` correponds to the table `_ephys_recording` in the schema `neuro_ephys` in the database."
+ },
+ "outputs": [],
+ "source": [
+ "# preview columns and contents in a table\n",
+ "miniscope.Processing()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "lines_to_next_cell": 0,
+ "title": "The first time importing the modules, empty schemas and tables will be created in the database."
+ },
+ "source": [
+ "+ By importing the modules for the first time, the schemas and tables will be created inside the database.\n",
+ "\n",
+ "+ Once created, importing modules will not create schemas and tables again, but the existing schemas/tables can be accessed and manipulated by the modules."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "lines_to_next_cell": 0,
+ "title": "The schemas and tables will not be re-created when importing modules if they have existed."
+ },
+ "source": [
+ "## DataJoint tools to explore schemas and tables\n",
+ "\n",
+ "+ `dj.list_schemas()`: list all schemas a user has access to in the current database"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "title": "`dj.list_schemas()`: list all schemas a user could access."
+ },
+ "outputs": [],
+ "source": [
+ "dj.list_schemas()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "+ `dj.Diagram()`: plot tables and dependencies in a schema. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "title": "`dj.Diagram()`: plot tables and dependencies"
+ },
+ "outputs": [],
+ "source": [
+ "# plot diagram for all tables in a schema\n",
+ "dj.Diagram(miniscope)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Table tiers**: \n",
+ "\n",
+ "+ Manual table\n",
+ " + Visually represented with a green box.\n",
+ " + Manually inserted table\n",
+ " + Expect new entries daily, e.g. Subject, Recording. \n",
+ "+ Lookup table\n",
+ " + Visually represented with a gray box.\n",
+ " + Pre-inserted table\n",
+ " + Commonly used for general facts or parameters. e.g. Strain, ProcessingParamSet. \n",
+ "+ Imported table\n",
+ " + Visually represented with a blue oval.\n",
+ " + Auto-processing table\n",
+ " + Processing depends on the importing of external files. e.g. `Processing` requires output files from CaImAn. \n",
+ "+ Computed table\n",
+ " + Visually represented with a red circle.\n",
+ " + Auto-processing table\n",
+ " + Processing does not depend on files external to the database. \n",
+ "+ Part table\n",
+ " + Visually represented with plain text.\n",
+ " + As an appendix to the master table, all the part entries of a given master entry represent a intact set of the master entry. e.g. `Mask` of a `Segmentation`.\n",
+ "\n",
+ "**Dependencies**: \n",
+ "\n",
+ "+ One-to-one primary\n",
+ " + Visually represented with a thick solid line.\n",
+ " + Share the exact same primary key, meaning the child table inherits all the primary key fields from the parent table as its own primary key. \n",
+ "+ One-to-many primary\n",
+ " + Visually represented with a thin solid line.\n",
+ " + Inherit the primary key from the parent table, but have additional field(s) as part of the primary key as well.\n",
+ "+ Secondary dependency\n",
+ " + Visually represented with a dashed line.\n",
+ " + The child table inherits the primary key fields from parent table as its own secondary attribute."
+ ]
+ },
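+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For example, the part table `Mask` can be previewed through its master table `Segmentation` (shown here as an illustration; it will be empty until processing results are ingested):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Preview the part table of miniscope.Segmentation\n",
+ "miniscope.Segmentation.Mask()"
+ ]
+ },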
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "title": "`dj.Diagram()`: plot the diagram of the tables and dependencies. It could be used to plot tables in a schema or selected tables."
+ },
+ "outputs": [],
+ "source": [
+ "# plot diagram of tables in multiple schemas\n",
+ "dj.Diagram(subject) + dj.Diagram(session) + dj.Diagram(miniscope)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# plot diagram of selected tables and schemas\n",
+ "dj.Diagram(subject.Subject) + dj.Diagram(session.Session) + dj.Diagram(miniscope)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "lines_to_next_cell": 0,
+ "title": "`heading`:"
+ },
+ "source": [
+ "+ `describe()`: show table definition with foreign key references."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.Processing.describe();"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "+ `heading`: show attribute definitions regardless of foreign key references"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "title": "`heading`: show table attributes regardless of foreign key references."
+ },
+ "outputs": [],
+ "source": [
+ "miniscope.Processing.heading"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "title": "ephys"
+ },
+ "source": [
+ "# DataJoint Elements installed in `workflow-miniscope`\n",
+ "\n",
+ "+ [`lab`](https://github.com/datajoint/element-lab): lab management related information, such as Lab, User, Project, Protocol, Source."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dj.Diagram(lab)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "+ [`subject`](https://github.com/datajoint/element-animal): general animal information, such as User, Genetic background."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dj.Diagram(subject)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "title": "[subject](https://github.com/datajoint/element-animal): contains the basic information of subject, including Strain, Line, Subject, Zygosity, and SubjectDeath information."
+ },
+ "outputs": [],
+ "source": [
+ "subject.Subject.describe();"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "+ [`session`](https://github.com/datajoint/element-session): General information of experimental sessions."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dj.Diagram(session)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "title": "[session](https://github.com/datajoint/element-session): experimental session information"
+ },
+ "outputs": [],
+ "source": [
+ "session.Session.describe();"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "+ [`miniscope`](https://github.com/datajoint/element-miniscope): miniscope raw recording and processed data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "title": "[probe and ephys](https://github.com/datajoint/element-array-ephys): Neuropixel based probe and ephys tables"
+ },
+ "outputs": [],
+ "source": [
+ "dj.Diagram(miniscope)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Summary and next step\n",
+ "\n",
+ "+ This notebook introduced the overall structures of the schemas and tables in the workflow and relevant tools to explore the schema structure and table definitions.\n",
+ "\n",
+ "+ In the next notebook [03-process](03-process.ipynb), we will introduce the detailed steps to run through the workflow."
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "encoding": "# -*- coding: utf-8 -*-",
+ "formats": "ipynb,scripts//py"
+ },
+ "kernelspec": {
+ "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)",
+ "name": "python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/notebooks/03-automate.ipynb b/notebooks/03-automate.ipynb
deleted file mode 100644
index 91a5ebf..0000000
--- a/notebooks/03-automate.ipynb
+++ /dev/null
@@ -1,168 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# [WIP] Automated workflow\n",
- "### Method for inserting entries\n",
- "\n",
- "Modify `user_data/subjects.csv` and `user_data/sessions.csv`, and run the following commands\n",
- "\n",
- "or with the `ingest` method and accompanying `csv` files."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Change into the parent directory to find the `dj_local_conf.json` file. \n",
- "# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.\n",
- "import os\n",
- "os.chdir('..')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from workflow_miniscope.pipeline import *\n",
- "from workflow_miniscope.ingest import ingest_subjects, ingest_sessions"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [
- {
- "output_type": "stream",
- "name": "stdout",
- "text": [
- "\n---- Insert 1 entry(s) into subject.Subject ----\n\n---- Successfully completed ingest_subjects ----\n"
- ]
- }
- ],
- "source": [
- "ingest_subjects()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [
- {
- "output_type": "error",
- "ename": "FileNotFoundError",
- "evalue": "Unable to identify scan files from the supported acquisition softwares (Miniscope-DAQ-V3) at: /Users/kabilar/Documents/Data/U24/imaging_sample_data/pingping_miniscope/session0",
- "traceback": [
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
- "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mingest_sessions\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
- "\u001b[0;32m~/Documents/GitHub/workflow-miniscope/workflow_miniscope/ingest.py\u001b[0m in \u001b[0;36mingest_sessions\u001b[0;34m(session_csv_path)\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 41\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 42\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mFileNotFoundError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf'Unable to identify scan files from the supported acquisition softwares (Miniscope-DAQ-V3) at: {sess_dir}'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 43\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 44\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0macq_software\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m'Miniscope-DAQ-V3'\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
- "\u001b[0;31mFileNotFoundError\u001b[0m: Unable to identify scan files from the supported acquisition softwares (Miniscope-DAQ-V3) at: /Users/kabilar/Documents/Data/U24/imaging_sample_data/pingping_miniscope/session0"
- ]
- }
- ],
- "source": [
- "ingest_sessions()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import pathlib\n",
- "from workflow_miniscope.paths import get_imaging_root_data_dir\n",
- "\n",
- "root_dir = pathlib.Path(get_imaging_root_data_dir())\n",
- "\n",
- "for scan_key in (scan.Scan & scan.ScanInfo - imaging.ProcessingTask).fetch('KEY'):\n",
- " scan_file = root_dir / (scan.ScanInfo.ScanFile & scan_key).fetch('file_path')[0]\n",
- " recording_dir = scan_file.parent\n",
- "\n",
- " miniscope_analysis_dir = recording_dir / 'miniscope_analysis'\n",
- " if miniscope_analysis_dir.exists():\n",
- " imaging.ProcessingTask.insert1({**scan_key,\n",
- " 'paramset_idx': 0,\n",
- " 'processing_output_dir': miniscope_analysis_dir.as_posix()})"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "+ To this end, we make use of a convenient function `imaging.Curation().create1_from_processing_task()`"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "for key in (imaging.ProcessingTask - imaging.Curation).fetch('KEY'):\n",
- " imaging.Curation().create1_from_processing_task(key)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Method for populating tables"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from workflow_miniscope.populate import populate\n",
- "populate(display_progress=False)"
- ]
- }
- ],
- "metadata": {
- "jupytext": {
- "formats": "ipynb,py"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.7.10"
- },
- "kernelspec": {
- "name": "python3",
- "display_name": "Python 3.7.10 64-bit ('workflow-miniscope': conda)",
- "metadata": {
- "interpreter": {
- "hash": "fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c"
- }
- }
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
\ No newline at end of file
diff --git a/notebooks/03-automate.py b/notebooks/03-automate.py
deleted file mode 100644
index adb1f09..0000000
--- a/notebooks/03-automate.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# ---
-# jupyter:
-# jupytext:
-# formats: ipynb,py
-# text_representation:
-# extension: .py
-# format_name: light
-# format_version: '1.5'
-# jupytext_version: 1.11.1
-# kernelspec:
-# display_name: 'Python 3.7.10 64-bit (''workflow-miniscope'': conda)'
-# metadata:
-# interpreter:
-# hash: fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c
-# name: python3
-# ---
-
-# # [WIP] Automated workflow
-# ### Method for inserting entries
-#
-# Modify `user_data/subjects.csv` and `user_data/sessions.csv`, and run the following commands
-#
-# or with the `ingest` method and accompanying `csv` files.
-
-# Change into the parent directory to find the `dj_local_conf.json` file.
-# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.
-import os
-os.chdir('..')
-
-from workflow_miniscope.pipeline import *
-from workflow_miniscope.ingest import ingest_subjects, ingest_sessions
-
-ingest_subjects()
-
-ingest_sessions()
-
-# +
-import pathlib
-from workflow_miniscope.paths import get_imaging_root_data_dir
-
-root_dir = pathlib.Path(get_imaging_root_data_dir())
-
-for scan_key in (scan.Scan & scan.ScanInfo - imaging.ProcessingTask).fetch('KEY'):
- scan_file = root_dir / (scan.ScanInfo.ScanFile & scan_key).fetch('file_path')[0]
- recording_dir = scan_file.parent
-
- miniscope_analysis_dir = recording_dir / 'miniscope_analysis'
- if miniscope_analysis_dir.exists():
- imaging.ProcessingTask.insert1({**scan_key,
- 'paramset_idx': 0,
- 'processing_output_dir': miniscope_analysis_dir.as_posix()})
-# -
-
-
-
-# + To this end, we make use of a convenient function `imaging.Curation().create1_from_processing_task()`
-
-for key in (imaging.ProcessingTask - imaging.Curation).fetch('KEY'):
- imaging.Curation().create1_from_processing_task(key)
-
-# ### Method for populating tables
-
-from workflow_miniscope.populate import populate
-populate(display_progress=False)
diff --git a/notebooks/03-process.ipynb b/notebooks/03-process.ipynb
new file mode 100644
index 0000000..807462a
--- /dev/null
+++ b/notebooks/03-process.ipynb
@@ -0,0 +1,605 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Interactively run miniscope workflow\n",
+ "\n",
+ "+ This notebook walks you through the steps in detail to run the `workflow-miniscope`. \n",
+ "\n",
+ "+ The workflow requires the data acquired from the UCLA Miniscope and Miniscope-DAQ software and processing with CaImAn.\n",
+ "\n",
+ "+ If you haven't configured the paths, refer to [01-configure](01-configure.ipynb).\n",
+ "\n",
+ "+ To overview the schema structures, refer to [02-workflow-structure](02-workflow-structure.ipynb).\n",
+ "\n",
+ "+ If you need a more automatic approach to run the workflow, refer to [04-automate](04-automate-optional.ipynb)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Let's change the directory to the package root directory to load the local configuration (`dj_local_conf.json`)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')\n",
+ "import numpy as np"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## `Pipeline.py`\n",
+ "\n",
+ "+ This script `activates` the DataJoint `Elements` and declares other required tables."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from workflow_miniscope.pipeline import *"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Schema diagrams\n",
+ "\n",
+ "+ The following outputs are the diagrams of the schemas comprising this workflow.\n",
+ "\n",
+ "+ Please refer back to these diagrams to visualize the relationships of different tables."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dj.Diagram(subject.Subject) + dj.Diagram(session.Session) + \\\n",
+ " dj.Diagram(AnatomicalLocation) + dj.Diagram(Equipment) + dj.Diagram(miniscope) "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Insert an entry into `subject.Subject`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "subject.Subject.heading"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "subject.Subject.insert1(dict(subject='subject1', \n",
+ " sex='F', \n",
+ " subject_birth_date='2020-01-01', \n",
+ " subject_description='UCLA Miniscope acquisition'))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Insert an entry into `lab.Equipment`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Equipment.insert1(dict(acquisition_hardware='UCLA Miniscope'))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Insert an entry into `session.Session`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "session.Session.describe();"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "session.Session.heading"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "session_key = dict(subject='subject1', \n",
+ " session_datetime='2021-01-01 00:00:01')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "session.Session.insert1(session_key)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "session.Session()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Insert an entry into `session.SessionDirectory`\n",
+ "\n",
+ "+ The `session_dir` is the relative path to the `miniscope_root_data_dir` for the given session, in POSIX format with `/`.\n",
+ "\n",
+ "+ Instead of a relative path, `session_dir` could be an absolute path but it is not recommended as the absolute path would have to match the `miniscope_root_data_dir` in `dj_local_conf.json`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "session.SessionDirectory.describe();"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "session.SessionDirectory.heading"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "session.SessionDirectory.insert1(dict(**session_key, \n",
+ " session_dir='subject1/session1'))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "session.SessionDirectory()"
+ ]
+ },
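+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "+ At run time, the relative `session_dir` is resolved against the root directory. Below is a minimal sketch using the `find_full_path` helper from `element-interface` (it assumes the example dataset from [00-data-download](00-data-download-optional.ipynb) is in place):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from element_interface.utils import find_full_path\n",
+ "from workflow_miniscope.paths import get_miniscope_root_data_dir\n",
+ "\n",
+ "# resolve the relative session directory to an absolute path\n",
+ "find_full_path(get_miniscope_root_data_dir(), 'subject1/session1')"
+ ]
+ },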
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Insert an entry into `miniscope.Recording`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.Recording.heading"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "recording_key = dict(**session_key,\n",
+ " recording_id=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.Recording.insert1(dict(**recording_key, \n",
+ " acquisition_hardware='UCLA Miniscope', \n",
+ " acquisition_software='Miniscope-DAQ-V4',\n",
+ " recording_directory='subject1/session1',\n",
+ " recording_notes='No notes for this session.'))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.Recording()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Populate `miniscope.RecordingInfo`\n",
+ "\n",
+ "+ This imported table stores information about the acquired image (e.g. image dimensions, file paths, etc.).\n",
+ "+ `populate` automatically calls `make` for every key for which the auto-populated table is missing data.\n",
+ "+ `populate_settings` passes arguments to the `populate` method.\n",
+ "+ `display_progress=True` reports the progress bar"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.RecordingInfo.describe();"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.RecordingInfo.heading"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "populate_settings = {'display_progress': True}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.RecordingInfo.populate(**populate_settings)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.RecordingInfo()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Insert a new entry into `miniscope.ProcessingParamSet` for CaImAn\n",
+ "\n",
+ "+ Define and insert the parameters that will be used for the CaImAn processing.\n",
+ "\n",
+ "+ This step is not needed if you are using an existing ProcessingParamSet.\n",
+ "\n",
+ "### Define CaImAn parameters"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "params = dict(decay_time=0.4,\n",
+ " pw_rigid=False,\n",
+ " max_shifts= (5, 5),\n",
+ " gSig_filt=(3, 3),\n",
+ " strides=(48, 48),\n",
+ " overlaps=(24, 24),\n",
+ " max_deviation_rigid=3,\n",
+ " border_nan='copy',\n",
+ " method_init='corr_pnr',\n",
+ " K=None,\n",
+ " gSig=(3, 3),\n",
+ " gSiz=(13, 13),\n",
+ " merge_thr=0.7,\n",
+ " p=1,\n",
+ " tsub=2,\n",
+ " ssub=1,\n",
+ " rf=40,\n",
+ " stride=20,\n",
+ " only_init=True,\n",
+ " nb=0,\n",
+ " nb_patch=0,\n",
+ " method_deconvolution='oasis',\n",
+ " low_rank_background=None,\n",
+ " update_background_components=True,\n",
+ " min_corr=0.8,\n",
+ " min_pnr=10,\n",
+ " normalize_init=False,\n",
+ " center_psf=True,\n",
+ " ssub_B=2,\n",
+ " ring_size_factor=1.4,\n",
+ " del_duplicates=True,\n",
+ " border_pix=0,\n",
+ " min_SNR=3,\n",
+ " rval_thr=0.85,\n",
+ " use_cnn=False,\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Insert CaImAn parameters\n",
+ "\n",
+ "+ A method of the class `ProcessingParamset` called `insert_new_params` is a helper function to insert the CaImAn parameters and ensures that the parameter set inserted is not duplicated."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.ProcessingParamSet.insert_new_params(\n",
+ " processing_method='caiman', \n",
+ " paramset_id=0, \n",
+ " paramset_desc='Calcium imaging analysis with CaImAn using default parameters',\n",
+ " params=params)"
+ ]
+ },
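+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "+ To verify the insert, a minimal sketch that fetches the stored parameter set back for inspection:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# fetch the parameter set stored under paramset_id=0\n",
+ "(miniscope.ProcessingParamSet & 'paramset_id=0').fetch1('params')"
+ ]
+ },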
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Insert new ProcessingTask to trigger analysis and ingestion of motion correction and segmentation results\n",
+ "\n",
+ "+ Motion correction and segmentation are performed for each recording in CaImAn.\n",
+ "\n",
+ "+ If `task_mode=trigger`, this entry will trigger running analysis (i.e. motion correction, segmentation, and traces) within the `miniscope.Processing` table.\n",
+ "\n",
+ "+ If the `task_mode=load` this step ensures that the output directory contains the valid processed outputs.\n",
+ "\n",
+ "+ The `paramset_id` is the parameter set stored in `miniscope.ProcessingParamSet` that is used for the imaging processing.\n",
+ " \n",
+ "+ The `processing_output_dir` stores the directory of the processing results (relative to the miniscope root data directory)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.ProcessingTask.insert1(dict(**recording_key,\n",
+ " paramset_id=0,\n",
+ " processing_output_dir='subject1/session1/caiman',\n",
+ " task_mode='trigger'))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Populate `miniscope.Processing`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.Processing.populate(**populate_settings)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Insert new Curation following the ProcessingTask\n",
+ "\n",
+ "+ The next step in the pipeline is the curation of motion correction and segmentation results.\n",
+ "\n",
+ "+ If a manual curation was implemented, an entry needs to be manually inserted into the table `miniscope.Curation`, which specifies the directory to the curated results in `curation_output_dir`. \n",
+ "\n",
+ "+ If we would like to use the processed outcome directly, an entry is also needed in `miniscope.Curation`. A method `create1_from_processing_task` was provided to help this insertion. It copies the `processing_output_dir` in `miniscope.ProcessingTask` to the field `curation_output_dir` in the table `miniscope.Curation` with a new `curation_id`.\n",
+ "\n",
+ " + In this example, we create/insert one `miniscope.Curation` for each `miniscope.ProcessingTask`, specifying the same output directory.\n",
+ "\n",
+ " + To this end, we could also make use of a convenient function `miniscope.Curation().create1_from_processing_task()`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.Curation.insert1(dict(**recording_key,\n",
+ " paramset_id=0,\n",
+ " curation_id=0,\n",
+ " curation_time='2022-04-30 12:22:15', \n",
+ " curation_output_dir='subject1/session1/caiman',\n",
+ " manual_curation=False,\n",
+ " curation_note=''))"
+ ]
+ },
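+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "+ Alternatively, a minimal sketch of the `create1_from_processing_task` helper mentioned above, creating one `Curation` entry per `ProcessingTask` that has none yet (commented out here, since the manual insert above already covers this task):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# for key in (miniscope.ProcessingTask - miniscope.Curation).fetch('KEY'):\n",
+ "#     miniscope.Curation().create1_from_processing_task(key)"
+ ]
+ },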
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Populate `miniscope.MotionCorrection`\n",
+ "\n",
+ "+ This table contains the rigid or non-rigid motion correction data including the shifts and summary images.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.MotionCorrection.populate(**populate_settings)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Populate `miniscope.Segmentation`\n",
+ "\n",
+ "+ This table contains the mask coordinates, weights, and centers.\n",
+ "+ This table also inserts the data into `MaskClassification`, which is the classification of the segmented masks and the confidence of classification."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.Segmentation.populate(**populate_settings)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Add another set of results from a new round of curation\n",
+ "\n",
+ "If you performed curation on an existing processed results (i.e. motion correction or segmentation) then:\n",
+ " \n",
+ "+ Add an entry into `miniscope.Curation` with the directory of the curated results and a new `curation_id`.\n",
+ "\n",
+ "+ Populate the `miniscope.MotionCorrection` and `miniscope.Segmentation` tables again."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Populate `miniscope.Fluorescence`\n",
+ "\n",
+ "+ This table contains the fluorescence traces prior to filtering and spike extraction."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.Fluorescence.populate(**populate_settings)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Populate `miniscope.Activity`\n",
+ "+ This table contains the inferred neural activity from the fluorescence traces."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "miniscope.Activity.populate(**populate_settings)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Next steps\n",
+ "\n",
+ "+ Proceed to the [05-explore](05-explore.ipynb) to learn how to query, fetch, and visualize the imaging data."
+ ]
+ }
+ ],
+ "metadata": {
+ "interpreter": {
+ "hash": "d4d1e4263499bec80672ea0156c357c1ee493ec2b1c70f0acce89fc37c4a6abe"
+ },
+ "jupytext": {
+ "formats": "ipynb,scripts//py"
+ },
+ "kernelspec": {
+ "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/notebooks/06-drop-optional.ipynb b/notebooks/06-drop-optional.ipynb
index e2d0bb5..e955455 100644
--- a/notebooks/06-drop-optional.ipynb
+++ b/notebooks/06-drop-optional.ipynb
@@ -1,43 +1,22 @@
{
- "metadata": {
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.7.9"
- },
- "orig_nbformat": 2,
- "kernelspec": {
- "name": "python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c",
- "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2,
"cells": [
{
+ "cell_type": "markdown",
+ "metadata": {},
"source": [
"# Drop schemas\n",
"\n",
"+ Schemas are not typically dropped in a production workflow with real data in it. \n",
"+ At the developmental phase, it might be required for the table redesign.\n",
"+ When dropping all schemas is needed, the following is the dependency order."
- ],
- "cell_type": "markdown",
- "metadata": {}
+ ]
},
{
+ "cell_type": "markdown",
+ "metadata": {},
"source": [
"Change into the parent directory to find the `dj_local_conf.json` file. "
- ],
- "cell_type": "markdown",
- "metadata": {}
+ ]
},
{
"cell_type": "code",
@@ -46,7 +25,7 @@
"outputs": [],
"source": [
"import os\n",
- "os.chdir('..')"
+ "if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')"
]
},
{
@@ -77,5 +56,28 @@
"outputs": [],
"source": []
}
- ]
-}
\ No newline at end of file
+ ],
+ "metadata": {
+ "jupytext": {
+ "formats": "ipynb,scripts//py"
+ },
+ "kernelspec": {
+ "display_name": "Python 3.7.9 64-bit ('workflow-calcium-imaging': conda)",
+ "name": "python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/notebooks/scripts/00-data-download-optional.py b/notebooks/scripts/00-data-download-optional.py
new file mode 100644
index 0000000..23781da
--- /dev/null
+++ b/notebooks/scripts/00-data-download-optional.py
@@ -0,0 +1,72 @@
+# ---
+# jupyter:
+# jupytext:
+# formats: ipynb,scripts//py
+# text_representation:
+# extension: .py
+# format_name: light
+# format_version: '1.5'
+# jupytext_version: 1.13.7
+# kernelspec:
+# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)'
+# name: python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c
+# ---
+
+# # Download example dataset
+#
+# + This workflow requires miniscope calcium imaging data acquired with the UCLA Miniscope and processed with CaImAn. We provide an example dataset to download and run through the workflow, and this notebook walks you through the download process.
+#
+# ## Install `djarchive-client`
+#
+# + The example dataset is hosted on `djarchive`, an AWS-based storage service.
+#
+# + We provide a client package, [djarchive-client](https://github.com/datajoint/djarchive-client), to download the data, which can be installed with pip:
+
+# %pip install git+https://github.com/datajoint/djarchive-client.git
+
+# ## Download example datasets using `djarchive-client`
+
+import djarchive_client
+client = djarchive_client.client()
+
+# Browse the datasets that are available on `djarchive`:
+
+list(client.datasets())
+
+# Browse the different versions of each dataset:
+
+list(client.revisions())
+
+# To download the dataset, let's prepare a directory, for example in `/tmp`:
+
+import os
+os.makedirs('/tmp/example_data', exist_ok=True)
+
+# Run download for a given dataset and revision:
+
+client.download('workflow-miniscope-test-set', target_directory='/tmp/example_data', revision='v1')
+
+# ## Directory structure
+#
+# + After downloading, the directory will be organized as follows:
+#
+# ```
+# /tmp/example_data/
+# - subject1/
+# - session1/
+# - 0.avi
+# - metaData.json
+# - timeStamps.csv
+# - caiman/
+# - subject1_session1.hdf5
+# ```
+#
+# + The subject1 data was recorded with the UCLA Miniscope and Miniscope-DAQ-V4 acquisition software, and processed with CaImAn.
+#
+# + We will use the subject1 dataset as the example for the rest of the notebooks. If you use your own dataset for the workflow, change the paths accordingly.
+#
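+
+# As a quick sanity check, here is a minimal sketch that lists the downloaded
+# tree (the paths assume the example dataset and target directory above):
+
+import pathlib
+
+# print every downloaded file relative to the target directory
+for path in sorted(pathlib.Path('/tmp/example_data').rglob('*')):
+    print(path.relative_to('/tmp/example_data'))
+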
+# ## Next step
+#
+# + In the next notebook ([01-configure](01-configure.ipynb)) we will set up the configuration file for the workflow.
+
diff --git a/notebooks/scripts/01-configure.py b/notebooks/scripts/01-configure.py
new file mode 100644
index 0000000..fd88344
--- /dev/null
+++ b/notebooks/scripts/01-configure.py
@@ -0,0 +1,94 @@
+# ---
+# jupyter:
+# jupytext:
+# formats: ipynb,scripts//py
+# text_representation:
+# extension: .py
+# format_name: light
+# format_version: '1.5'
+# jupytext_version: 1.13.7
+# kernelspec:
+# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)'
+# name: python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c
+# ---
+
+# # Configure DataJoint connection to the database
+#
+# + To run `workflow-miniscope`, we need to set up the DataJoint configuration properly. The configuration will be saved in a file called `dj_local_conf.json` on each machine, and this notebook walks you through the process.
+#
+# + The configuration only needs to be set up once. If you have gone through the configuration before, skip ahead to [02-workflow-structure](02-workflow-structure-optional.ipynb).
+#
+# ## Set up configuration in root directory of this package
+#
+# + As a convention, we set the configuration up in the root directory of the `workflow-miniscope` package and always start importing DataJoint and pipeline modules from there.
+
+import os
+if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')
+
+import datajoint as dj
+
+# ## Configure database host address and credentials
+#
+# Now let's set up the host, user, and password in the `dj.config` global variable.
+
+import getpass
+dj.config['database.host'] = '{YOUR_HOST}'
+dj.config['database.user'] = '{YOUR_USERNAME}'
+dj.config['database.password'] = getpass.getpass() # enter the password securely
+
+# You should be able to connect to the database at this stage.
+
+dj.conn()
+
+# ## Configure the `custom` field in `dj.config` for element-miniscope
+#
+# + The major component of the current workflow is the [DataJoint element-miniscope](https://github.com/datajoint/element-miniscope). `element-miniscope` requires configurations in the field `custom` in `dj.config`:
+#
+# ### Database prefix
+#
+# + Giving a prefix to each schema helps with the configuration of privilege settings. For example, if we set the prefix `neuro_`, every schema created with the current workflow will start with `neuro_`, e.g. `neuro_lab`, `neuro_subject`, `neuro_session`, and `neuro_miniscope`.
+#
+# + The prefix can be configured in `dj.config` as follows:
+
+dj.config['custom'] = {'database.prefix': 'neuro_'}
+
+# ### Root directories for miniscope calcium imaging raw data and processed results
+#
+# + The `miniscope_root_data_dir` field indicates the root directory for the miniscope raw data from the Miniscope-DAQ acquisition software (e.g. `*.avi`) and the processed results from CaImAn (e.g. `*.hdf5`). The root path typically does not contain subject or session information; all subject/session data should be in subdirectories of the root path.
+#
+# + In the database, every path for the raw miniscope data is stored relative to this root path. The benefit is that the absolute path can be configured per machine, and when data are transferred, only the root directory in the config file needs to change.
+#
+# + The workflow supports multiple root directories. If there are multiple possible root directories, specify `miniscope_root_data_dir` as a list.
+#
+# + The root path(s) are specific to each machine, as drive mount names can differ across operating systems and machines.
+#
+# + In the context of the workflow, all paths saved into the database or the config file need to follow the POSIX standard (Unix/Linux), using `/`. Path conversion for any operating system is handled inside the Elements.
+
+# If using our example dataset, downloaded with this notebook [00-data-download](00-data-download-optional.ipynb), the root directory will be:
+
+# If there is only one root path:
+dj.config['custom']['miniscope_root_data_dir'] = '/tmp/example_data'
+# If there are multiple possible root paths:
+dj.config['custom']['miniscope_root_data_dir'] = ['/tmp/example_data']
+
+dj.config
+
+# ## Save the configuration as a json file
+#
+# With the proper configuration, we can save it to a file, either as a local JSON file or a global file.
+
+dj.config.save_local()
+
+# ls
+
+# The local configuration file is saved as `dj_local_conf.json` in the root directory of this package (`workflow-miniscope`). Next time, if you change into the `workflow-miniscope` directory before importing DataJoint and the pipeline packages, the configuration will be loaded properly.
+#
+# If saved globally, a hidden configuration file will be saved in your home directory, and it will be loaded regardless of the working directory.
+
+# +
+# dj.config.save_global()
+# -
+
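+# To confirm what was saved, here is a minimal sketch that reads the local
+# config file back (it assumes `dj.config.save_local()` was run above):
+
+import json
+
+# the `custom` field holds the prefix and root directory set earlier
+with open('dj_local_conf.json') as f:
+    print(json.load(f)['custom'])
+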
+# ## Next step
+#
+# After the configuration, we will be able to run through the workflow with the [02-workflow-structure](02-workflow-structure-optional.ipynb) notebook.
diff --git a/notebooks/scripts/02-workflow-structure-optional.py b/notebooks/scripts/02-workflow-structure-optional.py
new file mode 100644
index 0000000..b6d0b18
--- /dev/null
+++ b/notebooks/scripts/02-workflow-structure-optional.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+# ---
+# jupyter:
+# jupytext:
+# formats: ipynb,scripts//py
+# text_representation:
+# extension: .py
+# format_name: light
+# format_version: '1.5'
+# jupytext_version: 1.13.7
+# kernelspec:
+# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)'
+# name: python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c
+# ---
+
+# # Introduction to the workflow structure
+#
+# This notebook gives a brief overview of the workflow structure and introduces some useful DataJoint tools to facilitate the exploration.
+#
+# + DataJoint needs to be pre-configured before running this notebook. If you haven't set up the configuration, refer to the notebook [01-configure](01-configure.ipynb).
+#
+# + If you are familiar with DataJoint and the workflow structure, proceed directly to the next notebook [03-process](03-process.ipynb) to run the workflow.
+#
+# + For a more thorough introduction of DataJoint functions, please visit our general tutorial site - [DataJoint CodeBook](https://codebook.datajoint.io).
+#
+# To load the local configuration, we will change the directory to the package root.
+
+import os
+if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')
+
+# ## Schemas and tables
+#
+# + The current workflow is composed of multiple database schemas, each of which corresponds to a module within `workflow_miniscope.pipeline`.
+
+import datajoint as dj
+from workflow_miniscope.pipeline import lab, subject, session, miniscope
+
+# + Each module contains a schema object that enables interaction with the schema in the database. For example, `miniscope` corresponds to the `neuro_miniscope` schema in the database.
+
+miniscope.schema
+
+# + Each DataJoint table class inside a module corresponds to a table inside the schema. For example, the class `miniscope.Processing` corresponds to the table `_processing` in the schema `neuro_miniscope` in the database.
+
+# preview the columns and contents of a table
+miniscope.Processing()
+
+# + The first time the modules are imported, empty schemas and tables are created in the database.
+#
+# + Once created, importing the modules again will not re-create the schemas and tables, but the existing schemas/tables can be accessed and manipulated by the modules.
+
+# ## DataJoint tools to explore schemas and tables
+#
+# + `dj.list_schemas()`: list all schemas the current user has access to in the database.
+dj.list_schemas()
+
+# + `dj.Diagram()`: plot tables and dependencies in a schema.
+
+# plot the diagram for all tables in a schema
+dj.Diagram(miniscope)
+
+# **Table tiers**:
+#
+# + Manual table
+# + Visually represented with a green box.
+# + Manually inserted table
+# + Expect new entries daily, e.g. Subject, Recording.
+# + Lookup table
+# + Visually represented with a gray box.
+# + Pre-inserted table
+# + Commonly used for general facts or parameters, e.g. Strain, ProcessingParamSet.
+# + Imported table
+# + Visually represented with a blue oval.
+# + Auto-processing table
+# + Processing depends on the importing of external files, e.g. `Processing` requires output files from CaImAn.
+# + Computed table
+# + Visually represented with a red circle.
+# + Auto-processing table
+# + Processing does not depend on files external to the database.
+# + Part table
+# + Visually represented with plain text.
+# + As an appendix to the master table, all the part entries of a given master entry represent an intact set of the master entry, e.g. `Mask` of a `Segmentation`.
+#
+# **Dependencies**:
+#
+# + One-to-one primary
+# + Visually represented with a thick solid line.
+# + Share the exact same primary key, meaning the child table inherits all the primary key fields from the parent table as its own primary key.
+# + One-to-many primary
+# + Visually represented with a thin solid line.
+# + Inherit the primary key from the parent table, but have additional field(s) as part of the primary key as well.
+# + Secondary dependency
+# + Visually represented with a dashed line.
+# + The child table inherits the primary key fields from the parent table as its own secondary attributes.
+
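+# The tier of a table class can also be checked programmatically; a minimal
+# sketch using DataJoint's tier base classes:
+
+print(issubclass(miniscope.Processing, dj.Imported))  # True: auto-processing table
+print(issubclass(subject.Subject, dj.Manual))         # True: manually inserted table
+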
+# + `dj.Diagram()`: plot the diagram of the tables and dependencies. It can be used to plot all tables in a schema or only selected tables.
+
+# plot the diagram of tables in multiple schemas
+dj.Diagram(subject) + dj.Diagram(session) + dj.Diagram(miniscope)
+
+# plot diagram of selected tables and schemas
+dj.Diagram(subject.Subject) + dj.Diagram(session.Session) + dj.Diagram(miniscope)
+
+# + `describe()`: show the table definition with foreign key references.
+miniscope.Processing.describe();
+
+# + `heading`: show attribute definitions regardless of foreign key references.
+miniscope.Processing.heading
+
+# ## DataJoint Elements installed in `workflow-miniscope`
+#
+# + [`lab`](https://github.com/datajoint/element-lab): lab management-related information, such as Lab, User, Project, Protocol, and Source.
+
+dj.Diagram(lab)
+
+# + [`subject`](https://github.com/datajoint/element-animal): general animal information, including Strain, Line, Subject, Zygosity, and SubjectDeath.
+
+dj.Diagram(subject)
+
+# inspect the Subject table definition
+subject.Subject.describe();
+
+# + [`session`](https://github.com/datajoint/element-session): general information about experimental sessions.
+
+dj.Diagram(session)
+
+# inspect the Session table definition
+session.Session.describe();
+
+# + [`miniscope`](https://github.com/datajoint/element-miniscope): miniscope raw recording and processed data.
+
+dj.Diagram(miniscope)
+
+# ## Summary and next step
+#
+# + This notebook introduced the overall structure of the schemas and tables in the workflow, and the relevant tools to explore the schema structure and table definitions.
+#
+# + In the next notebook [03-process](03-process.ipynb), we will introduce the detailed steps to run through the workflow.
diff --git a/notebooks/scripts/03-process.py b/notebooks/scripts/03-process.py
new file mode 100644
index 0000000..3bf245b
--- /dev/null
+++ b/notebooks/scripts/03-process.py
@@ -0,0 +1,253 @@
+# ---
+# jupyter:
+# jupytext:
+# formats: ipynb,scripts//py
+# text_representation:
+# extension: .py
+# format_name: light
+# format_version: '1.5'
+# jupytext_version: 1.13.7
+# kernelspec:
+# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)'
+# language: python
+# name: python3
+# ---
+
+# # Interactively run miniscope workflow
+#
+# + This notebook walks you through the steps to run the `workflow-miniscope` in detail.
+#
+# + The workflow requires data acquired with the UCLA Miniscope and Miniscope-DAQ software, processed with CaImAn.
+#
+# + If you haven't configured the paths, refer to [01-configure](01-configure.ipynb).
+#
+# + To overview the schema structures, refer to [02-workflow-structure](02-workflow-structure.ipynb).
+#
+# + If you need a more automated approach to run the workflow, refer to [04-automate](04-automate-optional.ipynb).
+
+# Let's change to the package root directory to load the local configuration (`dj_local_conf.json`).
+
+import os
+if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')
+import numpy as np
+
+# ## `pipeline.py`
+#
+# + This script `activates` the DataJoint `Elements` and declares other required tables.
+
+from workflow_miniscope.pipeline import *
+
+# ## Schema diagrams
+#
+# + The following outputs are the diagrams of the schemas comprising this workflow.
+#
+# + Please refer back to these diagrams to visualize the relationships of different tables.
+
+dj.Diagram(subject.Subject) + dj.Diagram(session.Session) + \
+ dj.Diagram(AnatomicalLocation) + dj.Diagram(Equipment) + dj.Diagram(miniscope)
+
+# ## Insert an entry into `subject.Subject`
+
+subject.Subject.heading
+
+subject.Subject.insert1(dict(subject='subject1',
+ sex='F',
+ subject_birth_date='2020-01-01',
+ subject_description='UCLA Miniscope acquisition'))
+
+# ## Insert an entry into `lab.Equipment`
+
+Equipment.insert1(dict(acquisition_hardware='UCLA Miniscope'))
+
+# ## Insert an entry into `session.Session`
+
+session.Session.describe();
+
+session.Session.heading
+
+session_key = dict(subject='subject1',
+ session_datetime='2021-01-01 00:00:01')
+
+session.Session.insert1(session_key)
+
+session.Session()
+
+# ## Insert an entry into `session.SessionDirectory`
+#
+# + The `session_dir` is the path of the given session relative to the `miniscope_root_data_dir`, in POSIX format with `/`.
+#
+# + Instead of a relative path, `session_dir` could be an absolute path, but this is not recommended, as the absolute path would have to match the `miniscope_root_data_dir` in `dj_local_conf.json`.
+
+session.SessionDirectory.describe();
+
+session.SessionDirectory.heading
+
+session.SessionDirectory.insert1(dict(**session_key,
+ session_dir='subject1/session1'))
+
+session.SessionDirectory()
+
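+# At run time, the relative `session_dir` is resolved against the root directory.
+# Below is a minimal sketch using the `find_full_path` helper from
+# `element-interface` (it assumes the example dataset from
+# [00-data-download](00-data-download-optional.ipynb) is in place):
+
+from element_interface.utils import find_full_path
+from workflow_miniscope.paths import get_miniscope_root_data_dir
+
+# resolve the relative session directory to an absolute path
+find_full_path(get_miniscope_root_data_dir(), 'subject1/session1')
+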
+# ## Insert an entry into `miniscope.Recording`
+
+miniscope.Recording.heading
+
+recording_key = dict(**session_key,
+ recording_id=0)
+
+miniscope.Recording.insert1(dict(**recording_key,
+ acquisition_hardware='UCLA Miniscope',
+ acquisition_software='Miniscope-DAQ-V4',
+ recording_directory='subject1/session1',
+ recording_notes='No notes for this session.'))
+
+miniscope.Recording()
+
+# ## Populate `miniscope.RecordingInfo`
+#
+# + This imported table stores information about the acquired imaging data (e.g. image dimensions and file paths).
+# + `populate` automatically calls `make` for every key for which the auto-populated table is missing data.
+# + `populate_settings` passes arguments to the `populate` method.
+# + `display_progress=True` displays a progress bar.
+
+miniscope.RecordingInfo.describe();
+
+miniscope.RecordingInfo.heading
+
+populate_settings = {'display_progress': True}
+
+miniscope.RecordingInfo.populate(**populate_settings)
+
+miniscope.RecordingInfo()
+
+# ## Insert a new entry into `miniscope.ProcessingParamSet` for CaImAn
+#
+# + Define and insert the parameters that will be used for the CaImAn processing.
+#
+# + This step is not needed if you are using an existing ProcessingParamSet.
+#
+# ### Define CaImAn parameters
+
+params = dict(decay_time=0.4,
+ pw_rigid=False,
+ max_shifts= (5, 5),
+ gSig_filt=(3, 3),
+ strides=(48, 48),
+ overlaps=(24, 24),
+ max_deviation_rigid=3,
+ border_nan='copy',
+ method_init='corr_pnr',
+ K=None,
+ gSig=(3, 3),
+ gSiz=(13, 13),
+ merge_thr=0.7,
+ p=1,
+ tsub=2,
+ ssub=1,
+ rf=40,
+ stride=20,
+ only_init=True,
+ nb=0,
+ nb_patch=0,
+ method_deconvolution='oasis',
+ low_rank_background=None,
+ update_background_components=True,
+ min_corr=0.8,
+ min_pnr=10,
+ normalize_init=False,
+ center_psf=True,
+ ssub_B=2,
+ ring_size_factor=1.4,
+ del_duplicates=True,
+ border_pix=0,
+ min_SNR=3,
+ rval_thr=0.85,
+ use_cnn=False,
+ )
+
+# ### Insert CaImAn parameters
+#
+# + The `ProcessingParamSet.insert_new_params` method is a helper function that inserts the CaImAn parameters and ensures that the inserted parameter set is not duplicated.
+
+miniscope.ProcessingParamSet.insert_new_params(
+ processing_method='caiman',
+ paramset_id=0,
+ paramset_desc='Calcium imaging analysis with CaImAn using default parameters',
+ params=params)
+
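+# To verify the insert, a minimal sketch that fetches the stored parameter set
+# back for inspection:
+
+# fetch the parameter set stored under paramset_id=0
+(miniscope.ProcessingParamSet & 'paramset_id=0').fetch1('params')
+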
+# ## Insert new ProcessingTask to trigger analysis and ingestion of motion correction and segmentation results
+#
+# + Motion correction and segmentation are performed for each recording in CaImAn.
+#
+# + If `task_mode=trigger`, this entry will trigger the analysis (i.e. motion correction, segmentation, and trace extraction) within the `miniscope.Processing` table.
+#
+# + If `task_mode=load`, this step ensures that the output directory contains valid processed outputs.
+#
+# + The `paramset_id` references the parameter set stored in `miniscope.ProcessingParamSet` that is used for the image processing.
+#
+# + The `processing_output_dir` stores the directory of the processing results (relative to the miniscope root data directory).
+
+miniscope.ProcessingTask.insert1(dict(**recording_key,
+ paramset_id=0,
+ processing_output_dir='subject1/session1/caiman',
+ task_mode='trigger'))
+
+# ## Populate `miniscope.Processing`
+
+miniscope.Processing.populate(**populate_settings)
+
+# ## Insert new Curation following the ProcessingTask
+#
+# + The next step in the pipeline is the curation of motion correction and segmentation results.
+#
+# + If a manual curation was implemented, an entry needs to be manually inserted into the table `miniscope.Curation`, which specifies the directory of the curated results in `curation_output_dir`.
+#
+# + If we would like to use the processed outputs directly, an entry is still needed in `miniscope.Curation`. The method `create1_from_processing_task` is provided to help with this insertion. It copies the `processing_output_dir` from `miniscope.ProcessingTask` to the field `curation_output_dir` in the table `miniscope.Curation` with a new `curation_id`.
+#
+# + In this example, we create/insert one `miniscope.Curation` for each `miniscope.ProcessingTask`, specifying the same output directory.
+#
+# + To this end, we can also use the convenient method `miniscope.Curation().create1_from_processing_task()`; see the sketch after the insert below.
+
+miniscope.Curation.insert1(dict(**recording_key,
+ paramset_id=0,
+ curation_id=0,
+ curation_time='2022-04-30 12:22:15',
+ curation_output_dir='subject1/session1/caiman',
+ manual_curation=False,
+ curation_note=''))
+
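+# Alternatively, a minimal sketch of the `create1_from_processing_task` helper
+# mentioned above, creating one `Curation` entry per `ProcessingTask` that has
+# none yet (commented out here, since the manual insert above already covers
+# this task):
+
+# for key in (miniscope.ProcessingTask - miniscope.Curation).fetch('KEY'):
+#     miniscope.Curation().create1_from_processing_task(key)
+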
+# ## Populate `miniscope.MotionCorrection`
+#
+# + This table contains the rigid or non-rigid motion correction data, including the shifts and summary images.
+#
+
+miniscope.MotionCorrection.populate(**populate_settings)
+
+# ## Populate `miniscope.Segmentation`
+#
+# + This table contains the mask coordinates, weights, and centers.
+# + This table also inserts the data into `MaskClassification`, which is the classification of the segmented masks and the confidence of classification.
+
+miniscope.Segmentation.populate(**populate_settings)
+
+# ## Add another set of results from a new round of curation
+#
+# If you performed curation on existing processed results (i.e. motion correction or segmentation), then:
+#
+# + Add an entry into `miniscope.Curation` with the directory of the curated results and a new `curation_id`.
+#
+# + Populate the `miniscope.MotionCorrection` and `miniscope.Segmentation` tables again.
+
+# ## Populate `miniscope.Fluorescence`
+#
+# + This table contains the fluorescence traces prior to filtering and spike extraction.
+
+miniscope.Fluorescence.populate(**populate_settings)
+
+# ## Populate `miniscope.Activity`
+# + This table contains the inferred neural activity from the fluorescence traces.
+
+miniscope.Activity.populate(**populate_settings)
+
+# ## Next steps
+#
+# + Proceed to [05-explore](05-explore.ipynb) to learn how to query, fetch, and visualize the imaging data.
diff --git a/notebooks/04-drop.py b/notebooks/scripts/06-drop-optional.py
similarity index 56%
rename from notebooks/04-drop.py
rename to notebooks/scripts/06-drop-optional.py
index 0e9b3c1..6e9e4ba 100644
--- a/notebooks/04-drop.py
+++ b/notebooks/scripts/06-drop-optional.py
@@ -1,37 +1,32 @@
# ---
# jupyter:
# jupytext:
-# formats: ipynb,py:light
+# formats: ipynb,scripts//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
-# jupytext_version: 1.11.1
+# jupytext_version: 1.13.7
# kernelspec:
-# display_name: 'Python 3.7.10 64-bit (''workflow-miniscope'': conda)'
-# metadata:
-# interpreter:
-# hash: fb69438121b2de1073146525f02c3c959a56b9d2c1d6b0a62c34f01201323d1c
-# name: python3
+# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)'
+# name: python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c
# ---
# # Drop schemas
#
-# + This notebook is NOT required.
# + Schemas are not typically dropped in a production workflow with real data in it.
# + At the developmental phase, it might be required for the table redesign.
# + When dropping all schemas is needed, the following is the dependency order.
# Change into the parent directory to find the `dj_local_conf.json` file.
-# When you type `import datajoint as dj` the credentials from `dj_local_conf.json` will be used to log into the database.
+
import os
-os.chdir('..')
+if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')
from workflow_miniscope.pipeline import *
# +
-# imaging.schema.drop()
-# scan.schema.drop()
+# miniscope.schema.drop()
# session.schema.drop()
# subject.schema.drop()
# lab.schema.drop()
diff --git a/requirements.txt b/requirements.txt
index 602b81e..f8722c7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,6 +2,7 @@ datajoint>=0.13.0
element-lab==0.1.0b0
element-animal==0.1.0b0
element-session==0.1.0b0
-element-miniscope @ git+https://github.com/datajoint/element-miniscope.git
+element-miniscope==0.1.0
element-interface @ git+https://github.com/datajoint/element-interface.git
-djarchive-client @ git+https://github.com/datajoint/djarchive-client.git
\ No newline at end of file
+djarchive-client @ git+https://github.com/datajoint/djarchive-client.git
+jupytext==1.13.7
\ No newline at end of file
diff --git a/tests/user_data/sessions.csv b/tests/user_data/sessions.csv
deleted file mode 100644
index f7f9291..0000000
--- a/tests/user_data/sessions.csv
+++ /dev/null
@@ -1,5 +0,0 @@
-subject,session_dir
-subject1,F:/U24/workflow_imaging_data/subject1/20200609_170519
-subject1,F:/U24/workflow_imaging_data/subject1/20200609_171646
-subject2,F:/U24/workflow_imaging_data/subject2/20200420_1843959
-subject3,F:/U24/workflow_imaging_data/subject3/210107_run00_orientation_8dir
diff --git a/tests/user_data/subjects.csv b/tests/user_data/subjects.csv
deleted file mode 100644
index 538b942..0000000
--- a/tests/user_data/subjects.csv
+++ /dev/null
@@ -1,4 +0,0 @@
-subject,sex,subject_birth_date,subject_description
-subject1,F,2020-01-01 00:00:01,91760
-subject2,M,2020-01-01 00:00:01,90853
-subject3,F,2020-01-01 00:00:01,sbx-JC015
diff --git a/user_data/sessions.csv b/user_data/sessions.csv
index b51bb15..e7af20c 100644
--- a/user_data/sessions.csv
+++ b/user_data/sessions.csv
@@ -1,2 +1,2 @@
-subject,session_dir
-subject1,subject1/session1
\ No newline at end of file
+subject,session_dir,acquisition_software
+subject1,subject1/session1,Miniscope-DAQ-V4
\ No newline at end of file
diff --git a/workflow_miniscope/ingest.py b/workflow_miniscope/ingest.py
index 7f6f887..500fa5f 100644
--- a/workflow_miniscope/ingest.py
+++ b/workflow_miniscope/ingest.py
@@ -1,13 +1,14 @@
import pathlib
import csv
from datetime import datetime
+import json
-from .pipeline import subject, imaging, scan, session, Equipment
-from .paths import get_imaging_root_data_dir
-
+from .pipeline import subject, session, Equipment, miniscope
+from .paths import get_miniscope_root_data_dir
+from element_interface.utils import find_full_path, recursive_search
def ingest_subjects(subject_csv_path='./user_data/subjects.csv'):
- # -------------- Insert new "Subject" --------------
+ print('\n-------------- Insert new "Subject" --------------')
with open(subject_csv_path, newline= '') as f:
input_subjects = list(csv.DictReader(f, delimiter=','))
@@ -18,53 +19,69 @@ def ingest_subjects(subject_csv_path='./user_data/subjects.csv'):
def ingest_sessions(session_csv_path='./user_data/sessions.csv'):
- root_data_dir = get_imaging_root_data_dir()
- # ---------- Insert new "Session" and "Scan" ---------
+ print('\n---- Insert new `Session` and `Recording` ----')
with open(session_csv_path, newline='') as f:
input_sessions = list(csv.DictReader(f, delimiter=','))
- # Folder structure: root / subject / session / .avi (raw)
- session_list, session_dir_list, scan_list, scanner_list = [], [], [], []
-
- for sess in input_sessions:
- sess_dir = pathlib.Path(sess['session_dir'])
-
- # Search for Miniscope-DAQ-V3 files (in that order)
- for scan_pattern, scan_type, glob_func in zip(['ms*.avi'],
- ['Miniscope-DAQ-V3'],
- [sess_dir.glob]):
- scan_filepaths = [fp.as_posix() for fp in glob_func(scan_pattern)]
- if len(scan_filepaths):
- acq_software = scan_type
+ session_list, session_dir_list, recording_list, hardware_list = [], [], [], []
+
+ for single_session in input_sessions:
+ acquisition_software = single_session['acquisition_software']
+ if acquisition_software not in ['Miniscope-DAQ-V3', 'Miniscope-DAQ-V4']:
+ raise NotImplementedError(f'Not implemented for acquisition software of '
+ f'type {acquisition_software}.')
+
+ # Folder structure: root / subject / session / .avi (raw)
+ session_dir = pathlib.Path(single_session['session_dir'])
+ session_path = find_full_path(get_miniscope_root_data_dir(),
+ session_dir)
+ recording_filepaths = [file_path.as_posix() for file_path
+ in session_path.glob('*.avi')]
+ if not recording_filepaths:
+ raise FileNotFoundError(f'No .avi files found in '
+ f'{session_path}')
+
+ # Read Miniscope DAQ *.json file
+ for metadata_filepath in session_path.glob('metaData.json'):
+ try:
+ recording_time = datetime.fromtimestamp(
+ metadata_filepath.stat().st_ctime)
+ with open(metadata_filepath) as json_file:
+ recording_metadata = json.load(json_file)
+ acquisition_hardware = recursive_search('deviceType',
+ recording_metadata)
break
- else:
- raise FileNotFoundError(f'Unable to identify scan files from the supported acquisition softwares (Miniscope-DAQ-V3) at: {sess_dir}')
+ except OSError:
+ print(f'Could not find `deviceType` in Miniscope-DAQ json: '
+ f'{metadata_filepath}')
+ continue
- if acq_software == 'Miniscope-DAQ-V3':
- daq_v3_fp = pathlib.Path(scan_filepaths[0])
- recording_time = datetime.fromtimestamp(daq_v3_fp.stat().st_ctime)
- scanner = 'Miniscope-DAQ-V3'
- else:
- raise NotImplementedError(f'Processing scan from acquisition software of type {acq_software} is not yet implemented')
-
- session_key = {'subject': sess['subject'], 'session_datetime': recording_time}
+ session_key = dict(subject=single_session['subject'],
+ session_datetime=recording_time)
if session_key not in session.Session():
- scanner_list.append({'scanner': scanner})
+ hardware_list.append(dict(acquisition_hardware=acquisition_hardware))
+
session_list.append(session_key)
- scan_list.append({**session_key, 'scan_id': 0, 'scanner': scanner, 'acq_software': acq_software})
- session_dir_list.append({**session_key, 'session_dir': sess_dir.relative_to(root_data_dir).as_posix()})
+ session_dir_list.append(dict(**session_key,
+ session_dir=session_dir.as_posix()))
+
+ recording_list.append(dict(**session_key,
+ recording_id=0, # Assumes one recording per session
+ acquisition_hardware=acquisition_hardware,
+ acquisition_software=acquisition_software,
+ recording_directory=session_dir.as_posix()))
- print(f'\n---- Insert {len(set(val for dic in scanner_list for val in dic.values()))} entry(s) into experiment.Equipment ----')
- Equipment.insert(scanner_list, skip_duplicates=True)
+ print(f'\n---- Insert {len(set(val for dic in hardware_list for val in dic.values()))} entry(s) into lab.Equipment ----')
+ Equipment.insert(hardware_list, skip_duplicates=True)
print(f'\n---- Insert {len(session_list)} entry(s) into session.Session ----')
session.Session.insert(session_list)
session.SessionDirectory.insert(session_dir_list)
- print(f'\n---- Insert {len(scan_list)} entry(s) into scan.Scan ----')
- scan.Scan.insert(scan_list)
+ print(f'\n---- Insert {len(recording_list)} entry(s) into miniscope.Recording ----')
+ miniscope.Recording.insert(recording_list)
print('\n---- Successfully completed ingest_sessions ----')
diff --git a/workflow_miniscope/paths.py b/workflow_miniscope/paths.py
index 2fb680a..acdb7e8 100644
--- a/workflow_miniscope/paths.py
+++ b/workflow_miniscope/paths.py
@@ -8,6 +8,4 @@ def get_miniscope_root_data_dir():
def get_session_directory(session_key: dict) -> str:
from .pipeline import session
session_dir = (session.SessionDirectory & session_key).fetch1('session_dir')
- return session_dir
-
-
+ return session_dir
\ No newline at end of file
diff --git a/workflow_miniscope/pipeline.py b/workflow_miniscope/pipeline.py
index ba3ab71..1b974db 100644
--- a/workflow_miniscope/pipeline.py
+++ b/workflow_miniscope/pipeline.py
@@ -1,4 +1,5 @@
import datajoint as dj
+
from element_lab import lab
from element_animal import subject
from element_session import session_with_datetime as session
@@ -8,8 +9,7 @@
from element_animal.subject import Subject
from element_session.session_with_datetime import Session
-from .paths import get_miniscope_root_data_dir
-
+from .paths import get_miniscope_root_data_dir, get_session_directory
if 'custom' not in dj.config:
dj.config['custom'] = {}
@@ -27,14 +27,24 @@
session.activate(db_prefix + 'session', linking_module=__name__)
-# Declare table `Equipment` for use in element_miniscope -------------------------------
+# Declare table `Equipment` and `AnatomicalLocation` for use in element_miniscope ------
@lab.schema
class Equipment(dj.Manual):
definition = """
- scanner: varchar(32)
+ equipment: varchar(32)
+ ---
+ modality: varchar(256)
+ description: varchar(256)
"""
+@lab.schema
+class AnatomicalLocation(dj.Manual):
+ definition = """
+ recording_location_id : varchar(16)
+ ---
+ anatomical_description: varchar(256)
+ """
# Activate `miniscope` schema ----------------------------------------------------------
diff --git a/workflow_miniscope/version.py b/workflow_miniscope/version.py
index 523b6b7..fb30593 100644
--- a/workflow_miniscope/version.py
+++ b/workflow_miniscope/version.py
@@ -1,2 +1,2 @@
"""Package metadata"""
-__version__ = '0.1.0a2'
\ No newline at end of file
+__version__ = '0.1.0'
\ No newline at end of file