From 860c5c454475cf6b0442a1c8437a2cf10b0d6d9e Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Wed, 24 Jan 2024 11:01:32 -0500
Subject: [PATCH 1/8] FIX: Paths and pyarrow (#834)

---
 .pre-commit-config.yaml                             | 10 ++--------
 docs/source/v1.6.md.inc                             |  5 +++--
 mne_bids_pipeline/_config_utils.py                  | 10 +++++++---
 mne_bids_pipeline/_main.py                          |  4 ++--
 .../steps/preprocessing/_04_frequency_filter.py     |  6 +++++-
 mne_bids_pipeline/steps/sensor/_99_group_average.py |  3 ++-
 mne_bids_pipeline/tests/configs/config_ds003392.py  |  2 +-
 mne_bids_pipeline/tests/datasets.py                 |  2 ++
 mne_bids_pipeline/tests/sub-010_ses-t1_scans.tsv    |  2 ++
 mne_bids_pipeline/tests/test_run.py                 |  9 +++++++++
 pyproject.toml                                      |  1 +
 11 files changed, 36 insertions(+), 18 deletions(-)
 create mode 100644 mne_bids_pipeline/tests/sub-010_ses-t1_scans.tsv

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index cbd9b0a06..363ef00c2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -5,17 +5,11 @@
 files: ^(.*\.(py|yaml))$  # for example
 exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$
 repos:
-  - repo: https://github.com/psf/black
-    rev: 23.12.1
-    hooks:
-      - id: black
-        args:
-          - --safe
-          - --quiet
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.1.13
+    rev: v0.1.14
     hooks:
       - id: ruff
+      - id: ruff-format
   - repo: https://github.com/codespell-project/codespell
     rev: v2.2.6
     hooks:
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 837058f17..4ee244b1d 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -14,10 +14,11 @@
 - MNE-BIDS-Pipeline now requires Python 3.9 or newer. (#825 by @hoechenberger)

-[//]: # (### :bug: Bug fixes)
+### :bug: Bug fixes

-[//]: # (- Whatever (#000 by @whoever))
+- Fix minor issues with path handling for cross-talk and calibration files (#834 by @larsoner)

 ### :medical_symbol: Code health

 - The package build backend has been switched from `setuptools` to `hatchling`. (#825 by @hoechenberger)
+- Code formatting now uses `ruff format` instead of `black` (#834 by @larsoner)
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index 3e7a1ec95..6f7ce1f0d 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -353,15 +353,19 @@ def get_mf_cal_fname(
     *, config: SimpleNamespace, subject: str, session: str
 ) -> pathlib.Path:
     if config.mf_cal_fname is None:
-        mf_cal_fpath = BIDSPath(
+        bids_path = BIDSPath(
             subject=subject,
             session=session,
             suffix="meg",
             datatype="meg",
             root=config.bids_root,
-        ).meg_calibration_fpath
+        ).match()[0]
+        mf_cal_fpath = bids_path.meg_calibration_fpath
         if mf_cal_fpath is None:
-            raise ValueError("Could not find Maxwell Filter Calibration file.")
+            raise ValueError(
+                "Could not determine Maxwell Filter Calibration file from BIDS "
+                f"definition for file {bids_path}."
+            )
     else:
         mf_cal_fpath = pathlib.Path(config.mf_cal_fname).expanduser().absolute()
         if not mf_cal_fpath.exists():
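The `_config_utils.py` fix above routes the lookup through an existing recording: `BIDSPath(...).match()` returns the `BIDSPath`s of files actually present on disk, and that concrete path is then asked for the MEGIN fine-calibration file via `meg_calibration_fpath`. A minimal sketch of the same lookup outside the pipeline (the BIDS root here is hypothetical):

```python
from mne_bids import BIDSPath

bids_root = "/data/my_bids_dataset"  # hypothetical dataset root

# First match an existing MEG recording ...
candidates = BIDSPath(
    subject="01",
    session="01",
    suffix="meg",
    datatype="meg",
    root=bids_root,
).match()
if not candidates:
    raise FileNotFoundError("No matching MEG recordings found.")

# ... then ask MNE-BIDS for the associated fine-calibration file;
# this is None when no calibration file is stored in the dataset.
cal_fpath = candidates[0].meg_calibration_fpath
```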
" "If specified, all other parameters will be ignored.", - ), + ) parser.add_argument( "--steps", dest="steps", @@ -70,7 +70,7 @@ def main(): If unspecified, this will be derivatives/mne-bids-pipeline inside the BIDS root.""" ), - ), + ) parser.add_argument( "--subject", dest="subject", default=None, help="The subject to process." ) diff --git a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py index b60543121..d026539ee 100644 --- a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py +++ b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py @@ -173,7 +173,7 @@ def filter_data( raw = import_er_data( cfg=cfg, bids_path_er_in=bids_path_in, - bids_path_ref_in=in_files.pop("raw_ref_run"), + bids_path_ref_in=in_files.pop("raw_ref_run", None), bids_path_er_bads_in=bids_path_bads_in, # take bads from this run (0) bids_path_ref_bads_in=in_files.pop("raw_ref_run-bads", None), @@ -196,6 +196,7 @@ def filter_data( split=None, task=task, run=run, + check=False, ) raw.load_data() @@ -232,6 +233,9 @@ def filter_data( run_type=run_type, ) + # For example, might need to create + # derivatives/mne-bids-pipeline/sub-emptyroom/ses-20230412/meg + out_files[in_key].fpath.parent.mkdir(exist_ok=True, parents=True) raw.save( out_files[in_key], overwrite=True, diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py index a05a85a96..98e275336 100644 --- a/mne_bids_pipeline/steps/sensor/_99_group_average.py +++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py @@ -814,7 +814,8 @@ def average_csp_decoding( import scipy.stats cluster_forming_t_threshold = scipy.stats.t.ppf( - 1 - 0.05, len(cfg.subjects) - 1 # one-sided test + 1 - 0.05, + len(cfg.subjects) - 1, # one-sided test ) else: cluster_forming_t_threshold = cfg.cluster_forming_t_threshold diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py index edc30228f..0decbacc9 100644 --- a/mne_bids_pipeline/tests/configs/config_ds003392.py +++ b/mne_bids_pipeline/tests/configs/config_ds003392.py @@ -21,7 +21,7 @@ # Artifact correction. 
diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py
index a05a85a96..98e275336 100644
--- a/mne_bids_pipeline/steps/sensor/_99_group_average.py
+++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py
@@ -814,7 +814,8 @@ def average_csp_decoding(
         import scipy.stats

         cluster_forming_t_threshold = scipy.stats.t.ppf(
-            1 - 0.05, len(cfg.subjects) - 1  # one-sided test
+            1 - 0.05,
+            len(cfg.subjects) - 1,  # one-sided test
         )
     else:
         cluster_forming_t_threshold = cfg.cluster_forming_t_threshold
diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py
index edc30228f..0decbacc9 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003392.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003392.py
@@ -21,7 +21,7 @@
 # Artifact correction.
 spatial_filter = "ica"
 ica_algorithm = "picard-extended_infomax"
-ica_max_iterations = 500
+ica_max_iterations = 1000
 ica_l_freq = 1.0
 ica_n_components = 0.99
 ica_reject_components = "auto"
diff --git a/mne_bids_pipeline/tests/datasets.py b/mne_bids_pipeline/tests/datasets.py
index 60ace0c48..b50454251 100644
--- a/mne_bids_pipeline/tests/datasets.py
+++ b/mne_bids_pipeline/tests/datasets.py
@@ -85,6 +85,8 @@ class DATASET_OPTIONS_T(TypedDict, total=False):
     "ds003775": {
         "openneuro": "ds003775",
         "include": ["sub-010"],
+        # See https://github.com/OpenNeuroOrg/openneuro/issues/2976
+        "exclude": ["sub-010/ses-t1/sub-010_ses-t1_scans.tsv"],
     },
     "ds001810": {
         "openneuro": "ds001810",
diff --git a/mne_bids_pipeline/tests/sub-010_ses-t1_scans.tsv b/mne_bids_pipeline/tests/sub-010_ses-t1_scans.tsv
new file mode 100644
index 000000000..54b711284
--- /dev/null
+++ b/mne_bids_pipeline/tests/sub-010_ses-t1_scans.tsv
@@ -0,0 +1,2 @@
+filename	acq_time
+eeg/sub-010_ses-t1_task-resteyesc_eeg.edf	2017-05-09T12:11:44
diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py
index eb07233b1..b394d6f0b 100644
--- a/mne_bids_pipeline/tests/test_run.py
+++ b/mne_bids_pipeline/tests/test_run.py
@@ -169,6 +169,15 @@ def test_run(dataset, monkeypatch, dataset_test, capsys, tmp_path):
             src=fix_path / "ds001971_participants.tsv",
             dst=DATA_DIR / "ds001971" / "participants.tsv",
         )
+    elif dataset == "ds003775":
+        shutil.copy(
+            src=fix_path / "sub-010_ses-t1_scans.tsv",
+            dst=DATA_DIR
+            / "ds003775"
+            / "sub-010"
+            / "ses-t1"
+            / "sub-010_ses-t1_scans.tsv",
+        )

     # Run the tests.
     steps = test_options.get("steps", ("preprocessing", "sensor"))
diff --git a/pyproject.toml b/pyproject.toml
index 21fc0671a..c576c3710 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,6 +34,7 @@ dependencies = [
     "jupyter-server-proxy",  # to have dask and jupyter working together
     "scikit-learn",
     "pandas",
+    "pyarrow",  # from pandas
     "seaborn",
     "json_tricks",
     "pydantic >= 2.0.0",
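The `_99_group_average.py` hunk above is purely cosmetic (`ruff format` splits the arguments), but the statistic it touches is worth spelling out: the cluster-forming threshold is the critical t-value for a one-sided test at alpha = 0.05 with n - 1 degrees of freedom. A quick standalone check, with a made-up subject count:

```python
import scipy.stats

n_subjects = 16  # hypothetical; the pipeline uses len(cfg.subjects)
cluster_forming_t_threshold = scipy.stats.t.ppf(
    1 - 0.05,  # one-sided test
    n_subjects - 1,  # degrees of freedom
)
print(round(cluster_forming_t_threshold, 3))  # 1.753 for df=15
```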
From 742e27eeff0bba67a20b2fcdb13e529c54b0bd70 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Wed, 24 Jan 2024 13:04:33 -0500
Subject: [PATCH 2/8] MAINT: Test result caching (#836)

---
 .github/workflows/run-tests.yml     | 68 ++++++++++++++++++++++-------
 Makefile                            |  4 --
 docs/source/v1.6.md.inc             |  1 +
 mne_bids_pipeline/_logging.py       |  9 +++-
 mne_bids_pipeline/tests/conftest.py |  2 +
 pyproject.toml                      |  1 -
 6 files changed, 63 insertions(+), 22 deletions(-)

diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index eba837d23..998e597bb 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -6,22 +6,8 @@ concurrency:
 on: [push, pull_request]
 jobs:
-  check-style:
-    name: Style
-    runs-on: "ubuntu-latest"
-    defaults:
-      run:
-        shell: bash -l {0}
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
-      - name: Install ruff and codespell
-        run: pip install ruff codespell tomli
-      - run: make ruff
-      - run: make codespell-error
-      - uses: psf/black@stable
   check-doc:
-    name: Doc consistency
+    name: Doc consistency and codespell
     runs-on: ubuntu-latest
     defaults:
       run:
         shell: bash -l {0}
@@ -30,8 +16,58 @@ steps:
       - uses: actions/checkout@v3
       - uses: actions/setup-python@v4
       - run: pip install --upgrade pip
-      - run: pip install -ve .[tests]
+      - run: pip install -ve .[tests] codespell tomli
+      - run: make codespell-error
       - run: pytest mne_bids_pipeline -m "not dataset_test"
       - uses: codecov/codecov-action@v3
         if: success()
         name: 'Upload coverage to CodeCov'
+  caching:
+    name: 'Caching on ${{ matrix.os }}'
+    timeout-minutes: 30
+    continue-on-error: true
+    runs-on: ${{ matrix.os }}
+    defaults:
+      run:
+        shell: bash -el {0}
+    strategy:
+      matrix:
+        include:
+          - os: ubuntu-latest
+          - os: macos-latest
+          - os: windows-latest
+    env:
+      MNE_BIDS_PIPELINE_LEGACY_WINDOWS: "false"
+      PYTHONIOENCODING: 'utf8'  # for Windows
+    steps:
+      - uses: actions/checkout@v4
+      - uses: pyvista/setup-headless-display-action@main
+        with:
+          qt: true
+          pyvista: false
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"  # no "multidict" wheels on 3.12 yet
+      - run: pip install -ve .[tests]
+      - uses: actions/cache@v4
+        with:
+          key: ds001971
+          path: ~/mne_data/ds001971
+        id: ds001971-cache
+      - run: python -m mne_bids_pipeline._download ds001971
+        if: steps.ds001971-cache.outputs.cache-hit != 'true'
+      - run: pytest --cov-append -k ds001971 mne_bids_pipeline/
+      - run: pytest --cov-append -k ds001971 mne_bids_pipeline/  # uses "hash" method
+        timeout-minutes: 1
+      - uses: actions/cache@v4
+        with:
+          key: ds003392
+          path: ~/mne_data/ds003392
+        id: ds003392-cache
+      - run: python -m mne_bids_pipeline._download ds003392
+        if: steps.ds003392-cache.outputs.cache-hit != 'true'
+      - run: pytest --cov-append -k ds003392 mne_bids_pipeline/
+      - run: pytest --cov-append -k ds003392 mne_bids_pipeline/  # uses "mtime" method
+        timeout-minutes: 1
+      - uses: codecov/codecov-action@v3
+        if: success()
diff --git a/Makefile b/Makefile
index 8af267201..4e491526b 100644
--- a/Makefile
+++ b/Makefile
@@ -33,10 +33,6 @@ check:
 trailing-spaces:
 	find . -name "*.py" | xargs perl -pi -e 's/[ \t]*$$//'

-ruff:
-	ruff .
-	@echo "ruff passed"
-
 codespell:  # running manually; auto-fix spelling mistakes
 	@codespell --write-changes $(CODESPELL_DIRS)
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 4ee244b1d..026401106 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -22,3 +22,4 @@
 - The package build backend has been switched from `setuptools` to `hatchling`. (#825 by @hoechenberger)
 - Code formatting now uses `ruff format` instead of `black` (#834 by @larsoner)
+- Code caching is now tested using GitHub Actions (#836 by @larsoner)
diff --git a/mne_bids_pipeline/_logging.py b/mne_bids_pipeline/_logging.py
index 6bcb21d73..931ee393d 100644
--- a/mne_bids_pipeline/_logging.py
+++ b/mne_bids_pipeline/_logging.py
@@ -27,7 +27,14 @@ def _console(self):
         force_terminal = os.getenv("MNE_BIDS_PIPELINE_FORCE_TERMINAL", None)
         if force_terminal is not None:
             force_terminal = force_terminal.lower() in ("true", "1")
-        kwargs = dict(soft_wrap=True, force_terminal=force_terminal)
+        legacy_windows = os.getenv("MNE_BIDS_PIPELINE_LEGACY_WINDOWS", None)
+        if legacy_windows is not None:
+            legacy_windows = legacy_windows.lower() in ("true", "1")
+        kwargs = dict(
+            soft_wrap=True,
+            force_terminal=force_terminal,
+            legacy_windows=legacy_windows,
+        )
         kwargs["theme"] = rich.theme.Theme(
             dict(
                 default="white",
diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py
index 97e380ffc..fa2014634 100644
--- a/mne_bids_pipeline/tests/conftest.py
+++ b/mne_bids_pipeline/tests/conftest.py
@@ -55,6 +55,8 @@ def pytest_configure(config):
     #../python_env/lib/python3.10/site-packages/mne/report/report.py:1713: in _add_ica_artifact_sources
     #    self._add_figure(
     always:constrained_layout not applied.*:UserWarning
+    ignore:datetime\.datetime\.utcfromtimestamp.*:DeprecationWarning
+    ignore:datetime\.datetime\.utcnow.*:DeprecationWarning
     """
     for warning_line in warning_lines.split("\n"):
         warning_line = warning_line.strip()
diff --git a/pyproject.toml b/pyproject.toml
index c576c3710..c3c5dbb2b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -68,7 +68,6 @@ tests = [
     "mkdocstrings-python",
     "mike",
     "jinja2",
-    "black",  # function signature formatting
     "livereload",
     "openneuro-py >= 2022.2.0",
     "httpx >= 0.20",
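The `_logging.py` change in this patch forwards a new `MNE_BIDS_PIPELINE_LEGACY_WINDOWS` environment variable to Rich's `Console`, whose `legacy_windows` option forces the reduced feature set of the classic Windows console (the CI matrix above pins it to `"false"`). A minimal sketch of the same tri-state parsing:

```python
import os

import rich.console

# Unset -> None (let Rich auto-detect); otherwise "true"/"1" -> True.
legacy_windows = os.getenv("MNE_BIDS_PIPELINE_LEGACY_WINDOWS", None)
if legacy_windows is not None:
    legacy_windows = legacy_windows.lower() in ("true", "1")

console = rich.console.Console(soft_wrap=True, legacy_windows=legacy_windows)
console.print("Hello from a [bold]Rich[/bold] console")
```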
From cc93c660e928019435842ab776059c096bab93c6 Mon Sep 17 00:00:00 2001
From: Eric Larson
Date: Fri, 26 Jan 2024 16:19:06 -0500
Subject: [PATCH 3/8] MAINT: Enable more ruff rules (#838)

---
 .github/workflows/run-tests.yml               |  2 +
 .pre-commit-config.yaml                       |  1 +
 docs/hooks.py                                 |  6 +-
 docs/source/examples/gen_examples.py          | 14 +++--
 docs/source/features/gen_steps.py             |  1 +
 docs/source/v1.6.md.inc                       |  2 +-
 mne_bids_pipeline/__init__.py                 |  2 +-
 mne_bids_pipeline/_config.py                  | 55 ++++++++---------
 mne_bids_pipeline/_config_import.py           | 17 +++---
 mne_bids_pipeline/_config_template.py         |  8 +--
 mne_bids_pipeline/_config_utils.py            | 61 +++++++++----------
 mne_bids_pipeline/_decoding.py                |  3 +-
 mne_bids_pipeline/_download.py                |  4 +-
 mne_bids_pipeline/_import_data.py             | 27 ++++----
 mne_bids_pipeline/_io.py                      |  2 +-
 mne_bids_pipeline/_main.py                    |  9 ++-
 mne_bids_pipeline/_parallel.py                |  7 ++-
 mne_bids_pipeline/_reject.py                  |  9 +--
 mne_bids_pipeline/_report.py                  | 17 +++---
 mne_bids_pipeline/_run.py                     | 16 ++---
 mne_bids_pipeline/_viz.py                     |  5 +-
 .../steps/freesurfer/_01_recon_all.py         |  4 +-
 .../steps/freesurfer/_02_coreg_surfaces.py    | 10 +--
 .../steps/freesurfer/__init__.py              |  3 +-
 .../steps/init/_01_init_derivatives_dir.py    |  4 +-
 .../steps/init/_02_find_empty_room.py         | 18 +++---
 mne_bids_pipeline/steps/init/__init__.py      |  3 +-
 .../steps/preprocessing/_01_data_quality.py   | 27 ++++----
 .../steps/preprocessing/_02_head_pos.py       | 12 ++--
 .../steps/preprocessing/_03_maxfilter.py      | 24 ++++----
 .../preprocessing/_04_frequency_filter.py     | 17 +++---
 .../steps/preprocessing/_05_make_epochs.py    | 16 ++---
 .../steps/preprocessing/_06a_run_ica.py       | 24 ++++----
 .../steps/preprocessing/_06b_run_ssp.py       | 14 ++---
 .../steps/preprocessing/_07a_apply_ica.py     | 13 ++--
 .../steps/preprocessing/_07b_apply_ssp.py     |  6 +-
 .../steps/preprocessing/_08_ptp_reject.py     |  9 ++-
 .../steps/preprocessing/__init__.py           | 22 ++++---
 .../steps/sensor/_01_make_evoked.py           | 18 +++---
 .../steps/sensor/_02_decoding_full_epochs.py  | 29 ++++-----
 .../steps/sensor/_03_decoding_time_by_time.py | 25 ++++----
 .../steps/sensor/_04_time_frequency.py        | 12 ++--
 .../steps/sensor/_05_decoding_csp.py          | 44 +++++++------
 .../steps/sensor/_06_make_cov.py              | 18 +++---
 .../steps/sensor/_99_group_average.py         | 39 ++++++------
 mne_bids_pipeline/steps/sensor/__init__.py    | 16 ++---
 .../steps/source/_01_make_bem_surfaces.py     | 12 ++--
 .../steps/source/_02_make_bem_solution.py     |  8 +--
 .../steps/source/_03_setup_source_space.py    |  6 +-
 .../steps/source/_04_make_forward.py          | 17 +++---
 .../steps/source/_05_make_inverse.py          | 16 ++---
 .../steps/source/_99_group_average.py         | 13 ++--
 mne_bids_pipeline/steps/source/__init__.py    | 14 +++--
 .../tests/configs/config_ERP_CORE.py          |  6 +-
 .../tests/configs/config_ds000117.py          |  4 +-
 .../tests/configs/config_ds000246.py          |  3 +-
 .../tests/configs/config_ds000247.py          |  5 +-
 .../configs/config_ds000248_FLASH_BEM.py      |  4 +-
 .../tests/configs/config_ds000248_T1_BEM.py   |  4 +-
 .../tests/configs/config_ds000248_base.py     |  4 +-
 .../configs/config_ds000248_coreg_surfaces.py |  4 +-
 .../tests/configs/config_ds000248_ica.py      |  4 +-
 .../tests/configs/config_ds000248_no_mri.py   |  4 +-
 .../tests/configs/config_ds001810.py          |  4 +-
 .../tests/configs/config_ds003104.py          |  3 +-
 .../tests/configs/config_ds003392.py          |  4 +-
 .../tests/configs/config_ds003775.py          |  4 +-
 .../tests/configs/config_ds004107.py          |  3 +-
 .../tests/configs/config_ds004229.py          |  3 +-
 .../configs/config_eeg_matchingpennies.py     |  4 +-
 mne_bids_pipeline/tests/conftest.py           |  3 +-
 mne_bids_pipeline/tests/datasets.py           |  8 +--
 mne_bids_pipeline/tests/test_cli.py           |  2 +
 mne_bids_pipeline/tests/test_documented.py    | 13 ++--
 mne_bids_pipeline/tests/test_run.py           | 13 ++--
 mne_bids_pipeline/tests/test_validation.py    |  1 +
 mne_bids_pipeline/typing.py                   | 11 ++--
 pyproject.toml                                | 14 ++++-
 78 files changed, 444 insertions(+), 469 deletions(-)

diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index 998e597bb..43e692304 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -15,6 +15,8 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - uses: actions/setup-python@v4
+        with:
+          python-version: "3.11"
       - run: pip install --upgrade pip
       - run: pip install -ve .[tests] codespell tomli
       - run: make codespell-error
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 363ef00c2..d8ceaa9ff 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,6 +9,7 @@ repos:
     rev: v0.1.14
     hooks:
       - id: ruff
+        args: ["--fix"]
       - id: ruff-format
   - repo: https://github.com/codespell-project/codespell
     rev: v2.2.6
     hooks:
diff --git a/docs/hooks.py b/docs/hooks.py
index ab7192f6e..41ece9a61 100644
--- a/docs/hooks.py
+++ b/docs/hooks.py
@@ -1,9 +1,9 @@
 import logging
-from typing import Dict, Any
+from typing import Any

 from mkdocs.config.defaults import MkDocsConfig
-from mkdocs.structure.pages import Page
 from mkdocs.structure.files import Files
+from mkdocs.structure.pages import Page

 logger = logging.getLogger("mkdocs")
@@ -13,7 +13,7 @@
 # Ideally there would be a better hook, but it's unclear if context can
 # be obtained any earlier
 def on_template_context(
-    context: Dict[str, Any],
+    context: dict[str, Any],
     template_name: str,
     config: MkDocsConfig,
 ) -> None:
diff --git a/docs/source/examples/gen_examples.py b/docs/source/examples/gen_examples.py
index f24c0d29f..1f2514274 100755
--- a/docs/source/examples/gen_examples.py
+++ b/docs/source/examples/gen_examples.py
@@ -1,19 +1,21 @@
 #!/usr/bin/env python

-from collections import defaultdict
 import contextlib
 import logging
 import shutil
-from pathlib import Path
 import sys
-from typing import Union, Iterable
+from collections import defaultdict
+from collections.abc import Iterable
+from pathlib import Path
+from typing import Union
+
+from tqdm import tqdm

 import mne_bids_pipeline
-from mne_bids_pipeline._config_import import _import_config
 import mne_bids_pipeline.tests.datasets
-from mne_bids_pipeline.tests.test_run import TEST_SUITE
+from mne_bids_pipeline._config_import import _import_config
 from mne_bids_pipeline.tests.datasets import DATASET_OPTIONS
-from tqdm import tqdm
+from mne_bids_pipeline.tests.test_run import TEST_SUITE

 this_dir = Path(__file__).parent
 root = Path(mne_bids_pipeline.__file__).parent.resolve(strict=True)
diff --git a/docs/source/features/gen_steps.py b/docs/source/features/gen_steps.py
index fffc61ddf..86ea6283f 100755
--- a/docs/source/features/gen_steps.py
+++ b/docs/source/features/gen_steps.py
@@ -3,6 +3,7 @@

 import importlib
 from pathlib import Path
+
 from mne_bids_pipeline._config_utils import _get_step_modules

 pre = """\
diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc
index 026401106..cf5596cb1 100644
--- a/docs/source/v1.6.md.inc
+++ b/docs/source/v1.6.md.inc
@@ -21,5 +21,5 @@
 ### :medical_symbol: Code health

 - The package build backend has been switched from `setuptools` to `hatchling`. (#825 by @hoechenberger)
-- Code formatting now uses `ruff format` instead of `black` (#834 by @larsoner)
+- Code formatting now uses `ruff format` instead of `black` (#834, #838 by @larsoner)
 - Code caching is now tested using GitHub Actions (#836 by @larsoner)
diff --git a/mne_bids_pipeline/__init__.py b/mne_bids_pipeline/__init__.py
index 2826b97e6..2474edb8a 100644
--- a/mne_bids_pipeline/__init__.py
+++ b/mne_bids_pipeline/__init__.py
@@ -1,4 +1,4 @@
-from importlib.metadata import version, PackageNotFoundError
+from importlib.metadata import PackageNotFoundError, version

 try:
     __version__ = version("mne_bids_pipeline")
diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py
index cc3793ea9..652e5ebfb 100644
--- a/mne_bids_pipeline/_config.py
+++ b/mne_bids_pipeline/_config.py
@@ -1,18 +1,17 @@
 # Default settings for data processing and analysis.

-from typing import Optional, Union, Iterable, List, Tuple, Dict, Callable, Literal
+from typing import Callable, Iterable, Literal, Optional, Union

 from mne import Covariance
 from mne_bids import BIDSPath

 from mne_bids_pipeline.typing import (
-    PathLike,
     ArbitraryContrast,
-    FloatArrayLike,
     DigMontageType,
+    FloatArrayLike,
+    PathLike,
 )
-
 ###############################################################################
 # Config parameters
 # -----------------
@@ -84,7 +83,7 @@
 Enabling interactive mode deactivates parallel processing.
 """

-sessions: Union[List, Literal["all"]] = "all"
+sessions: Union[list, Literal["all"]] = "all"
 """
 The sessions to process. If `'all'`, will process all sessions found in the
 BIDS dataset.
@@ -101,7 +100,7 @@
 BIDS dataset.
 """

-exclude_runs: Optional[Dict[str, List[str]]] = None
+exclude_runs: Optional[dict[str, list[str]]] = None
 """
 Specify runs to exclude from analysis, for each participant individually.
@@ -117,7 +116,7 @@ did not understand the instructions, etc.).
 """

-crop_runs: Optional[Tuple[float, float]] = None
+crop_runs: Optional[tuple[float, float]] = None
 """
 Crop the raw data of each run to the specified time interval `[tmin, tmax]`,
 in seconds. The runs will be cropped before Maxwell or frequency filtering is
@@ -288,7 +287,7 @@
 ```
 """

-eeg_bipolar_channels: Optional[Dict[str, Tuple[str, str]]] = None
+eeg_bipolar_channels: Optional[dict[str, tuple[str, str]]] = None
 """
 Combine two channels into a bipolar channel, whose signal is the **difference**
 between the two combined channels, and add it to the data.
@@ -688,7 +687,7 @@
 Number of extended SSS (eSSS) basis projectors to use from empty-room data.
 """

-mf_esss_reject: Optional[Dict[str, float]] = None
+mf_esss_reject: Optional[dict[str, float]] = None
 """
 Rejection parameters to use when computing the extended SSS (eSSS) basis.
 """
@@ -980,7 +979,7 @@
 ```
 """  # noqa: E501

-conditions: Optional[Union[Iterable[str], Dict[str, str]]] = None
+conditions: Optional[Union[Iterable[str], dict[str, str]]] = None
 """
 The time-locked events based on which to create evoked responses.
 This can either be name of the experimental condition as specified in the
@@ -1048,7 +1047,7 @@
 and when the annotations do not contain any stimulation or behavior events.
 """

-baseline: Optional[Tuple[Optional[float], Optional[float]]] = (None, 0)
+baseline: Optional[tuple[Optional[float], Optional[float]]] = (None, 0)
 """
 Specifies which time interval to use for baseline correction of epochs;
 if `None`, no baseline correction is applied.
@@ -1059,7 +1058,7 @@
 ```
 """

-contrasts: Iterable[Union[Tuple[str, str], ArbitraryContrast]] = []
+contrasts: Iterable[Union[tuple[str, str], ArbitraryContrast]] = []
 """
 The conditions to contrast via a subtraction of ERPs / ERFs. The list elements
 can either be tuples or dictionaries (or a mix of both). Each element in the
@@ -1156,12 +1155,12 @@
 # Rejection based on SSP
 # ~~~~~~~~~~~~~~~~~~~~~~

-n_proj_eog: Dict[str, float] = dict(n_mag=1, n_grad=1, n_eeg=1)
+n_proj_eog: dict[str, float] = dict(n_mag=1, n_grad=1, n_eeg=1)
 """
 Number of SSP vectors to create for EOG artifacts for each channel type.
 """

-n_proj_ecg: Dict[str, float] = dict(n_mag=1, n_grad=1, n_eeg=1)
+n_proj_ecg: dict[str, float] = dict(n_mag=1, n_grad=1, n_eeg=1)
 """
 Number of SSP vectors to create for ECG artifacts for each channel type.
 """
@@ -1189,7 +1188,7 @@ `'separate'` otherwise.
 """

-ssp_reject_ecg: Optional[Union[Dict[str, float], Literal["autoreject_global"]]] = None
+ssp_reject_ecg: Optional[Union[dict[str, float], Literal["autoreject_global"]]] = None
 """
 Peak-to-peak amplitude limits of the ECG epochs to exclude from SSP fitting.
 This allows you to remove strong transient artifacts, which could negatively
@@ -1207,7 +1206,7 @@
 ```
 """

-ssp_reject_eog: Optional[Union[Dict[str, float], Literal["autoreject_global"]]] = None
+ssp_reject_eog: Optional[Union[dict[str, float], Literal["autoreject_global"]]] = None
 """
 Peak-to-peak amplitude limits of the EOG epochs to exclude from SSP fitting.
 This allows you to remove strong transient artifacts, which could negatively
@@ -1233,11 +1232,11 @@
 # Rejection based on ICA
 # ~~~~~~~~~~~~~~~~~~~~~~

-ica_reject: Optional[Union[Dict[str, float], Literal["autoreject_local"]]] = None
+ica_reject: Optional[Union[dict[str, float], Literal["autoreject_local"]]] = None
 """
 Peak-to-peak amplitude limits to exclude epochs from ICA fitting. This allows
 you to remove strong transient artifacts from the epochs used for fitting ICA,
 which could
-negatively affect ICA performance. 
+negatively affect ICA performance.

 The parameter values are the same as for
 [`reject`][mne_bids_pipeline._config.reject], but `"autoreject_global"` is not
 supported. `"autoreject_local"` here behaves
@@ -1264,7 +1263,7 @@ to **not** specify rejection thresholds for EOG and ECG channels here –
 otherwise, ICA won't be able to "see" these artifacts.

-???+ info 
+???+ info
     This setting is applied only to the epochs that are used for **fitting** ICA. The
     goal is to make it easier for ICA to produce a good decomposition. After fitting,
     ICA is applied to the epochs to be analyzed, usually with one or more components
@@ -1280,7 +1279,7 @@ ica_reject = "autoreject_global"  # find global (per channel type) PTP thresholds before fitting ICA
     ica_reject = "autoreject_local"  # find local (per channel) thresholds and repair epochs before fitting ICA
     ```
-"""
+"""  # noqa: E501

 ica_algorithm: Literal[
     "picard", "fastica", "extended_infomax", "picard-extended_infomax"
 ] = "picard"
@@ -1373,7 +1372,7 @@
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 reject: Optional[
-    Union[Dict[str, float], Literal["autoreject_global", "autoreject_local"]]
+    Union[dict[str, float], Literal["autoreject_global", "autoreject_local"]]
 ] = None
 """
 Peak-to-peak amplitude limits to mark epochs as bad. This allows you to remove
@@ -1386,7 +1385,7 @@
 If `None` (default), do not apply artifact rejection.

-If a dictionary, manually specify rejection thresholds (see examples). 
+If a dictionary, manually specify rejection thresholds (see examples).
 The thresholds provided here must be at least as stringent as those in
 [`ica_reject`][mne_bids_pipeline._config.ica_reject] if using ICA. In case of
 `'autoreject_global'`, thresholds for any channel that do not meet this
@@ -1409,7 +1408,7 @@ reject = "autoreject_global"  # find global (per channel type) PTP thresholds
     reject = "autoreject_local"  # find local (per channel) thresholds and repair epochs
     ```
-"""
+"""  # noqa: E501

 reject_tmin: Optional[float] = None
 """
@@ -1689,7 +1688,7 @@
 ```
 """

-decoding_csp_freqs: Optional[Dict[str, FloatArrayLike]] = None
+decoding_csp_freqs: Optional[dict[str, FloatArrayLike]] = None
 """
 The edges of the frequency bins to use for CSP decoding.
@@ -1733,7 +1732,7 @@
 }
 """

-time_frequency_baseline: Optional[Tuple[float, float]] = None
+time_frequency_baseline: Optional[tuple[float, float]] = None
 """
 Baseline period to use for the time-frequency analysis. If `None`, no baseline.
 ???+ example "Example"
@@ -1964,7 +1963,7 @@ def mri_landmarks_kind(bids_path):
 """

 noise_cov: Union[
-    Tuple[Optional[float], Optional[float]],
+    tuple[Optional[float], Optional[float]],
     Literal["emptyroom", "rest", "ad-hoc"],
     Callable[[BIDSPath], Covariance],
 ] = (None, 0)
@@ -2031,7 +2030,7 @@ def noise_cov(bids_path):
 ```
 """

-source_info_path_update: Optional[Dict[str, str]] = dict(suffix="ave")
+source_info_path_update: Optional[dict[str, str]] = dict(suffix="ave")
 """
 When computing the forward and inverse solutions, by default the pipeline
 retrieves the `mne.Info` object from the cleaned evoked data. However, in
@@ -2049,7 +2048,7 @@ def noise_cov(bids_path):
 ```
 """

-inverse_targets: List[Literal["evoked"]] = ["evoked"]
+inverse_targets: list[Literal["evoked"]] = ["evoked"]
 """
 On which data to apply the inverse operator. Currently, the only supported
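Most of the `_config.py` churn above comes from ruff's pyupgrade (`UP`) rules: with Python 3.9 as the floor, `typing.List`, `Dict`, and `Tuple` give way to the builtin generics of PEP 585, and `Iterable` moves to `collections.abc`. A before/after sketch with illustrative names:

```python
from collections.abc import Iterable
from typing import Optional

# Old spelling (pre-PEP 585), now redundant on Python >= 3.9:
#   from typing import Dict, List, Tuple
#   def summarize(runs: Dict[str, List[str]]) -> Tuple[str, int]: ...

# New spelling, as enforced throughout the diff:
def summarize(runs: dict[str, list[str]]) -> tuple[str, int]:
    total = sum(len(v) for v in runs.values())
    return f"{len(runs)} subjects", total

def first_or_none(items: Iterable[str]) -> Optional[str]:
    return next(iter(items), None)
```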
diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py
index 14a55df2e..66fe9583a 100644
--- a/mne_bids_pipeline/_config_import.py
+++ b/mne_bids_pipeline/_config_import.py
@@ -1,22 +1,21 @@
 import ast
 import copy
-from dataclasses import field
 import difflib
-from functools import partial
 import importlib
 import os
 import pathlib
+from dataclasses import field
+from functools import partial
 from types import SimpleNamespace
-from typing import Optional, List
+from typing import Optional

 import matplotlib
-import numpy as np
 import mne
-
+import numpy as np
 from pydantic import ValidationError
 from pydantic.dataclasses import dataclass

-from ._logging import logger, gen_log_kwargs
+from ._logging import gen_log_kwargs, logger
 from .typing import PathLike
@@ -150,7 +149,7 @@ def _update_with_user_config(
     config_path: Optional[PathLike],
     overrides: Optional[SimpleNamespace],
     log: bool = False,
-) -> List[str]:
+) -> list[str]:
     # 1. Basics and hidden vars
     from . import __version__
@@ -433,8 +432,8 @@ def _pydantic_validate(
 def _check_misspellings_removals(
     config: SimpleNamespace,
     *,
-    valid_names: List[str],
-    user_names: List[str],
+    valid_names: list[str],
+    user_names: list[str],
     log: bool,
 ) -> None:
     # for each name in the user names, check if it's in the valid names but
diff --git a/mne_bids_pipeline/_config_template.py b/mne_bids_pipeline/_config_template.py
index 1925e020e..9c5a0ff29 100644
--- a/mne_bids_pipeline/_config_template.py
+++ b/mne_bids_pipeline/_config_template.py
@@ -1,8 +1,6 @@
 from pathlib import Path
-from typing import List
-
-from ._logging import logger, gen_log_kwargs
+from ._logging import gen_log_kwargs, logger

 CONFIG_SOURCE_PATH = Path(__file__).parent / "_config.py"
@@ -17,8 +15,8 @@ def create_template_config(
         raise FileExistsError(f"The specified path already exists: {target_path}")

     # Create a template by commenting out most of the lines in _config.py
-    config: List[str] = []
-    with open(CONFIG_SOURCE_PATH, "r", encoding="utf-8") as f:
+    config: list[str] = []
+    with open(CONFIG_SOURCE_PATH, encoding="utf-8") as f:
         for line in f:
             line = (
                 line if line.startswith(("#", "\n", "import", "from")) else f"# {line}"
diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py
index 6f7ce1f0d..321ccf0f0 100644
--- a/mne_bids_pipeline/_config_utils.py
+++ b/mne_bids_pipeline/_config_utils.py
@@ -3,15 +3,16 @@
 import copy
 import functools
 import pathlib
-from typing import List, Optional, Union, Iterable, Tuple, Dict, TypeVar, Literal, Any
-from types import SimpleNamespace, ModuleType
+from collections.abc import Iterable
+from types import ModuleType, SimpleNamespace
+from typing import Any, Literal, Optional, TypeVar, Union

-import numpy as np
 import mne
 import mne_bids
+import numpy as np
 from mne_bids import BIDSPath

-from ._logging import logger, gen_log_kwargs
+from ._logging import gen_log_kwargs, logger
 from .typing import ArbitraryContrast

 try:
@@ -47,8 +48,8 @@ def get_fs_subject(config: SimpleNamespace, subject: str) -> str:
     return f"sub-{subject}"

-@functools.lru_cache(maxsize=None)
-def _get_entity_vals_cached(*args, **kwargs) -> List[str]:
+@functools.cache
+def _get_entity_vals_cached(*args, **kwargs) -> list[str]:
     return mne_bids.get_entity_vals(*args, **kwargs)
@@ -73,18 +74,18 @@ def get_datatype(config: SimpleNamespace) -> Literal["meg", "eeg"]:
     )

-@functools.lru_cache(maxsize=None)
+@functools.cache
 def _get_datatypes_cached(root):
     return mne_bids.get_datatypes(root=root)

-def _get_ignore_datatypes(config: SimpleNamespace) -> Tuple[str]:
-    _all_datatypes: List[str] = _get_datatypes_cached(root=config.bids_root)
+def _get_ignore_datatypes(config: SimpleNamespace) -> tuple[str]:
+    _all_datatypes: list[str] = _get_datatypes_cached(root=config.bids_root)
     _ignore_datatypes = set(_all_datatypes) - set([get_datatype(config)])
     return tuple(sorted(_ignore_datatypes))

-def get_subjects(config: SimpleNamespace) -> List[str]:
+def get_subjects(config: SimpleNamespace) -> list[str]:
     _valid_subjects = _get_entity_vals_cached(
         root=config.bids_root,
         entity_key="subject",
@@ -102,7 +103,7 @@ def get_subjects(config: SimpleNamespace) -> list[str]:
     return sorted(subjects)

-def get_sessions(config: SimpleNamespace) -> Union[List[None], List[str]]:
+def get_sessions(config: SimpleNamespace) -> Union[list[None], list[str]]:
     sessions = copy.deepcopy(config.sessions)
     _all_sessions = _get_entity_vals_cached(
         root=config.bids_root,
@@ -120,8 +121,8 @@ def get_sessions(config: SimpleNamespace) -> Union[list[None], list[str]]:
 def get_runs_all_subjects(
     config: SimpleNamespace,
-) -> Dict[str, Union[List[None], List[str]]]:
-    """Gives the mapping between subjects and their runs.
+) -> dict[str, Union[list[None], list[str]]]:
+    """Give the mapping between subjects and their runs.

     Returns
     -------
@@ -142,10 +143,10 @@ def get_runs_all_subjects(
     )

-@functools.lru_cache(maxsize=None)
+@functools.cache
 def _get_runs_all_subjects_cached(
-    **config_dict: Dict[str, Any],
-) -> Dict[str, Union[List[None], List[str]]]:
+    **config_dict: dict[str, Any],
+) -> dict[str, Union[list[None], list[str]]]:
     config = SimpleNamespace(**config_dict)
     # Sometimes we check list equivalence for ch_types, so convert it back
     config.ch_types = list(config.ch_types)
@@ -172,8 +173,8 @@ def _get_runs_all_subjects_cached(
     return subj_runs

-def get_intersect_run(config: SimpleNamespace) -> List[str]:
-    """Returns the intersection of all the runs of all subjects."""
+def get_intersect_run(config: SimpleNamespace) -> list[str]:
+    """Return the intersection of all the runs of all subjects."""
     subj_runs = get_runs_all_subjects(config)
     return list(set.intersection(*map(set, subj_runs.values())))
@@ -183,8 +184,8 @@ def get_runs(
     config: SimpleNamespace,
     subject: str,
     verbose: bool = False,
-) -> Union[List[str], List[None]]:
-    """Returns a list of runs in the BIDS input data.
+) -> Union[list[str], list[None]]:
+    """Return a list of runs in the BIDS input data.

     Parameters
     ----------
@@ -240,8 +241,8 @@ def get_runs_tasks(
     config: SimpleNamespace,
     subject: str,
     session: Optional[str],
-    which: Tuple[str] = ("runs", "noise", "rest"),
-) -> List[Tuple[str]]:
+    which: tuple[str] = ("runs", "noise", "rest"),
+) -> list[tuple[str]]:
     """Get (run, task) tuples for all runs plus (maybe) rest."""
     from ._import_data import _get_noise_path, _get_rest_path
@@ -311,7 +312,7 @@ def get_task(config: SimpleNamespace) -> Optional[str]:
     return _valid_tasks[0]

-def get_channels_to_analyze(info: mne.Info, config: SimpleNamespace) -> List[str]:
+def get_channels_to_analyze(info: mne.Info, config: SimpleNamespace) -> list[str]:
     # Return names of the channels of the channel types we wish to analyze.
     # We also include channels marked as "bad" here.
     # `exclude=[]`: keep "bad" channels, too.
@@ -428,7 +429,7 @@ def _restrict_analyze_channels(
     return inst

-def _get_scalp_in_files(cfg: SimpleNamespace) -> Dict[str, pathlib.Path]:
+def _get_scalp_in_files(cfg: SimpleNamespace) -> dict[str, pathlib.Path]:
     subject_path = pathlib.Path(cfg.subjects_dir) / cfg.fs_subject
     seghead = subject_path / "surf" / "lh.seghead"
     in_files = dict()
@@ -439,7 +440,7 @@ def _get_scalp_in_files(cfg: SimpleNamespace) -> dict[str, pathlib.Path]:
     return in_files

-def _get_bem_conductivity(cfg: SimpleNamespace) -> Tuple[Tuple[float], str]:
+def _get_bem_conductivity(cfg: SimpleNamespace) -> tuple[tuple[float], str]:
     if cfg.fs_subject in ("fsaverage", cfg.use_template_mri):
         conductivity = None  # should never be used
         tag = "5120-5120-5120"
@@ -522,7 +523,7 @@ def get_all_contrasts(config: SimpleNamespace) -> Iterable[ArbitraryContrast]:
     return normalized_contrasts

-def get_decoding_contrasts(config: SimpleNamespace) -> Iterable[Tuple[str, str]]:
+def get_decoding_contrasts(config: SimpleNamespace) -> Iterable[tuple[str, str]]:
     _validate_contrasts(config.contrasts)
     normalized_contrasts = []
     for contrast in config.contrasts:
@@ -583,12 +584,8 @@ def _validate_contrasts(contrasts: SimpleNamespace) -> None:
     raise ValueError("Contrasts must be tuples or well-formed dicts")

-def _get_step_modules() -> Dict[str, Tuple[ModuleType]]:
-    from .steps import init
-    from .steps import preprocessing
-    from .steps import sensor
-    from .steps import source
-    from .steps import freesurfer
+def _get_step_modules() -> dict[str, tuple[ModuleType]]:
+    from .steps import freesurfer, init, preprocessing, sensor, source

     INIT_STEPS = init._STEPS
     PREPROCESSING_STEPS = preprocessing._STEPS
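The `_config_utils.py` hunks also swap `@functools.lru_cache(maxsize=None)` for the equivalent, shorter `@functools.cache` (new in Python 3.9). Both memoize on the call arguments, which must therefore be hashable — which is why `_get_runs_all_subjects_cached` above takes the config as keyword arguments and converts `ch_types` back to a list afterwards. A small sketch of the constraint:

```python
import functools

@functools.cache  # equivalent to functools.lru_cache(maxsize=None)
def expensive_lookup(root: str, entity_key: str) -> tuple[str, ...]:
    print(f"scanning {root} for {entity_key} ...")  # runs once per arg combo
    return (entity_key,)  # stand-in for a costly filesystem scan

expensive_lookup("/data/bids", "subject")  # computed
expensive_lookup("/data/bids", "subject")  # served from the cache

# Unhashable arguments raise TypeError, hence the tuples/kwargs above:
# expensive_lookup(["/data/bids"], "subject")  # TypeError: unhashable type
```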
diff --git a/mne_bids_pipeline/_decoding.py b/mne_bids_pipeline/_decoding.py
index 2b6be3cfc..4d895395b 100644
--- a/mne_bids_pipeline/_decoding.py
+++ b/mne_bids_pipeline/_decoding.py
@@ -1,8 +1,7 @@
 import numpy as np
-from sklearn.linear_model import LogisticRegression
 from joblib import parallel_backend
-
 from mne.utils import _validate_type
+from sklearn.linear_model import LogisticRegression


 class LogReg(LogisticRegression):
diff --git a/mne_bids_pipeline/_download.py b/mne_bids_pipeline/_download.py
index 33e565207..45de893ed 100644
--- a/mne_bids_pipeline/_download.py
+++ b/mne_bids_pipeline/_download.py
@@ -12,7 +12,7 @@ def _download_via_datalad(*, ds_name: str, ds_path: Path):
     import datalad.api as dl

-    print('datalad installing "{}"'.format(ds_name))
+    print(f'datalad installing "{ds_name}"')
     options = DATASET_OPTIONS[ds_name]
     git_url = options["git"]
     assert "exclude" not in options
@@ -28,7 +28,7 @@ def _download_via_datalad(*, ds_name: str, ds_path: Path):
         n_jobs = 1

     for to_get in DATASET_OPTIONS[ds_name].get("include", []):
-        print('datalad get data "{}" for "{}"'.format(to_get, ds_name))
+        print(f'datalad get data "{to_get}" for "{ds_name}"')
         dataset.get(to_get, jobs=n_jobs)
diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py
index ca52c59e1..d7f22240d 100644
--- a/mne_bids_pipeline/_import_data.py
+++ b/mne_bids_pipeline/_import_data.py
@@ -1,21 +1,22 @@
+from collections.abc import Iterable
 from types import SimpleNamespace
-from typing import Dict, Optional, Iterable, Union, List, Literal
+from typing import Literal, Optional, Union

 import mne
-from mne_bids import BIDSPath, read_raw_bids, get_bids_path_from_fname
 import numpy as np
 import pandas as pd
+from mne_bids import BIDSPath, get_bids_path_from_fname, read_raw_bids

 from ._config_utils import (
-    get_mf_reference_run,
-    get_runs,
-    get_datatype,
-    get_task,
     _bids_kwargs,
     _do_mf_autobad,
     _pl,
+    get_datatype,
+    get_mf_reference_run,
+    get_runs,
+    get_task,
 )
-from ._io import _read_json, _empty_room_match_path
+from ._io import _empty_room_match_path, _read_json
 from ._logging import gen_log_kwargs, logger
 from ._run import _update_for_splits
 from .typing import PathLike
@@ -27,8 +28,8 @@ def make_epochs(
     subject: str,
     session: Optional[str],
     raw: mne.io.BaseRaw,
-    event_id: Optional[Union[Dict[str, int], Literal["auto"]]],
-    conditions: Union[Iterable[str], Dict[str, str]],
+    event_id: Optional[Union[dict[str, int], Literal["auto"]]],
+    conditions: Union[Iterable[str], dict[str, str]],
     tmin: float,
     tmax: float,
     metadata_tmin: Optional[float],
@@ -147,12 +148,12 @@ def make_epochs(
     return epochs

-def annotations_to_events(*, raw_paths: List[PathLike]) -> Dict[str, int]:
+def annotations_to_events(*, raw_paths: list[PathLike]) -> dict[str, int]:
     """Generate a unique event name -> event code mapping.

     The mapping can that can be used across all passed raws.
     """
-    event_names: List[str] = []
+    event_names: list[str] = []
     for raw_fname in raw_paths:
         raw = mne.io.read_raw_fif(raw_fname)
         _, event_id = mne.events_from_annotations(raw=raw)
@@ -434,6 +435,8 @@ def import_er_data(
         The BIDS path to the empty room bad channels file.
     bids_path_ref_bads_in
         The BIDS path to the reference data bad channels file.
+    prepare_maxwell_filter
+        Whether to prepare the empty-room data for Maxwell filtering.

     Returns
     -------
@@ -753,7 +756,7 @@ def _read_bads_tsv(
     *,
     cfg: SimpleNamespace,
     bids_path_bads: BIDSPath,
-) -> List[str]:
+) -> list[str]:
     bads_tsv = pd.read_csv(bids_path_bads.fpath, sep="\t", header=0)
     return bads_tsv[bads_tsv.columns[0]].tolist()
diff --git a/mne_bids_pipeline/_io.py b/mne_bids_pipeline/_io.py
index 0b7485f76..f1a2b0ce3 100644
--- a/mne_bids_pipeline/_io.py
+++ b/mne_bids_pipeline/_io.py
@@ -14,7 +14,7 @@ def _write_json(fname: PathLike, data: dict) -> None:

 def _read_json(fname: PathLike) -> dict:
-    with open(fname, "r", encoding="utf-8") as f:
+    with open(fname, encoding="utf-8") as f:
         return json_tricks.load(f)
diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py
index ddbb49c6a..04ddabe1e 100755
--- a/mne_bids_pipeline/_main.py
+++ b/mne_bids_pipeline/_main.py
@@ -1,16 +1,15 @@
 import argparse
 import pathlib
-from textwrap import dedent
 import time
-from typing import List
+from textwrap import dedent
 from types import ModuleType, SimpleNamespace

 import numpy as np

-from ._config_utils import _get_step_modules
 from ._config_import import _import_config
 from ._config_template import create_template_config
-from ._logging import logger, gen_log_kwargs
+from ._config_utils import _get_step_modules
+from ._logging import gen_log_kwargs, logger
 from ._parallel import get_parallel_backend
 from ._run import _short_step_path
@@ -182,7 +181,7 @@ def main():
     if not cache:
         overrides.memory_location = False

-    step_modules: List[ModuleType] = []
+    step_modules: list[ModuleType] = []
     STEP_MODULES = _get_step_modules()
     for stage, step in zip(processing_stages, processing_steps):
         if stage not in STEP_MODULES.keys():
diff --git a/mne_bids_pipeline/_parallel.py b/mne_bids_pipeline/_parallel.py
index e79ae5151..9c74e6474 100644
--- a/mne_bids_pipeline/_parallel.py
+++ b/mne_bids_pipeline/_parallel.py
@@ -1,12 +1,13 @@
 """Parallelization."""

-from typing import Literal, Callable
 from types import SimpleNamespace
+from typing import Callable, Literal

 import joblib
-from mne.utils import use_log_level, logger as mne_logger
+from mne.utils import logger as mne_logger
+from mne.utils import use_log_level

-from ._logging import logger, gen_log_kwargs, _is_testing
+from ._logging import _is_testing, gen_log_kwargs, logger


 def get_n_jobs(*, exec_params: SimpleNamespace, log_override: bool = False) -> int:
diff --git a/mne_bids_pipeline/_reject.py b/mne_bids_pipeline/_reject.py
index 5b3729dc2..ca506239d 100644
--- a/mne_bids_pipeline/_reject.py
+++ b/mne_bids_pipeline/_reject.py
@@ -1,21 +1,22 @@
 """Rejection."""

-from typing import Optional, Union, Iterable, Dict, Literal
+from collections.abc import Iterable
+from typing import Literal, Optional, Union

 import mne

-from ._logging import logger, gen_log_kwargs
+from ._logging import gen_log_kwargs, logger


 def _get_reject(
     *,
     subject: str,
     session: Optional[str],
-    reject: Union[Dict[str, float], Literal["autoreject_global"]],
+    reject: Union[dict[str, float], Literal["autoreject_global"]],
     ch_types: Iterable[Literal["meg", "mag", "grad", "eeg"]],
     param: str,
     epochs: Optional[mne.BaseEpochs] = None,
-) -> Dict[str, float]:
+) -> dict[str, float]:
     if reject is None:
         return dict()
diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py
index bf42a27a2..ed514925d 100644
--- a/mne_bids_pipeline/_report.py
+++ b/mne_bids_pipeline/_report.py
@@ -1,24 +1,23 @@
 import contextlib
 from functools import lru_cache
 from io import StringIO
-from typing import Optional, List, Literal
 from types import SimpleNamespace
+from typing import Literal, Optional

-from filelock import FileLock
 import matplotlib.transforms
+import mne
 import numpy as np
 import pandas as pd
-from scipy.io import loadmat
-
-import mne
+from filelock import FileLock
 from mne.io import BaseRaw
 from mne.utils import _pl
 from mne_bids import BIDSPath
 from mne_bids.stats import count_events
+from scipy.io import loadmat

 from ._config_utils import get_all_contrasts
 from ._decoding import _handle_csp_args
-from ._logging import logger, gen_log_kwargs, _linkfile
+from ._logging import _linkfile, gen_log_kwargs, logger
@@ -123,8 +122,8 @@ def _open_report(

 def _plot_full_epochs_decoding_scores(
-    contrast_names: List[str],
-    scores: List[np.ndarray],
+    contrast_names: list[str],
+    scores: list[np.ndarray],
     metric: str,
     kind: Literal["single-subject", "grand-average"] = "single-subject",
 ):
@@ -458,7 +457,7 @@ def _gen_empty_report(
     return report

-def _contrasts_to_names(contrasts: List[List[str]]) -> List[str]:
+def _contrasts_to_names(contrasts: list[list[str]]) -> list[str]:
     return [f"{c[0]} vs.\n{c[1]}" for c in contrasts]
diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py
index c76126ea2..128b876ed 100644
--- a/mne_bids_pipeline/_run.py
+++ b/mne_bids_pipeline/_run.py
@@ -7,19 +7,19 @@
 import pathlib
 import pdb
 import sys
-import traceback
 import time
-from typing import Callable, Optional, Dict, List, Literal, Union
+import traceback
 from types import SimpleNamespace
+from typing import Callable, Literal, Optional, Union

-from filelock import FileLock
-from joblib import Memory
 import json_tricks
 import pandas as pd
+from filelock import FileLock
+from joblib import Memory
 from mne_bids import BIDSPath

 from ._config_utils import get_task
-from ._logging import logger, gen_log_kwargs, _is_testing
+from ._logging import _is_testing, gen_log_kwargs, logger


 def failsafe_run(
@@ -303,7 +303,7 @@ def save_logs(*, config: SimpleNamespace, logs) -> None:  # TODO add type

 def _update_for_splits(
-    files_dict: Union[Dict[str, BIDSPath], BIDSPath],
+    files_dict: Union[dict[str, BIDSPath], BIDSPath],
     key: Optional[str],
     *,
     single: bool = False,
@@ -346,7 +346,7 @@ def _sanitize_callable(val):

 def _get_step_path(
-    stack: Optional[List[inspect.FrameInfo]] = None,
+    stack: Optional[list[inspect.FrameInfo]] = None,
 ) -> pathlib.Path:
     if stack is None:
         stack = inspect.stack()
@@ -372,7 +372,7 @@ def _short_step_path(step_path: pathlib.Path) -> str:

 def _prep_out_files(
     *,
     exec_params: SimpleNamespace,
-    out_files: Dict[str, BIDSPath],
+    out_files: dict[str, BIDSPath],
 ):
     for key, fname in out_files.items():
         out_files[key] = _path_to_str_hash(
diff --git a/mne_bids_pipeline/_viz.py b/mne_bids_pipeline/_viz.py
index 8e49af509..4055ab7c4 100644
--- a/mne_bids_pipeline/_viz.py
+++ b/mne_bids_pipeline/_viz.py
@@ -1,10 +1,9 @@
-from typing import List
 import numpy as np
 import pandas as pd
 from matplotlib.figure import Figure


-def plot_auto_scores(auto_scores, *, ch_types) -> List[Figure]:
+def plot_auto_scores(auto_scores, *, ch_types) -> list[Figure]:
     # Plot scores of automated bad channel detection.
     import matplotlib.pyplot as plt
     import seaborn as sns
@@ -15,7 +14,7 @@ def plot_auto_scores(auto_scores, *, ch_types) -> list[Figure]:
         ch_types_[idx] = "grad"
         ch_types_.insert(idx + 1, "mag")

-    figs: List[Figure] = []
+    figs: list[Figure] = []
     for ch_type in ch_types_:
         # Only select the data for mag or grad channels.
         ch_subset = auto_scores["ch_types"] == ch_type
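Not all of the churn in the files above is import sorting: ruff's pydocstyle rules also rewrite docstring summaries into the imperative mood ("Give the mapping…" rather than "Gives the mapping…", rule D401), and the diff fills in missing numpydoc parameter entries such as `prepare_maxwell_filter` in `import_er_data`. A trimmed-down illustration of the convention (not the real signature):

```python
def import_er_data(prepare_maxwell_filter: bool = False) -> None:
    """Import empty-room data.

    Parameters
    ----------
    prepare_maxwell_filter
        Whether to prepare the empty-room data for Maxwell filtering.
    """
```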
diff --git a/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py b/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py
index ee803c800..0633a9db0 100755
--- a/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py
+++ b/mne_bids_pipeline/steps/freesurfer/_01_recon_all.py
@@ -11,8 +11,8 @@
 from mne.utils import run_subprocess

 from ..._config_utils import get_fs_subjects_dir, get_subjects
-from ..._logging import logger, gen_log_kwargs
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func

 fs_bids_app = Path(__file__).parent / "contrib" / "run.py"
diff --git a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
index 560448713..eb5f86151 100644
--- a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
+++ b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py
@@ -10,14 +10,14 @@
 import mne.bem

 from ..._config_utils import (
-    get_fs_subjects_dir,
+    _get_scalp_in_files,
     get_fs_subject,
+    get_fs_subjects_dir,
     get_subjects,
-    _get_scalp_in_files,
 )
-from ..._logging import logger, gen_log_kwargs
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._run import failsafe_run, _prep_out_files
+from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._run import _prep_out_files, failsafe_run

 fs_bids_app = Path(__file__).parent / "contrib" / "run.py"
diff --git a/mne_bids_pipeline/steps/freesurfer/__init__.py b/mne_bids_pipeline/steps/freesurfer/__init__.py
index 84e37008a..7f4d9d088 100644
--- a/mne_bids_pipeline/steps/freesurfer/__init__.py
+++ b/mne_bids_pipeline/steps/freesurfer/__init__.py
@@ -3,7 +3,6 @@
 Surface reconstruction via FreeSurfer. These steps are not run by default.
 """

-from . import _01_recon_all
-from . import _02_coreg_surfaces
+from . import _01_recon_all, _02_coreg_surfaces

 _STEPS = (_01_recon_all, _02_coreg_surfaces)
diff --git a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
index a964e6d59..2f17b0c77 100644
--- a/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
+++ b/mne_bids_pipeline/steps/init/_01_init_derivatives_dir.py
@@ -3,13 +3,13 @@
 Initialize the derivatives directory.
 """

-from typing import Optional
 from types import SimpleNamespace
+from typing import Optional

 from mne_bids.config import BIDS_VERSION
 from mne_bids.utils import _write_json

-from ..._config_utils import get_subjects, get_sessions, _bids_kwargs
+from ..._config_utils import _bids_kwargs, get_sessions, get_subjects
 from ..._logging import gen_log_kwargs, logger
 from ..._run import failsafe_run
diff --git a/mne_bids_pipeline/steps/init/_02_find_empty_room.py b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
index d9334a9cf..fcb0536c5 100644
--- a/mne_bids_pipeline/steps/init/_02_find_empty_room.py
+++ b/mne_bids_pipeline/steps/init/_02_find_empty_room.py
@@ -1,26 +1,26 @@
 """Find empty-room data matches."""

 from types import SimpleNamespace
-from typing import Dict, Optional
+from typing import Optional

 from mne_bids import BIDSPath

 from ..._config_utils import (
+    _bids_kwargs,
+    _pl,
     get_datatype,
+    get_mf_reference_run,
     get_sessions,
     get_subjects,
-    get_mf_reference_run,
-    _bids_kwargs,
-    _pl,
 )
 from ..._io import _empty_room_match_path, _write_json
 from ..._logging import gen_log_kwargs, logger
-from ..._run import _update_for_splits, failsafe_run, save_logs, _prep_out_files
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs


 def get_input_fnames_find_empty_room(
     *, subject: str, session: Optional[str], run: Optional[str], cfg: SimpleNamespace
-) -> Dict[str, BIDSPath]:
+) -> dict[str, BIDSPath]:
     """Get paths of files required by find_empty_room function."""
     bids_path_in = BIDSPath(
         subject=subject,
@@ -35,7 +35,7 @@ def get_input_fnames_find_empty_room(
         root=cfg.bids_root,
         check=False,
     )
-    in_files: Dict[str, BIDSPath] = dict()
+    in_files: dict[str, BIDSPath] = dict()
     in_files[f"raw_run-{run}"] = bids_path_in
     _update_for_splits(in_files, f"raw_run-{run}", single=True)
     if hasattr(bids_path_in, "find_matching_sidecar"):
@@ -64,8 +64,8 @@ def find_empty_room(
     subject: str,
     session: Optional[str],
     run: Optional[str],
-    in_files: Dict[str, BIDSPath],
-) -> Dict[str, BIDSPath]:
+    in_files: dict[str, BIDSPath],
+) -> dict[str, BIDSPath]:
     raw_path = in_files.pop(f"raw_run-{run}")
     in_files.pop("sidecar", None)
     try:
diff --git a/mne_bids_pipeline/steps/init/__init__.py b/mne_bids_pipeline/steps/init/__init__.py
index 72a80cf13..6435ffdfe 100644
--- a/mne_bids_pipeline/steps/init/__init__.py
+++ b/mne_bids_pipeline/steps/init/__init__.py
@@ -1,7 +1,6 @@
 """Filesystem initialization and dataset inspection."""

-from . import _01_init_derivatives_dir
-from . import _02_find_empty_room
+from . import _01_init_derivatives_dir, _02_find_empty_room

 _STEPS = (
     _01_init_derivatives_dir,
diff --git a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
index 655280e52..3b64c5659 100644
--- a/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
+++ b/mne_bids_pipeline/steps/preprocessing/_01_data_quality.py
@@ -3,34 +3,33 @@
 from types import SimpleNamespace
 from typing import Optional

-import pandas as pd
-
 import mne
+import pandas as pd
 from mne_bids import BIDSPath

 from ..._config_utils import (
+    _do_mf_autobad,
+    _pl,
     get_mf_cal_fname,
     get_mf_ctc_fname,
-    get_subjects,
-    get_sessions,
     get_runs_tasks,
-    _do_mf_autobad,
-    _pl,
+    get_sessions,
+    get_subjects,
 )
 from ..._import_data import (
-    _get_run_rest_noise_path,
-    _get_mf_reference_run_path,
-    import_experimental_data,
-    import_er_data,
-    _bads_path,
     _auto_scores_path,
+    _bads_path,
+    _get_mf_reference_run_path,
+    _get_run_rest_noise_path,
     _import_data_kwargs,
+    import_er_data,
+    import_experimental_data,
 )
 from ..._io import _write_json
 from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._report import _open_report, _add_raw
-from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _add_raw, _open_report
+from ..._run import _prep_out_files, failsafe_run, save_logs
 from ..._viz import plot_auto_scores
diff --git a/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py b/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
index a75cd7339..d4a6a2c6b 100644
--- a/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
+++ b/mne_bids_pipeline/steps/preprocessing/_02_head_pos.py
@@ -1,24 +1,24 @@
 """Estimate head positions."""

-from typing import Optional
 from types import SimpleNamespace
+from typing import Optional

 import mne

 from ..._config_utils import (
-    get_subjects,
-    get_sessions,
     get_runs_tasks,
+    get_sessions,
+    get_subjects,
 )
 from ..._import_data import (
-    import_experimental_data,
     _get_run_rest_noise_path,
     _import_data_kwargs,
+    import_experimental_data,
 )
 from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._parallel import get_parallel_backend, parallel_func
 from ..._report import _open_report
-from ..._run import failsafe_run, save_logs, _prep_out_files
+from ..._run import _prep_out_files, failsafe_run, save_logs


 def get_input_fnames_head_pos(
diff --git a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
index 099336c5c..c5b58e2b6 100644
--- a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
@@ -14,35 +14,35 @@
 The function loads machine-specific calibration files.
 """

-from copy import deepcopy
 import gc
-from typing import Optional
+from copy import deepcopy
 from types import SimpleNamespace
+from typing import Optional

-import numpy as np
 import mne
+import numpy as np
 from mne_bids import read_raw_bids

 from ..._config_utils import (
+    _pl,
     get_mf_cal_fname,
     get_mf_ctc_fname,
-    get_subjects,
-    get_sessions,
     get_runs_tasks,
-    _pl,
+    get_sessions,
+    get_subjects,
 )
 from ..._import_data import (
-    import_experimental_data,
-    import_er_data,
+    _get_mf_reference_run_path,
     _get_run_path,
     _get_run_rest_noise_path,
-    _get_mf_reference_run_path,
     _import_data_kwargs,
+    import_er_data,
+    import_experimental_data,
 )
 from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._report import _open_report, _add_raw
-from ..._run import failsafe_run, save_logs, _update_for_splits, _prep_out_files
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _add_raw, _open_report
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs

 # %% eSSS
diff --git a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
index d026539ee..a44a1c70e 100644
--- a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py
@@ -14,27 +14,28 @@
 If config.interactive = True plots raw data and power spectral density.
 """  # noqa: E501

-import numpy as np
+from collections.abc import Iterable
 from types import SimpleNamespace
-from typing import Optional, Union, Literal, Iterable
+from typing import Literal, Optional, Union

 import mne
+import numpy as np

 from ..._config_utils import (
-    get_sessions,
     get_runs_tasks,
+    get_sessions,
     get_subjects,
 )
 from ..._import_data import (
-    import_experimental_data,
-    import_er_data,
     _get_run_rest_noise_path,
     _import_data_kwargs,
+    import_er_data,
+    import_experimental_data,
 )
 from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._report import _open_report, _add_raw
-from ..._run import failsafe_run, save_logs, _update_for_splits, _prep_out_files
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _add_raw, _open_report
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs


 def get_input_fnames_frequency_filter(
diff --git a/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py b/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py
index d4deb4078..0cebb033e 100644
--- a/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py
+++ b/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py
@@ -14,23 +14,23 @@
 from mne_bids import BIDSPath

 from ..._config_utils import (
-    get_runs,
-    get_subjects,
+    _bids_kwargs,
     get_eeg_reference,
+    get_runs,
     get_sessions,
-    _bids_kwargs,
+    get_subjects,
 )
-from ..._import_data import make_epochs, annotations_to_events
+from ..._import_data import annotations_to_events, make_epochs
 from ..._logging import gen_log_kwargs, logger
+from ..._parallel import get_parallel_backend, parallel_func
 from ..._report import _open_report
 from ..._run import (
+    _prep_out_files,
+    _sanitize_callable,
+    _update_for_splits,
     failsafe_run,
     save_logs,
-    _update_for_splits,
-    _sanitize_callable,
-    _prep_out_files,
 )
-from ..._parallel import parallel_func, get_parallel_backend


 def get_input_fnames_epochs(
diff --git a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
index 294d05a26..00346df25 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py
@@ -11,31 +11,31 @@
 run 05a-apply_ica.py.
 """

-from typing import List, Optional, Iterable, Tuple, Literal
+from collections.abc import Iterable
 from types import SimpleNamespace
+from typing import Literal, Optional

-import pandas as pd
-import numpy as np
 import autoreject
-
 import mne
-from mne.report import Report
+import numpy as np
+import pandas as pd
 from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
+from mne.report import Report
 from mne_bids import BIDSPath

 from ..._config_utils import (
+    _bids_kwargs,
+    get_eeg_reference,
     get_runs,
     get_sessions,
     get_subjects,
-    get_eeg_reference,
-    _bids_kwargs,
 )
-from ..._import_data import make_epochs, annotations_to_events
+from ..._import_data import annotations_to_events, make_epochs
 from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._parallel import get_parallel_backend, parallel_func
 from ..._reject import _get_reject
 from ..._report import _agg_backend
-from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs


 def filter_for_ica(
@@ -190,10 +190,10 @@ def detect_bad_components(
     which: Literal["eog", "ecg"],
     epochs: mne.BaseEpochs,
     ica: mne.preprocessing.ICA,
-    ch_names: Optional[List[str]],
+    ch_names: Optional[list[str]],
     subject: str,
     session: Optional[str],
-) -> Tuple[List[int], np.ndarray]:
+) -> tuple[list[int], np.ndarray]:
     artifact = which.upper()
     msg = f"Performing automated {artifact} artifact detection …"
     logger.info(**gen_log_kwargs(message=msg))
diff --git a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
index eeb22cf36..46b88ee90 100644
--- a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py
@@ -3,26 +3,26 @@
 These are often also referred to as PCA vectors.
 """

-from typing import Optional
 from types import SimpleNamespace
+from typing import Optional

 import mne
-from mne.preprocessing import create_eog_epochs, create_ecg_epochs
-from mne import compute_proj_evoked, compute_proj_epochs
+from mne import compute_proj_epochs, compute_proj_evoked
+from mne.preprocessing import create_ecg_epochs, create_eog_epochs
 from mne_bids import BIDSPath

 from ..._config_utils import (
+    _bids_kwargs,
+    _pl,
     get_runs,
     get_sessions,
     get_subjects,
-    _bids_kwargs,
-    _pl,
 )
 from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._parallel import get_parallel_backend, parallel_func
 from ..._reject import _get_reject
 from ..._report import _open_report
-from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs


 def get_input_fnames_run_ssp(
diff --git a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
index 4b906a106..c24d8e015 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py
@@ -14,22 +14,21 @@
 from types import SimpleNamespace
 from typing import Optional

-import pandas as pd
 import mne
+import pandas as pd
 from mne.preprocessing import read_ica
 from mne.report import Report
-
 from mne_bids import BIDSPath

 from ..._config_utils import (
-    get_subjects,
-    get_sessions,
     _bids_kwargs,
+    get_sessions,
+    get_subjects,
 )
 from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
-from ..._report import _open_report, _agg_backend
-from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._report import _agg_backend, _open_report
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs


 def get_input_fnames_apply_ica(
diff --git a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py b/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
index 65fc27b70..9b1a83fc9 100644
--- a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
+++ b/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py
@@ -12,13 +12,13 @@
 from mne_bids import BIDSPath

 from ..._config_utils import (
+    _bids_kwargs,
     get_sessions,
     get_subjects,
-    _bids_kwargs,
 )
 from ..._logging import gen_log_kwargs, logger
-from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._parallel import get_parallel_backend, parallel_func
+from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs


 def get_input_fnames_apply_ssp(
diff --git a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
index b4a29f4e7..7f0bf0607 100644
--- a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
+++ b/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py
@@ -11,22 +11,21 @@
 from types import SimpleNamespace
 from typing import Optional

-import numpy as np
 import autoreject
-
 import mne
+import numpy as np
 from mne_bids import BIDSPath

 from ..._config_utils import (
+    _bids_kwargs,
     get_sessions,
     get_subjects,
-    _bids_kwargs,
 )
 from ..._logging import gen_log_kwargs, logger
-from ..._parallel import parallel_func, get_parallel_backend
+from ..._parallel import 
get_parallel_backend, parallel_func from ..._reject import _get_reject from ..._report import _open_report -from ..._run import failsafe_run, _update_for_splits, save_logs, _prep_out_files +from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs def get_input_fnames_drop_ptp( diff --git a/mne_bids_pipeline/steps/preprocessing/__init__.py b/mne_bids_pipeline/steps/preprocessing/__init__.py index 95637ecab..686b7cf27 100644 --- a/mne_bids_pipeline/steps/preprocessing/__init__.py +++ b/mne_bids_pipeline/steps/preprocessing/__init__.py @@ -1,15 +1,17 @@ """Preprocessing.""" -from . import _01_data_quality -from . import _02_head_pos -from . import _03_maxfilter -from . import _04_frequency_filter -from . import _05_make_epochs -from . import _06a_run_ica -from . import _06b_run_ssp -from . import _07a_apply_ica -from . import _07b_apply_ssp -from . import _08_ptp_reject +from . import ( + _01_data_quality, + _02_head_pos, + _03_maxfilter, + _04_frequency_filter, + _05_make_epochs, + _06a_run_ica, + _06b_run_ssp, + _07a_apply_ica, + _07b_apply_ssp, + _08_ptp_reject, +) _STEPS = ( _01_data_quality, diff --git a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py index 2ec0ea714..63d1854ae 100644 --- a/mne_bids_pipeline/steps/sensor/_01_make_evoked.py +++ b/mne_bids_pipeline/steps/sensor/_01_make_evoked.py @@ -7,22 +7,22 @@ from mne_bids import BIDSPath from ..._config_utils import ( - get_sessions, - get_subjects, - get_all_contrasts, _bids_kwargs, - _restrict_analyze_channels, _pl, + _restrict_analyze_channels, + get_all_contrasts, + get_sessions, + get_subjects, ) from ..._logging import gen_log_kwargs, logger -from ..._parallel import parallel_func, get_parallel_backend -from ..._report import _open_report, _sanitize_cond_tag, _all_conditions +from ..._parallel import get_parallel_backend, parallel_func +from ..._report import _all_conditions, _open_report, _sanitize_cond_tag from ..._run import ( - failsafe_run, - save_logs, - _sanitize_callable, _prep_out_files, + _sanitize_callable, _update_for_splits, + failsafe_run, + save_logs, ) diff --git a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py index 81243bcd9..58a354c1c 100644 --- a/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py +++ b/mne_bids_pipeline/steps/sensor/_02_decoding_full_epochs.py @@ -12,37 +12,34 @@ from types import SimpleNamespace from typing import Optional +import mne import numpy as np import pandas as pd -from scipy.io import savemat, loadmat - -from sklearn.model_selection import cross_val_score -from sklearn.pipeline import make_pipeline -from sklearn.model_selection import StratifiedKFold - -import mne from mne.decoding import Scaler, Vectorizer from mne_bids import BIDSPath +from scipy.io import loadmat, savemat +from sklearn.model_selection import StratifiedKFold, cross_val_score +from sklearn.pipeline import make_pipeline from ..._config_utils import ( - get_sessions, - get_subjects, - get_eeg_reference, - get_decoding_contrasts, _bids_kwargs, - _restrict_analyze_channels, _get_decoding_proc, + _restrict_analyze_channels, + get_decoding_contrasts, + get_eeg_reference, + get_sessions, + get_subjects, ) -from ..._logging import gen_log_kwargs, logger from ..._decoding import LogReg -from ..._parallel import parallel_func, get_parallel_backend -from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits +from ..._logging import 
gen_log_kwargs, logger +from ..._parallel import get_parallel_backend, parallel_func from ..._report import ( - _open_report, _contrasts_to_names, + _open_report, _plot_full_epochs_decoding_scores, _sanitize_cond_tag, ) +from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs def get_input_fnames_epochs_decoding( diff --git a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py index b435cf6ae..d61e865c4 100644 --- a/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py +++ b/mne_bids_pipeline/steps/sensor/_03_decoding_time_by_time.py @@ -15,38 +15,35 @@ from types import SimpleNamespace from typing import Optional +import mne import numpy as np import pandas as pd -from scipy.io import savemat, loadmat - -import mne from mne.decoding import GeneralizingEstimator, SlidingEstimator, cross_val_multiscore - from mne_bids import BIDSPath - -from sklearn.preprocessing import StandardScaler -from sklearn.pipeline import make_pipeline +from scipy.io import loadmat, savemat from sklearn.model_selection import StratifiedKFold +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler from ..._config_utils import ( - get_sessions, - get_subjects, - get_eeg_reference, - get_decoding_contrasts, _bids_kwargs, - _restrict_analyze_channels, _get_decoding_proc, + _restrict_analyze_channels, + get_decoding_contrasts, + get_eeg_reference, + get_sessions, + get_subjects, ) from ..._decoding import LogReg from ..._logging import gen_log_kwargs, logger -from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits from ..._parallel import get_parallel_backend, get_parallel_backend_name from ..._report import ( _open_report, _plot_decoding_time_generalization, - _sanitize_cond_tag, _plot_time_by_time_decoding_scores, + _sanitize_cond_tag, ) +from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs def get_input_fnames_time_decoding( diff --git a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py index e1e7b440c..0ab3aa3ea 100644 --- a/mne_bids_pipeline/steps/sensor/_04_time_frequency.py +++ b/mne_bids_pipeline/steps/sensor/_04_time_frequency.py @@ -7,24 +7,22 @@ from types import SimpleNamespace from typing import Optional -import numpy as np - import mne - +import numpy as np from mne_bids import BIDSPath from ..._config_utils import ( + _bids_kwargs, + _restrict_analyze_channels, + get_eeg_reference, get_sessions, get_subjects, - get_eeg_reference, sanitize_cond_name, - _bids_kwargs, - _restrict_analyze_channels, ) from ..._logging import gen_log_kwargs, logger -from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits from ..._parallel import get_parallel_backend, parallel_func from ..._report import _open_report, _sanitize_cond_tag +from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs def get_input_fnames_time_frequency( diff --git a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py index 1614854c1..c9d3ee077 100644 --- a/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py +++ b/mne_bids_pipeline/steps/sensor/_05_decoding_csp.py @@ -1,15 +1,13 @@ -""" -Decoding based on common spatial patterns (CSP). 
-""" +"""Decoding based on common spatial patterns (CSP).""" import os.path as op from types import SimpleNamespace -from typing import Dict, Optional, Tuple +from typing import Optional +import matplotlib.transforms import mne import numpy as np import pandas as pd -import matplotlib.transforms from mne.decoding import CSP, UnsupervisedSpatialFilter from mne_bids import BIDSPath from sklearn.decomposition import PCA @@ -17,35 +15,35 @@ from sklearn.pipeline import make_pipeline from ..._config_utils import ( - get_sessions, - get_subjects, - get_eeg_reference, - get_decoding_contrasts, _bids_kwargs, - _restrict_analyze_channels, _get_decoding_proc, + _restrict_analyze_channels, + get_decoding_contrasts, + get_eeg_reference, + get_sessions, + get_subjects, ) from ..._decoding import LogReg, _handle_csp_args -from ..._logging import logger, gen_log_kwargs -from ..._parallel import parallel_func, get_parallel_backend -from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits +from ..._logging import gen_log_kwargs, logger +from ..._parallel import get_parallel_backend, parallel_func from ..._report import ( + _imshow_tf, _open_report, - _sanitize_cond_tag, _plot_full_epochs_decoding_scores, - _imshow_tf, + _sanitize_cond_tag, ) +from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs -def _prepare_labels(*, epochs: mne.BaseEpochs, contrast: Tuple[str, str]) -> np.ndarray: +def _prepare_labels(*, epochs: mne.BaseEpochs, contrast: tuple[str, str]) -> np.ndarray: """Return the projection of the events_id on a boolean vector. This projection is useful in the case of hierarchical events: we project the different events contained in one condition into just one label. - Returns: - -------- + Returns + ------- A boolean numpy array containing the labels. """ epochs_cond_0 = epochs[contrast[0]] @@ -79,8 +77,8 @@ def _prepare_labels(*, epochs: mne.BaseEpochs, contrast: Tuple[str, str]) -> np. def prepare_epochs_and_y( - *, epochs: mne.BaseEpochs, contrast: Tuple[str, str], cfg, fmin: float, fmax: float -) -> Tuple[mne.BaseEpochs, np.ndarray]: + *, epochs: mne.BaseEpochs, contrast: tuple[str, str], cfg, fmin: float, fmax: float +) -> tuple[mne.BaseEpochs, np.ndarray]: """Band-pass between, sub-select the desired epochs, and prepare y.""" epochs_filt = epochs.copy().pick(["meg", "eeg"]) @@ -112,7 +110,7 @@ def get_input_fnames_csp( cfg: SimpleNamespace, subject: str, session: Optional[str], - contrast: Tuple[str], + contrast: tuple[str], ) -> dict: proc = _get_decoding_proc(config=cfg) fname_epochs = BIDSPath( @@ -143,8 +141,8 @@ def one_subject_decoding( exec_params: SimpleNamespace, subject: str, session: str, - contrast: Tuple[str, str], - in_files: Dict[str, BIDSPath], + contrast: tuple[str, str], + in_files: dict[str, BIDSPath], ) -> dict: """Run one subject. diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py index 2cb3b8ebf..a9c211df4 100644 --- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py +++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py @@ -3,29 +3,29 @@ Covariance matrices are computed and saved. 
""" -from typing import Optional from types import SimpleNamespace +from typing import Optional import mne from mne_bids import BIDSPath +from ..._config_import import _import_config from ..._config_utils import ( + _bids_kwargs, + _restrict_analyze_channels, + get_noise_cov_bids_path, get_sessions, get_subjects, - get_noise_cov_bids_path, - _bids_kwargs, ) -from ..._config_import import _import_config -from ..._config_utils import _restrict_analyze_channels from ..._logging import gen_log_kwargs, logger from ..._parallel import get_parallel_backend, parallel_func -from ..._report import _open_report, _sanitize_cond_tag, _all_conditions +from ..._report import _all_conditions, _open_report, _sanitize_cond_tag from ..._run import ( - failsafe_run, - save_logs, - _sanitize_callable, _prep_out_files, + _sanitize_callable, _update_for_splits, + failsafe_run, + save_logs, ) diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py index 98e275336..7ac19e7de 100644 --- a/mne_bids_pipeline/steps/sensor/_99_group_average.py +++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py @@ -6,42 +6,41 @@ import os import os.path as op from functools import partial -from typing import Optional, List, Tuple from types import SimpleNamespace -from ...typing import TypedDict +from typing import Optional +import mne import numpy as np import pandas as pd -from scipy.io import loadmat, savemat - -import mne from mne_bids import BIDSPath +from scipy.io import loadmat, savemat from ..._config_utils import ( - get_sessions, - get_subjects, - get_eeg_reference, - get_decoding_contrasts, _bids_kwargs, - _restrict_analyze_channels, _pl, + _restrict_analyze_channels, + get_decoding_contrasts, + get_eeg_reference, + get_sessions, + get_subjects, ) from ..._decoding import _handle_csp_args from ..._logging import gen_log_kwargs, logger from ..._parallel import get_parallel_backend, parallel_func -from ..._run import failsafe_run, save_logs, _prep_out_files, _update_for_splits from ..._report import ( + _all_conditions, + _contrasts_to_names, _open_report, - _sanitize_cond_tag, - add_event_counts, - add_csp_grand_average, + _plot_decoding_time_generalization, _plot_full_epochs_decoding_scores, _plot_time_by_time_decoding_scores_gavg, + _sanitize_cond_tag, + add_csp_grand_average, + add_event_counts, plot_time_by_time_decoding_t_values, - _plot_decoding_time_generalization, - _contrasts_to_names, - _all_conditions, ) +from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs +from ...typing import TypedDict def get_input_fnames_average_evokeds( @@ -186,7 +185,7 @@ def _decoding_cluster_permutation_test( cluster_forming_t_threshold: Optional[float], n_permutations: int, random_seed: int, -) -> Tuple[np.ndarray, List[ClusterAcrossTime], int]: +) -> tuple[np.ndarray, list[ClusterAcrossTime], int]: """Perform a cluster permutation test on decoding scores. The clusters are formed across time points. 
@@ -625,7 +624,7 @@ def get_input_files_average_full_epochs_report( cfg: SimpleNamespace, subject: str, session: Optional[str], - decoding_contrasts: List[List[str]], + decoding_contrasts: list[list[str]], ) -> dict: in_files = dict() for contrast in decoding_contrasts: @@ -649,7 +648,7 @@ def average_full_epochs_report( exec_params: SimpleNamespace, subject: str, session: Optional[str], - decoding_contrasts: List[List[str]], + decoding_contrasts: list[list[str]], in_files: dict, ) -> dict: """Add decoding results to the grand average report.""" diff --git a/mne_bids_pipeline/steps/sensor/__init__.py b/mne_bids_pipeline/steps/sensor/__init__.py index fc76bf551..848efadf8 100644 --- a/mne_bids_pipeline/steps/sensor/__init__.py +++ b/mne_bids_pipeline/steps/sensor/__init__.py @@ -1,12 +1,14 @@ """Sensor-space analysis.""" -from . import _01_make_evoked -from . import _02_decoding_full_epochs -from . import _03_decoding_time_by_time -from . import _04_time_frequency -from . import _05_decoding_csp -from . import _06_make_cov -from . import _99_group_average +from . import ( + _01_make_evoked, + _02_decoding_full_epochs, + _03_decoding_time_by_time, + _04_time_frequency, + _05_decoding_csp, + _06_make_cov, + _99_group_average, +) _STEPS = ( _01_make_evoked, diff --git a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py index fc4051c9f..da2b64890 100644 --- a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py +++ b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py @@ -11,17 +11,17 @@ import mne from ..._config_utils import ( - get_fs_subject, - get_subjects, - get_sessions, + _bids_kwargs, _get_bem_conductivity, + get_fs_subject, get_fs_subjects_dir, - _bids_kwargs, + get_sessions, + get_subjects, ) -from ..._logging import logger, gen_log_kwargs +from ..._logging import gen_log_kwargs, logger from ..._parallel import get_parallel_backend, parallel_func -from ..._run import failsafe_run, save_logs, _prep_out_files from ..._report import _open_report, _render_bem +from ..._run import _prep_out_files, failsafe_run, save_logs def _get_bem_params(cfg: SimpleNamespace): diff --git a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py index 67f0c2737..a09d063e2 100644 --- a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py +++ b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py @@ -10,13 +10,13 @@ from ..._config_utils import ( _get_bem_conductivity, - get_fs_subjects_dir, get_fs_subject, + get_fs_subjects_dir, get_subjects, ) -from ..._logging import logger, gen_log_kwargs -from ..._parallel import parallel_func, get_parallel_backend -from ..._run import failsafe_run, save_logs, _prep_out_files +from ..._logging import gen_log_kwargs, logger +from ..._parallel import get_parallel_backend, parallel_func +from ..._run import _prep_out_files, failsafe_run, save_logs def get_input_fnames_make_bem_solution( diff --git a/mne_bids_pipeline/steps/source/_03_setup_source_space.py b/mne_bids_pipeline/steps/source/_03_setup_source_space.py index 4710750f9..64e7314ed 100644 --- a/mne_bids_pipeline/steps/source/_03_setup_source_space.py +++ b/mne_bids_pipeline/steps/source/_03_setup_source_space.py @@ -8,9 +8,9 @@ import mne from ..._config_utils import get_fs_subject, get_fs_subjects_dir, get_subjects -from ..._logging import logger, gen_log_kwargs -from ..._run import failsafe_run, save_logs, _prep_out_files -from ..._parallel import parallel_func, 
get_parallel_backend +from ..._logging import gen_log_kwargs, logger +from ..._parallel import get_parallel_backend, parallel_func +from ..._run import _prep_out_files, failsafe_run, save_logs def get_input_fnames_setup_source_space(*, cfg, subject): diff --git a/mne_bids_pipeline/steps/source/_04_make_forward.py b/mne_bids_pipeline/steps/source/_04_make_forward.py index a2c1fc211..28586b742 100644 --- a/mne_bids_pipeline/steps/source/_04_make_forward.py +++ b/mne_bids_pipeline/steps/source/_04_make_forward.py @@ -6,27 +6,26 @@ from types import SimpleNamespace from typing import Optional -import numpy as np - import mne +import numpy as np from mne.coreg import Coregistration from mne_bids import BIDSPath, get_head_mri_trans +from ..._config_import import _import_config from ..._config_utils import ( - get_fs_subject, - get_subjects, + _bids_kwargs, _get_bem_conductivity, + _meg_in_ch_types, + get_fs_subject, get_fs_subjects_dir, get_runs, - _meg_in_ch_types, get_sessions, - _bids_kwargs, + get_subjects, ) -from ..._config_import import _import_config -from ..._logging import logger, gen_log_kwargs +from ..._logging import gen_log_kwargs, logger from ..._parallel import get_parallel_backend, parallel_func from ..._report import _open_report, _render_bem -from ..._run import failsafe_run, save_logs, _prep_out_files +from ..._run import _prep_out_files, failsafe_run, save_logs def _prepare_trans_template( diff --git a/mne_bids_pipeline/steps/source/_05_make_inverse.py b/mne_bids_pipeline/steps/source/_05_make_inverse.py index 449675817..54f9fd0ae 100644 --- a/mne_bids_pipeline/steps/source/_05_make_inverse.py +++ b/mne_bids_pipeline/steps/source/_05_make_inverse.py @@ -8,25 +8,25 @@ import mne from mne.minimum_norm import ( - make_inverse_operator, apply_inverse, + make_inverse_operator, write_inverse_operator, ) from mne_bids import BIDSPath from ..._config_utils import ( + _bids_kwargs, + get_fs_subject, + get_fs_subjects_dir, get_noise_cov_bids_path, + get_sessions, get_subjects, sanitize_cond_name, - get_sessions, - get_fs_subjects_dir, - get_fs_subject, - _bids_kwargs, ) -from ..._logging import logger, gen_log_kwargs +from ..._logging import gen_log_kwargs, logger from ..._parallel import get_parallel_backend, parallel_func -from ..._report import _open_report, _sanitize_cond_tag, _all_conditions -from ..._run import failsafe_run, save_logs, _sanitize_callable, _prep_out_files +from ..._report import _all_conditions, _open_report, _sanitize_cond_tag +from ..._run import _prep_out_files, _sanitize_callable, failsafe_run, save_logs def get_input_fnames_inverse( diff --git a/mne_bids_pipeline/steps/source/_99_group_average.py b/mne_bids_pipeline/steps/source/_99_group_average.py index 9e855d6df..eb26c1c5f 100644 --- a/mne_bids_pipeline/steps/source/_99_group_average.py +++ b/mne_bids_pipeline/steps/source/_99_group_average.py @@ -6,23 +6,22 @@ from types import SimpleNamespace from typing import Optional -import numpy as np - import mne +import numpy as np from mne_bids import BIDSPath from ..._config_utils import ( + _bids_kwargs, + get_fs_subject, get_fs_subjects_dir, + get_sessions, get_subjects, sanitize_cond_name, - get_fs_subject, - get_sessions, - _bids_kwargs, ) -from ..._logging import logger, gen_log_kwargs +from ..._logging import gen_log_kwargs, logger from ..._parallel import get_parallel_backend, parallel_func from ..._report import _all_conditions, _open_report -from ..._run import failsafe_run, save_logs, _prep_out_files +from ..._run import _prep_out_files, failsafe_run, 
save_logs def _stc_path( diff --git a/mne_bids_pipeline/steps/source/__init__.py b/mne_bids_pipeline/steps/source/__init__.py index c748f7f8b..89b757670 100644 --- a/mne_bids_pipeline/steps/source/__init__.py +++ b/mne_bids_pipeline/steps/source/__init__.py @@ -1,11 +1,13 @@ """Source-space analysis.""" -from . import _01_make_bem_surfaces -from . import _02_make_bem_solution -from . import _03_setup_source_space -from . import _04_make_forward -from . import _05_make_inverse -from . import _99_group_average +from . import ( + _01_make_bem_surfaces, + _02_make_bem_solution, + _03_setup_source_space, + _04_make_forward, + _05_make_inverse, + _99_group_average, +) _STEPS = ( _01_make_bem_surfaces, diff --git a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py index 47fcb5846..3adfbab82 100644 --- a/mne_bids_pipeline/tests/configs/config_ERP_CORE.py +++ b/mne_bids_pipeline/tests/configs/config_ERP_CORE.py @@ -1,5 +1,4 @@ -""" -ERP CORE +"""ERP CORE. This example demonstrate how to process 5 participants from the [ERP CORE](https://erpinfo.org/erp-core) dataset. It shows how to obtain 7 ERP @@ -24,9 +23,10 @@ [https://doi.org/10.1016/j.neuroimage.2020.117465](https://doi.org/10.1016/j.neuroimage.2020.117465) """ import argparse -import mne import sys +import mne + study_name = "ERP-CORE" bids_root = "~/mne_data/ERP_CORE" deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ERP_CORE" diff --git a/mne_bids_pipeline/tests/configs/config_ds000117.py b/mne_bids_pipeline/tests/configs/config_ds000117.py index b46db99bd..65e213e24 100644 --- a/mne_bids_pipeline/tests/configs/config_ds000117.py +++ b/mne_bids_pipeline/tests/configs/config_ds000117.py @@ -1,6 +1,4 @@ -""" -Faces dataset -""" +"""Faces dataset.""" study_name = "ds000117" bids_root = "~/mne_data/ds000117" diff --git a/mne_bids_pipeline/tests/configs/config_ds000246.py b/mne_bids_pipeline/tests/configs/config_ds000246.py index 6cb3a8148..0c516796d 100644 --- a/mne_bids_pipeline/tests/configs/config_ds000246.py +++ b/mne_bids_pipeline/tests/configs/config_ds000246.py @@ -1,5 +1,4 @@ -""" -Brainstorm - Auditory Dataset. +"""Brainstorm - Auditory Dataset. See https://openneuro.org/datasets/ds000246/versions/1.0.0 for more information. 
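The docstring edits in these test configs all apply one rule: the summary collapses onto the line with the opening triple quotes and gains a terminal period, matching the numpy pydocstyle convention configured for ruff earlier in the series. The shape of the change, sketched on a hypothetical config module (the names are illustrative, not one of the pipeline's test datasets):

```python
# Before: summary on its own line, without a terminal period
#
# """
# Hypothetical resting-state dataset
# """

"""Hypothetical resting-state dataset."""

study_name = "my_dataset"
bids_root = f"~/mne_data/{study_name}"
```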
diff --git a/mne_bids_pipeline/tests/configs/config_ds000247.py b/mne_bids_pipeline/tests/configs/config_ds000247.py index 8d2b0451f..0a321d8fe 100644 --- a/mne_bids_pipeline/tests/configs/config_ds000247.py +++ b/mne_bids_pipeline/tests/configs/config_ds000247.py @@ -1,9 +1,6 @@ -""" -OMEGA Resting State Sample Data -""" +"""OMEGA Resting State Sample Data.""" import numpy as np - study_name = "ds000247" bids_root = f"~/mne_data/{study_name}" deriv_root = f"~/mne_data/derivatives/mne-bids-pipeline/{study_name}" diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py b/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py index f09fdc6d5..9b77f36b5 100644 --- a/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py +++ b/mne_bids_pipeline/tests/configs/config_ds000248_FLASH_BEM.py @@ -1,6 +1,4 @@ -""" -MNE Sample Data: BEM from FLASH images -""" +"""MNE Sample Data: BEM from FLASH images.""" study_name = "ds000248" bids_root = "~/mne_data/ds000248" deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_FLASH_BEM" diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py b/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py index df315e035..76fee45e3 100644 --- a/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py +++ b/mne_bids_pipeline/tests/configs/config_ds000248_T1_BEM.py @@ -1,6 +1,4 @@ -""" -MNE Sample Data: BEM from T1 images -""" +"""MNE Sample Data: BEM from T1 images.""" study_name = "ds000248" bids_root = "~/mne_data/ds000248" diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_base.py b/mne_bids_pipeline/tests/configs/config_ds000248_base.py index b80b6f0f0..6ffd9644e 100644 --- a/mne_bids_pipeline/tests/configs/config_ds000248_base.py +++ b/mne_bids_pipeline/tests/configs/config_ds000248_base.py @@ -1,6 +1,4 @@ -""" -MNE Sample Data: M/EEG combined processing -""" +"""MNE Sample Data: M/EEG combined processing.""" import mne study_name = "ds000248" diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py b/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py index 9262fdcb8..475ca5d67 100644 --- a/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py +++ b/mne_bids_pipeline/tests/configs/config_ds000248_coreg_surfaces.py @@ -1,6 +1,4 @@ -""" -MNE Sample Data: Head surfaces from FreeSurfer surfaces for coregistration step -""" +"""MNE Sample Data: Head surfaces from FreeSurfer surfaces for coregistration step.""" study_name = "ds000248" bids_root = "~/mne_data/ds000248" diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_ica.py b/mne_bids_pipeline/tests/configs/config_ds000248_ica.py index 176a2f592..ebc0ddc88 100644 --- a/mne_bids_pipeline/tests/configs/config_ds000248_ica.py +++ b/mne_bids_pipeline/tests/configs/config_ds000248_ica.py @@ -1,6 +1,4 @@ -""" -MNE Sample Data: ICA -""" +"""MNE Sample Data: ICA.""" study_name = 'MNE "sample" dataset' bids_root = "~/mne_data/ds000248" deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds000248_ica" diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py b/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py index 9941d2842..3b83b0e6e 100644 --- a/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py +++ b/mne_bids_pipeline/tests/configs/config_ds000248_no_mri.py @@ -1,6 +1,4 @@ -""" -MNE Sample Data: Using the `fsaverage` template MRI -""" +"""MNE Sample Data: Using the `fsaverage` template MRI.""" study_name = "ds000248" bids_root = "~/mne_data/ds000248" diff --git 
a/mne_bids_pipeline/tests/configs/config_ds001810.py b/mne_bids_pipeline/tests/configs/config_ds001810.py index 508a99e64..606fee3c8 100644 --- a/mne_bids_pipeline/tests/configs/config_ds001810.py +++ b/mne_bids_pipeline/tests/configs/config_ds001810.py @@ -1,6 +1,4 @@ -""" -tDCS EEG -""" +"""tDCS EEG.""" study_name = "ds001810" bids_root = "~/mne_data/ds001810" diff --git a/mne_bids_pipeline/tests/configs/config_ds003104.py b/mne_bids_pipeline/tests/configs/config_ds003104.py index c88d07161..2414371c0 100644 --- a/mne_bids_pipeline/tests/configs/config_ds003104.py +++ b/mne_bids_pipeline/tests/configs/config_ds003104.py @@ -1,5 +1,4 @@ -"""Somato -""" +"""Somato.""" study_name = "MNE-somato-data-anonymized" bids_root = "~/mne_data/ds003104" deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds003104" diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py index 0decbacc9..756d36fbc 100644 --- a/mne_bids_pipeline/tests/configs/config_ds003392.py +++ b/mne_bids_pipeline/tests/configs/config_ds003392.py @@ -1,6 +1,4 @@ -""" -hMT+ Localizer -""" +"""hMT+ Localizer.""" study_name = "localizer" bids_root = "~/mne_data/ds003392" deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/ds003392" diff --git a/mne_bids_pipeline/tests/configs/config_ds003775.py b/mne_bids_pipeline/tests/configs/config_ds003775.py index 4dae88993..980bed232 100644 --- a/mne_bids_pipeline/tests/configs/config_ds003775.py +++ b/mne_bids_pipeline/tests/configs/config_ds003775.py @@ -1,6 +1,4 @@ -""" -SRM Resting-state EEG -""" +"""SRM Resting-state EEG.""" study_name = "ds003775" bids_root = "~/mne_data/ds003775" diff --git a/mne_bids_pipeline/tests/configs/config_ds004107.py b/mne_bids_pipeline/tests/configs/config_ds004107.py index 7a32d952c..6e0eb1cc6 100644 --- a/mne_bids_pipeline/tests/configs/config_ds004107.py +++ b/mne_bids_pipeline/tests/configs/config_ds004107.py @@ -1,5 +1,4 @@ -""" -MIND DATA +"""MIND DATA. M.P. Weisend, F.M. Hanlon, R. Montaño, S.P. Ahlfors, A.C. Leuthold, D. Pantazis, J.C. Mosher, A.P. Georgopoulos, M.S. Hämäläinen, C.J. diff --git a/mne_bids_pipeline/tests/configs/config_ds004229.py b/mne_bids_pipeline/tests/configs/config_ds004229.py index e4ca6d449..956f92010 100644 --- a/mne_bids_pipeline/tests/configs/config_ds004229.py +++ b/mne_bids_pipeline/tests/configs/config_ds004229.py @@ -1,5 +1,4 @@ -""" -Single-subject infant dataset for testing maxwell_filter with movecomp. +"""Single-subject infant dataset for testing maxwell_filter with movecomp. 
https://openneuro.org/datasets/ds004229 """ diff --git a/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py b/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py index 643e51799..fbe34b11a 100644 --- a/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py +++ b/mne_bids_pipeline/tests/configs/config_eeg_matchingpennies.py @@ -1,6 +1,4 @@ -""" -Matchingpennies EEG experiment -""" +"""Matchingpennies EEG experiment.""" study_name = "eeg_matchingpennies" bids_root = "~/mne_data/eeg_matchingpennies" diff --git a/mne_bids_pipeline/tests/conftest.py b/mne_bids_pipeline/tests/conftest.py index fa2014634..bd3a1f485 100644 --- a/mne_bids_pipeline/tests/conftest.py +++ b/mne_bids_pipeline/tests/conftest.py @@ -52,7 +52,8 @@ def pytest_configure(config): # self._add_ica( #../python_env/lib/python3.10/site-packages/mne/report/report.py:1872: in _add_ica # self._add_ica_artifact_sources( - #../python_env/lib/python3.10/site-packages/mne/report/report.py:1713: in _add_ica_artifact_sources + #../python_env/lib/python3.10/site-packages/mne/report/report.py:1713: + # in _add_ica_artifact_sources # self._add_figure( always:constrained_layout not applied.*:UserWarning ignore:datetime\.datetime\.utcfromtimestamp.*:DeprecationWarning diff --git a/mne_bids_pipeline/tests/datasets.py b/mne_bids_pipeline/tests/datasets.py index b50454251..f96a01042 100644 --- a/mne_bids_pipeline/tests/datasets.py +++ b/mne_bids_pipeline/tests/datasets.py @@ -1,6 +1,6 @@ """Definition of the testing datasets.""" -from typing import Dict, List, TypedDict +from typing import TypedDict # If not supplied below, the effective defaults are listed in comments @@ -9,12 +9,12 @@ class DATASET_OPTIONS_T(TypedDict, total=False): openneuro: str # "" osf: str # "" web: str # "" - include: List[str] # [] - exclude: List[str] # [] + include: list[str] # [] + exclude: list[str] # [] hash: str # "" -DATASET_OPTIONS: Dict[str, DATASET_OPTIONS_T] = { +DATASET_OPTIONS: dict[str, DATASET_OPTIONS_T] = { "ERP_CORE": { # original dataset: "osf": "9f5w7" "web": "https://osf.io/3zk6n/download?version=2", diff --git a/mne_bids_pipeline/tests/test_cli.py b/mne_bids_pipeline/tests/test_cli.py index 607cbdd67..45532c3ce 100644 --- a/mne_bids_pipeline/tests/test_cli.py +++ b/mne_bids_pipeline/tests/test_cli.py @@ -2,7 +2,9 @@ import importlib import sys + import pytest + from mne_bids_pipeline._main import main diff --git a/mne_bids_pipeline/tests/test_documented.py b/mne_bids_pipeline/tests/test_documented.py index 097fc1032..dd90f7ad5 100644 --- a/mne_bids_pipeline/tests/test_documented.py +++ b/mne_bids_pipeline/tests/test_documented.py @@ -1,13 +1,14 @@ """Test that all config values are documented.""" import ast -from pathlib import Path import os import re +from pathlib import Path + import yaml +from mne_bids_pipeline._config_import import _get_default_config from mne_bids_pipeline.tests.datasets import DATASET_OPTIONS from mne_bids_pipeline.tests.test_run import TEST_SUITE -from mne_bids_pipeline._config_import import _get_default_config root_path = Path(__file__).parent.parent @@ -15,7 +16,7 @@ def test_options_documented(): """Test that all options are suitably documented.""" # use ast to parse _config.py for assignments - with open(root_path / "_config.py", "r") as fid: + with open(root_path / "_config.py") as fid: contents = fid.read() contents = ast.parse(contents) in_config = [ @@ -41,7 +42,7 @@ def test_options_documented(): if not fname.endswith(".md"): continue # This is a .md file - with open(Path(dirpath) / fname, 
"r") as fid: + with open(Path(dirpath) / fname) as fid: for line in fid: if not line.startswith(key): continue @@ -67,7 +68,7 @@ def test_datasets_in_doc(): # So let's make sure they stay in sync. # 1. Read cache, test, etc. entries from CircleCI - with open(root_path.parent / ".circleci" / "config.yml", "r") as fid: + with open(root_path.parent / ".circleci" / "config.yml") as fid: circle_yaml_src = fid.read() circle_yaml = yaml.safe_load(circle_yaml_src) caches = [job[6:] for job in circle_yaml["jobs"] if job.startswith("cache_")] @@ -134,7 +135,7 @@ def ignore_unknown(self, node): None, SafeLoaderIgnoreUnknown.ignore_unknown ) - with open(root_path.parent / "docs" / "mkdocs.yml", "r") as fid: + with open(root_path.parent / "docs" / "mkdocs.yml") as fid: examples = yaml.load(fid.read(), Loader=SafeLoaderIgnoreUnknown) examples = [n for n in examples["nav"] if list(n)[0] == "Examples"][0] examples = [ex for ex in examples["Examples"] if isinstance(ex, str)] diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py index b394d6f0b..4eee1aa02 100644 --- a/mne_bids_pipeline/tests/test_run.py +++ b/mne_bids_pipeline/tests/test_run.py @@ -1,14 +1,15 @@ """Download test data and run a test suite.""" -import sys +import os import shutil +import sys +from collections.abc import Collection from pathlib import Path -from typing import Collection, Dict, Optional, TypedDict -import os +from typing import Optional, TypedDict import pytest -from mne_bids_pipeline._main import main from mne_bids_pipeline._download import main as download_main +from mne_bids_pipeline._main import main BIDS_PIPELINE_DIR = Path(__file__).absolute().parents[1] @@ -24,12 +25,12 @@ class _TestOptionsT(TypedDict, total=False): config: str # f"config_{key}.py" steps: Collection[str] # ("preprocessing", "sensor") task: Optional[str] # None - env: Dict[str, str] # {} + env: dict[str, str] # {} requires: Collection[str] # () extra_config: str # "" -TEST_SUITE: Dict[str, _TestOptionsT] = { +TEST_SUITE: dict[str, _TestOptionsT] = { "ds003392": {}, "ds004229": {}, "ds001971": {}, diff --git a/mne_bids_pipeline/tests/test_validation.py b/mne_bids_pipeline/tests/test_validation.py index 25d5abdaa..c47432155 100644 --- a/mne_bids_pipeline/tests/test_validation.py +++ b/mne_bids_pipeline/tests/test_validation.py @@ -1,4 +1,5 @@ import pytest + from mne_bids_pipeline._config_import import _import_config diff --git a/mne_bids_pipeline/typing.py b/mne_bids_pipeline/typing.py index 7b989309c..c52484f15 100644 --- a/mne_bids_pipeline/typing.py +++ b/mne_bids_pipeline/typing.py @@ -2,31 +2,30 @@ import pathlib import sys -from typing import Union, List, Dict -from typing_extensions import Annotated +from typing import Annotated, Union if sys.version_info < (3, 12): from typing_extensions import TypedDict else: from typing import TypedDict +import mne import numpy as np from numpy.typing import ArrayLike from pydantic import PlainValidator -import mne PathLike = Union[str, pathlib.Path] class ArbitraryContrast(TypedDict): name: str - conditions: List[str] - weights: List[float] + conditions: list[str] + weights: list[float] class LogKwargsT(TypedDict): msg: str - extra: Dict[str, str] + extra: dict[str, str] class ReferenceRunParams(TypedDict): diff --git a/pyproject.toml b/pyproject.toml index c3c5dbb2b..bac831873 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -116,7 +116,17 @@ testpaths = ["mne_bids_pipeline"] junit_family = "xunit2" [tool.ruff] +select = ["A", "B006", "D", "E", "F", "I", "W", "UP"] exclude = 
["**/freesurfer/contrib", "dist/", "build/"] +ignore = [ + "D100", # Missing docstring in public module + "D101", # Missing docstring in public class + "D103", # Missing docstring in public function + "D104", # Missing docstring in public package + "D413", # Missing blank line after last section + "UP031", # Use format specifiers instead of percent format + "UP035", # Import Iterable from collections.abc +] -[tool.black] -exclude = "(.*/freesurfer/contrib/.*)|(dist/)|(build/)" +[tool.ruff.pydocstyle] +convention = "numpy" From ce233bdd53b2e8df31a9b82898efd432cfa14d71 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Sat, 27 Jan 2024 11:58:13 -0500 Subject: [PATCH 4/8] ENH: Add artifact regression (#837) --- .circleci/config.yml | 56 ++++++ docs/mkdocs.yml | 3 +- docs/source/examples/gen_examples.py | 14 +- docs/source/settings/preprocessing/ssp_ica.md | 1 + docs/source/v1.6.md.inc | 4 +- mne_bids_pipeline/_config.py | 57 ++++-- mne_bids_pipeline/_config_import.py | 54 ++---- mne_bids_pipeline/_download.py | 15 +- mne_bids_pipeline/_import_data.py | 33 ++-- mne_bids_pipeline/_report.py | 8 +- mne_bids_pipeline/_run.py | 9 +- .../preprocessing/_04_frequency_filter.py | 34 +++- .../preprocessing/_05_regress_artifact.py | 172 ++++++++++++++++++ .../steps/preprocessing/_06a_run_ica.py | 3 +- .../steps/preprocessing/_06b_run_ssp.py | 5 +- ...{_05_make_epochs.py => _07_make_epochs.py} | 0 .../{_07a_apply_ica.py => _08a_apply_ica.py} | 2 +- .../{_07b_apply_ssp.py => _08b_apply_ssp.py} | 4 +- .../{_08_ptp_reject.py => _09_ptp_reject.py} | 0 .../steps/preprocessing/__init__.py | 18 +- .../configs/config_MNE_phantom_KIT_data.py | 28 +++ mne_bids_pipeline/tests/datasets.py | 4 + mne_bids_pipeline/tests/test_run.py | 3 + mne_bids_pipeline/tests/test_validation.py | 2 +- 24 files changed, 427 insertions(+), 102 deletions(-) create mode 100644 mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py rename mne_bids_pipeline/steps/preprocessing/{_05_make_epochs.py => _07_make_epochs.py} (100%) rename mne_bids_pipeline/steps/preprocessing/{_07a_apply_ica.py => _08a_apply_ica.py} (99%) rename mne_bids_pipeline/steps/preprocessing/{_07b_apply_ssp.py => _08b_apply_ssp.py} (96%) rename mne_bids_pipeline/steps/preprocessing/{_08_ptp_reject.py => _09_ptp_reject.py} (100%) create mode 100644 mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 62e687cba..ceb51dfbf 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -297,6 +297,26 @@ jobs: paths: - ~/mne_data/eeg_matchingpennies + cache_MNE-phantom-KIT-data: + <<: *imageconfig + steps: + - attach_workspace: + at: ~/ + - restore_cache: + keys: + - data-cache-MNE-phantom-KIT-data-1 + - bash_env + - gitconfig # email address is needed for datalad + - run: + name: Get MNE-phantom-KIT-data + command: | + $DOWNLOAD_DATA MNE-phantom-KIT-data + - codecov/upload + - save_cache: + key: data-cache-MNE-phantom-KIT-data-1 + paths: + - ~/mne_data/MNE-phantom-KIT-data + cache_ERP_CORE: <<: *imageconfig steps: @@ -765,6 +785,32 @@ jobs: paths: - mne_data/derivatives/mne-bids-pipeline/eeg_matchingpennies/*/*/*.html + test_MNE-phantom-KIT-data: + <<: *imageconfig + steps: + - attach_workspace: + at: ~/ + - bash_env + - restore_cache: + keys: + - data-cache-MNE-phantom-KIT-data-1 + - run: + name: test MNE-phantom-KIT-data + command: $RUN_TESTS MNE-phantom-KIT-data + - codecov/upload + - store_test_results: + path: ./test-results + - store_artifacts: + path: ./test-results + destination: 
test-results + - store_artifacts: + path: /home/circleci/reports/MNE-phantom-KIT-data + destination: reports/MNE-phantom-KIT-data + - persist_to_workspace: + root: ~/ + paths: + - mne_data/derivatives/mne-bids-pipeline/MNE-phantom-KIT-data/*/*/*.html + test_ERP_CORE_N400: <<: *imageconfig resource_class: large @@ -1191,6 +1237,15 @@ workflows: - cache_eeg_matchingpennies <<: *filter_tags + - cache_MNE-phantom-KIT-data: + requires: + - setup_env + <<: *filter_tags + - test_MNE-phantom-KIT-data: + requires: + - cache_MNE-phantom-KIT-data + <<: *filter_tags + - cache_ERP_CORE: requires: - setup_env @@ -1242,6 +1297,7 @@ workflows: - test_ds003392 - test_ds004229 - test_eeg_matchingpennies + - test_MNE-phantom-KIT-data - test_ERP_CORE_N400 - test_ERP_CORE_ERN - test_ERP_CORE_LRP diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 8763aa9c0..29107ff32 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -90,7 +90,7 @@ nav: - Epoching: settings/preprocessing/epochs.md - Artifact removal: - Stimulation artifact: settings/preprocessing/stim_artifact.md - - SSP & ICA: settings/preprocessing/ssp_ica.md + - SSP, ICA, and artifact regression: settings/preprocessing/ssp_ica.md - Amplitude-based artifact rejection: settings/preprocessing/artifacts.md - Sensor-level analysis: - Condition contrasts: settings/sensor/contrasts.md @@ -116,6 +116,7 @@ nav: - examples/ds000248_no_mri.md - examples/ds003104.md - examples/eeg_matchingpennies.md + - examples/MNE-phantom-KIT-data.md - examples/ds001810.md - examples/ds000117.md - examples/ds003775.md diff --git a/docs/source/examples/gen_examples.py b/docs/source/examples/gen_examples.py index 1f2514274..b55e526d8 100755 --- a/docs/source/examples/gen_examples.py +++ b/docs/source/examples/gen_examples.py @@ -63,6 +63,8 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict: key = "Maxwell filter" funcs[key] = funcs[key] or config.use_maxwell_filter funcs["Frequency filter"] = config.l_freq or config.h_freq + key = "Artifact regression" + funcs[key] = funcs[key] or (config.regress_artifact is not None) key = "SSP" funcs[key] = funcs[key] or (config.spatial_filter == "ssp") key = "ICA" @@ -144,6 +146,7 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict: logger.warning(f"Dataset {dataset_name} has no HTML report.") continue + assert dataset_options_key in DATASET_OPTIONS, dataset_options_key options = DATASET_OPTIONS[dataset_options_key].copy() # we modify locally report_str = "\n## Generated output\n\n" @@ -200,13 +203,18 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict: f"{fname.name} :fontawesome-solid-square-poll-vertical:\n\n" ) - assert sum(key in options for key in ("openneuro", "git", "web", "datalad")) == 1 + assert ( + sum(key in options for key in ("openneuro", "git", "web", "datalad", "mne")) + == 1 + ) if "openneuro" in options: url = f'https://openneuro.org/datasets/{options["openneuro"]}' elif "git" in options: url = options["git"] elif "web" in options: url = options["web"] + elif "mne" in options: + url = f"https://mne.tools/dev/generated/mne.datasets.{options['mne']}.data_path.html" # noqa: E501 else: assert "datalad" in options # guaranteed above url = "" @@ -246,7 +254,9 @@ def _gen_demonstrated_funcs(example_config_path: Path) -> dict: # TODO: For things like ERP_CORE_ERN, decoding_csp are not populated # properly by the root config - config_path = root / "tests" / "configs" / f"config_{dataset_name}.py" + config_path = ( + root / "tests" / "configs" / f"config_{dataset_name.replace('-', '_')}.py" + ) 
config = config_path.read_text(encoding="utf-8-sig").strip() descr_end_idx = config[2:].find('"""') config_descr = "# " + config[: descr_end_idx + 1].replace('"""', "").strip() diff --git a/docs/source/settings/preprocessing/ssp_ica.md b/docs/source/settings/preprocessing/ssp_ica.md index b132ef4bf..f25110729 100644 --- a/docs/source/settings/preprocessing/ssp_ica.md +++ b/docs/source/settings/preprocessing/ssp_ica.md @@ -11,6 +11,7 @@ tags: ::: mne_bids_pipeline._config options: members: + - regress_artifact - spatial_filter - min_ecg_epochs - min_eog_epochs diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc index cf5596cb1..afb7835c3 100644 --- a/docs/source/v1.6.md.inc +++ b/docs/source/v1.6.md.inc @@ -2,9 +2,9 @@ ## vX.Y.0 (unreleased) -[//]: # (### :new: New features & enhancements) +:new: New features & enhancements -[//]: # (- Whatever (#000 by @whoever)) +- Added [`regress_artifact`][mne_bids_pipeline._config.regress_artifact] to allow artifact regression (e.g., of MEG reference sensors in KIT systems) (#837 by @larsoner) [//]: # (### :warning: Behavior changes) diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py index 652e5ebfb..e3c7626bb 100644 --- a/mne_bids_pipeline/_config.py +++ b/mne_bids_pipeline/_config.py @@ -1,7 +1,8 @@ # Default settings for data processing and analysis. -from typing import Callable, Iterable, Literal, Optional, Union +from typing import Annotated, Any, Callable, Literal, Optional, Sequence, Union +from annotated_types import Ge, Interval, Len from mne import Covariance from mne_bids import BIDSPath @@ -94,7 +95,7 @@ The task to process. """ -runs: Union[Iterable, Literal["all"]] = "all" +runs: Union[Sequence, Literal["all"]] = "all" """ The runs to process. If `'all'`, will process all runs found in the BIDS dataset. @@ -143,7 +144,7 @@ The BIDS `space` entity. """ -plot_psd_for_runs: Union[Literal["all"], Iterable[str]] = "all" +plot_psd_for_runs: Union[Literal["all"], Sequence[str]] = "all" """ For which runs to add a power spectral density (PSD) plot to the generated report. This can take a considerable amount of time if you have many long @@ -151,7 +152,7 @@ plotting. """ -subjects: Union[Iterable[str], Literal["all"]] = "all" +subjects: Union[Sequence[str], Literal["all"]] = "all" """ Subjects to analyze. If `'all'`, include all subjects. To only include a subset of subjects, pass a list of their identifiers. Even @@ -171,7 +172,7 @@ ``` """ -exclude_subjects: Iterable[str] = [] +exclude_subjects: Sequence[str] = [] """ Specify subjects to exclude from analysis. The MEG empty-room mock-subject is automatically excluded from regular analysis. @@ -201,7 +202,7 @@ covariance (via `noise_cov='rest'`). """ -ch_types: Iterable[Literal["meg", "mag", "grad", "eeg"]] = [] +ch_types: Annotated[Sequence[Literal["meg", "mag", "grad", "eeg"]], Len(1, 4)] = [] """ The channel types to consider. @@ -252,7 +253,7 @@ ``` """ -eog_channels: Optional[Iterable[str]] = None +eog_channels: Optional[Sequence[str]] = None """ Specify EOG channels to use, or create virtual EOG channels. @@ -320,7 +321,7 @@ ``` """ -eeg_reference: Union[Literal["average"], str, Iterable["str"]] = "average" +eeg_reference: Union[Literal["average"], str, Sequence["str"]] = "average" """ The EEG reference to use. If `average`, will use the average reference, i.e. the average across all channels. 
If a string, must be the name of a single @@ -371,7 +372,7 @@ ``` """ -drop_channels: Iterable[str] = [] +drop_channels: Sequence[str] = [] """ Names of channels to remove from the data. This can be useful, for example, if you have added a new bipolar channel via `eeg_bipolar_channels` and now wish @@ -385,7 +386,7 @@ """ analyze_channels: Union[ - Literal["all"], Literal["ch_types"], Iterable["str"] + Literal["all"], Literal["ch_types"], Sequence["str"] ] = "ch_types" """ The names of the channels to analyze during ERP/ERF and time-frequency analysis @@ -789,7 +790,7 @@ Keep it `None` if no lowpass filtering should be applied. """ -notch_freq: Optional[Union[float, Iterable[float]]] = None +notch_freq: Optional[Union[float, Sequence[float]]] = None """ Notch filter frequency. More than one frequency can be supplied, e.g. to remove harmonics. Keep it `None` if no notch filter should be applied. @@ -827,7 +828,7 @@ Specifies the transition bandwidth of the notch filter. The default is `1.`. """ -notch_widths: Optional[Union[float, Iterable[float]]] = None +notch_widths: Optional[Union[float, Sequence[float]]] = None """ Specifies the width of each stop band. `None` uses the MNE default. """ @@ -931,7 +932,7 @@ window for metadata generation. """ -epochs_metadata_keep_first: Optional[Iterable[str]] = None +epochs_metadata_keep_first: Optional[Sequence[str]] = None """ Event groupings using hierarchical event descriptors (HEDs) for which to store the time of the **first** occurrence of any event of this group in a new column @@ -959,7 +960,7 @@ and `first_stimulus`. """ -epochs_metadata_keep_last: Optional[Iterable[str]] = None +epochs_metadata_keep_last: Optional[Sequence[str]] = None """ Same as `epochs_metadata_keep_first`, but for keeping the **last** occurrence of matching event types. The columns indicating the event types @@ -979,7 +980,7 @@ ``` """ # noqa: E501 -conditions: Optional[Union[Iterable[str], dict[str, str]]] = None +conditions: Optional[Union[Sequence[str], dict[str, str]]] = None """ The time-locked events based on which to create evoked responses. This can either be name of the experimental condition as specified in the @@ -1058,7 +1059,7 @@ ``` """ -contrasts: Iterable[Union[tuple[str, str], ArbitraryContrast]] = [] +contrasts: Sequence[Union[tuple[str, str], ArbitraryContrast]] = [] """ The conditions to contrast via a subtraction of ERPs / ERFs. The list elements can either be tuples or dictionaries (or a mix of both). Each element in the @@ -1125,6 +1126,24 @@ # # Currently you cannot use both. +regress_artifact: Optional[dict[str, Any]] = None +""" +Keyword arguments to pass to the `mne.preprocessing.EOGRegression` model used +in `mne.preprocessing.regress_artifact`. If `None`, no time-domain regression will +be applied. Note that any channels picked in `regress_artifact["picks_artifact"]` will +have the same time-domain filters applied to them as the experimental data. + +Artifact regression is applied before SSP or ICA. + +???+ example "Example" + For example, if you have MEG reference channel data recorded in three + miscellaneous channels, you could do: + + ```python + regress_artifact = {"picks": "meg", "picks_artifact": ["MISC 001", "MISC 002", "MISC 003"]} + ``` +""" # noqa: E501 + spatial_filter: Optional[Literal["ssp", "ica"]] = None """ Whether to use a spatial filter to detect and remove artifacts. The BIDS @@ -1516,7 +1535,7 @@ you don't need to be worried about **exactly** balancing class sizes. 
""" -decoding_n_splits: int = 5 +decoding_n_splits: Annotated[int, Ge(2)] = 5 """ The number of folds (also called "splits") to use in the K-fold cross-validation scheme. @@ -1577,7 +1596,7 @@ test to determine the significance of the decoding scores across participants. """ -cluster_permutation_p_threshold: float = 0.05 +cluster_permutation_p_threshold: Annotated[float, Interval(gt=0, lt=1)] = 0.05 """ The alpha level (p-value, p threshold) to use for rejecting the null hypothesis that the clusters show no significant difference between conditions. This is @@ -1609,7 +1628,7 @@ # TIME-FREQUENCY # -------------- -time_frequency_conditions: Iterable[str] = [] +time_frequency_conditions: Sequence[str] = [] """ The conditions to compute time-frequency decomposition on. diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py index 66fe9583a..db5487cb7 100644 --- a/mne_bids_pipeline/_config_import.py +++ b/mne_bids_pipeline/_config_import.py @@ -12,8 +12,7 @@ import matplotlib import mne import numpy as np -from pydantic import ValidationError -from pydantic.dataclasses import dataclass +from pydantic import BaseModel, ConfigDict, ValidationError from ._logging import gen_log_kwargs, logger from .typing import PathLike @@ -269,17 +268,6 @@ def _check_config(config: SimpleNamespace, config_path: Optional[PathLike]) -> N f'ica_reject["{ch_type}"] ({ica_reject[ch_type]})' ) - if not config.ch_types: - raise ValueError("Please specify ch_types in your configuration.") - - _VALID_TYPES = ("meg", "mag", "grad", "eeg") - if any(ch_type not in _VALID_TYPES for ch_type in config.ch_types): - raise ValueError( - "Invalid channel type passed. Please adjust `ch_types` in your " - f"configuration, got {config.ch_types} but supported types are " - f"{_VALID_TYPES}" - ) - if config.noise_cov == "emptyroom" and "eeg" in config.ch_types: raise ValueError( "You requested to process data that contains EEG channels. In " @@ -312,16 +300,7 @@ def _check_config(config: SimpleNamespace, config_path: Optional[PathLike]) -> N f"but you set baseline={bl}" ) - # check decoding parameters - if config.decoding_n_splits < 2: - raise ValueError("decoding_n_splits should be at least 2.") - # check cluster permutation parameters - if not 0 < config.cluster_permutation_p_threshold < 1: - raise ValueError( - "cluster_permutation_p_threshold should be in the (0, 1) interval." - ) - if config.cluster_n_permutations < 10 / config.cluster_permutation_p_threshold: raise ValueError( "cluster_n_permutations is not big enough to calculate " @@ -380,33 +359,30 @@ def _pydantic_validate( # https://docs.pydantic.dev/latest/usage/dataclasses/ from . 
import _config as root_config - annotations = copy.deepcopy(root_config.__annotations__) # just be safe - attrs = { - key: _default_factory(key, val) - for key, val in root_config.__dict__.items() - if key in annotations - } - # everything should be type annotated, make sure they are - asym = set(attrs).symmetric_difference(set(annotations)) - assert asym == set(), asym + # Modify annotations to add nested strict parsing + annotations = dict() + attrs = dict() + for key, annot in root_config.__annotations__.items(): + annotations[key] = annot + attrs[key] = _default_factory(key, root_config.__dict__[key]) name = "user configuration" if config_path is not None: name += f" from {config_path}" - UserConfig = type( - name, - (object,), - {"__annotations__": annotations, **attrs}, - ) - dataclass_config = dict( + model_config = ConfigDict( arbitrary_types_allowed=False, validate_assignment=True, strict=True, # do not allow float for int for example + extra="forbid", + ) + UserConfig = type( + name, + (BaseModel,), + {"__annotations__": annotations, "model_config": model_config, **attrs}, ) - UserConfig = dataclass(config=dataclass_config)(UserConfig) # Now use pydantic to automagically validate user_vals = {key: val for key, val in config.__dict__.items() if key in annotations} try: - UserConfig(**user_vals) + UserConfig.model_validate(user_vals) except ValidationError as err: raise ValueError(str(err)) from None diff --git a/mne_bids_pipeline/_download.py b/mne_bids_pipeline/_download.py index 45de893ed..46cf17e7a 100644 --- a/mne_bids_pipeline/_download.py +++ b/mne_bids_pipeline/_download.py @@ -77,13 +77,24 @@ def _download_from_web(*, ds_name: str, ds_path: Path): (path / f"{ds_name}.zip").unlink() +def _download_via_mne(*, ds_name: str, ds_path: Path): + assert ds_path.stem == ds_name, ds_path + getattr(mne.datasets, DATASET_OPTIONS[ds_name]["mne"]).data_path( + ds_path.parent, + verbose=True, + ) + + def _download(*, ds_name: str, ds_path: Path): options = DATASET_OPTIONS[ds_name] openneuro_name = options.get("openneuro", "") git_url = options.get("git", "") osf_node = options.get("osf", "") web_url = options.get("web", "") - assert sum(bool(x) for x in (openneuro_name, git_url, osf_node, web_url)) == 1 + mne_mod = options.get("mne", "") + assert ( + sum(bool(x) for x in (openneuro_name, git_url, osf_node, web_url, mne_mod)) == 1 + ) if openneuro_name: download_func = _download_via_openneuro @@ -91,6 +102,8 @@ def _download(*, ds_name: str, ds_path: Path): download_func = _download_via_datalad elif osf_node: raise RuntimeError("OSF downloads are currently not supported.") + elif mne_mod: + download_func = _download_via_mne else: assert web_url download_func = _download_from_web diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py index d7f22240d..be892576b 100644 --- a/mne_bids_pipeline/_import_data.py +++ b/mne_bids_pipeline/_import_data.py @@ -452,7 +452,6 @@ def import_er_data( cfg=cfg, bids_path_bads=bids_path_er_bads_in, ) - raw_er.pick("meg", exclude=[]) # Don't deal with ref for now (initial data quality / auto bad step) if bids_path_ref_in is None: @@ -530,7 +529,7 @@ def _get_bids_path_in( session: Optional[str], run: Optional[str], task: Optional[str], - kind: Literal["orig", "sss"] = "orig", + kind: Literal["orig", "sss", "filt"] = "orig", ) -> BIDSPath: # b/c can be used before this is updated path_kwargs = dict( @@ -544,13 +543,13 @@ def _get_bids_path_in( datatype=get_datatype(config=cfg), check=False, ) - if kind == "sss": + if kind != "orig": 
+ assert kind in ("sss", "filt"), kind path_kwargs["root"] = cfg.deriv_root path_kwargs["suffix"] = "raw" path_kwargs["extension"] = ".fif" - path_kwargs["processing"] = "sss" + path_kwargs["processing"] = kind else: - assert kind == "orig", kind path_kwargs["root"] = cfg.bids_root path_kwargs["suffix"] = None path_kwargs["extension"] = None @@ -566,7 +565,7 @@ def _get_run_path( session: Optional[str], run: Optional[str], task: Optional[str], - kind: Literal["orig", "sss"], + kind: Literal["orig", "sss", "filt"], add_bads: Optional[bool] = None, allow_missing: bool = False, key: Optional[str] = None, @@ -594,7 +593,7 @@ def _get_rest_path( cfg: SimpleNamespace, subject: str, session: Optional[str], - kind: Literal["orig", "sss"], + kind: Literal["orig", "sss", "filt"], add_bads: Optional[bool] = None, ) -> dict: if not (cfg.process_rest and not cfg.task_is_rest): @@ -616,13 +615,14 @@ def _get_noise_path( cfg: SimpleNamespace, subject: str, session: Optional[str], - kind: Literal["orig", "sss"], + kind: Literal["orig", "sss", "filt"], mf_reference_run: Optional[str], add_bads: Optional[bool] = None, ) -> dict: if not (cfg.process_empty_room and get_datatype(config=cfg) == "meg"): return dict() - if kind == "sss": + if kind != "orig": + assert kind in ("sss", "filt") raw_fname = _get_bids_path_in( cfg=cfg, subject=subject, @@ -661,7 +661,7 @@ def _get_run_rest_noise_path( session: Optional[str], run: Optional[str], task: Optional[str], - kind: Literal["orig", "sss"], + kind: Literal["orig", "sss", "filt"], mf_reference_run: Optional[str], add_bads: Optional[bool] = None, ) -> dict: @@ -705,7 +705,7 @@ def _path_dict( cfg: SimpleNamespace, bids_path_in: BIDSPath, add_bads: Optional[bool] = None, - kind: Literal["orig", "sss"], + kind: Literal["orig", "sss", "filt"], allow_missing: bool, key: Optional[str] = None, ) -> dict: @@ -805,3 +805,14 @@ def _import_data_kwargs(*, config: SimpleNamespace, subject: str) -> dict: runs=get_runs(config=config, subject=subject), # XXX needs to accept session! 
**_bids_kwargs(config=config), ) + + +def _get_run_type( + run: Optional[str], + task: Optional[str], +) -> str: + if run is None and task in ("noise", "rest"): + run_type = dict(rest="resting-state", noise="empty-room")[task] + else: + run_type = "experimental" + return run_type diff --git a/mne_bids_pipeline/_report.py b/mne_bids_pipeline/_report.py index ed514925d..80f2f1962 100644 --- a/mne_bids_pipeline/_report.py +++ b/mne_bids_pipeline/_report.py @@ -68,14 +68,13 @@ def _open_report( yield report finally: try: - msg = "Adding config and sys info to report" - logger.info(**gen_log_kwargs(message=msg)) _finalize( report=report, exec_params=exec_params, subject=subject, session=session, run=run, + task=task, ) except Exception as exc: logger.warning(f"Failed: {exc}") @@ -506,12 +505,17 @@ def _finalize( subject: str, session: Optional[str], run: Optional[str], + task: Optional[str], ) -> None: """Add system information and the pipeline configuration to the report.""" # ensure they are always appended titles = ["Configuration file", "System information"] for title in titles: report.remove(title=title, remove_all=True) + # Print this exactly once + if _cached_sys_info.cache_info()[-1] == 0: # never run + msg = "Adding config and sys info to report" + logger.info(**gen_log_kwargs(message=msg)) # No longer need replace=True in these report.add_code( code=exec_params.config_path, diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py index 128b876ed..c7e46267b 100644 --- a/mne_bids_pipeline/_run.py +++ b/mne_bids_pipeline/_run.py @@ -225,13 +225,18 @@ def wrapper(*args, **kwargs): for key, (fname, this_hash) in out_files_hashes.items(): fname = pathlib.Path(fname) if not fname.exists(): - msg = "Output file missing, will recompute …" + msg = ( + f"Output file missing {str(fname)}, " "will recompute …" + ) emoji = "🧩" bad_out_files = True break got_hash = hash_(key, fname, kind="out")[1] if this_hash != got_hash: - msg = "Output file hash mismatch, will recompute …" + msg = ( + f"Output file hash mismatch for {str(fname)}, " + "will recompute …" + ) emoji = "🚫" bad_out_files = True break diff --git a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py index a44a1c70e..fd9c6c874 100644 --- a/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py +++ b/mne_bids_pipeline/steps/preprocessing/_04_frequency_filter.py @@ -20,6 +20,8 @@ import mne import numpy as np +from mne.io.pick import _picks_to_idx +from mne.preprocessing import EOGRegression from ..._config_utils import ( get_runs_tasks, @@ -28,6 +30,7 @@ ) from ..._import_data import ( _get_run_rest_noise_path, + _get_run_type, _import_data_kwargs, import_er_data, import_experimental_data, @@ -69,6 +72,7 @@ def notch_filter( trans_bandwidth: Union[float, Literal["auto"]], notch_widths: Optional[Union[float, Iterable[float]]], run_type: Literal["experimental", "empty-room", "resting-state"], + picks: Optional[np.ndarray], ) -> None: """Filter data channels (MEG and EEG).""" if freqs is None: @@ -86,6 +90,7 @@ def notch_filter( trans_bandwidth=trans_bandwidth, notch_widths=notch_widths, n_jobs=1, + picks=picks, ) @@ -100,6 +105,7 @@ def bandpass_filter( l_trans_bandwidth: Union[float, Literal["auto"]], h_trans_bandwidth: Union[float, Literal["auto"]], run_type: Literal["experimental", "empty-room", "resting-state"], + picks: Optional[np.ndarray], ) -> None: """Filter data channels (MEG and EEG).""" if l_freq is not None and h_freq is None: @@ -122,6 
+128,7 @@ def bandpass_filter(
         l_trans_bandwidth=l_trans_bandwidth,
         h_trans_bandwidth=h_trans_bandwidth,
         n_jobs=1,
+        picks=picks,
     )
 
 
@@ -161,14 +168,10 @@ def filter_data(
     bids_path_in = in_files.pop(in_key)
     bids_path_bads_in = in_files.pop(f"{in_key}-bads", None)
 
-    if run is None and task in ("noise", "rest"):
-        run_type = dict(rest="resting-state", noise="empty-room")[task]
-    else:
-        run_type = "experimental"
-
+    run_type = _get_run_type(run=run, task=task)
+    msg = f"Reading {run_type} recording: " f"{bids_path_in.basename}"
+    logger.info(**gen_log_kwargs(message=msg))
     if cfg.use_maxwell_filter:
-        msg = f"Reading {run_type} recording: " f"{bids_path_in.basename}"
-        logger.info(**gen_log_kwargs(message=msg))
         raw = mne.io.read_raw_fif(bids_path_in)
     elif run is None and task == "noise":
         raw = import_er_data(
@@ -191,6 +194,8 @@ def filter_data(
 
     out_files[in_key] = bids_path_in.copy().update(
         root=cfg.deriv_root,
+        subject=subject,  # save under subject's directory so all files are there
+        session=session,
         processing="filt",
         extension=".fif",
         suffix="raw",
@@ -200,6 +205,18 @@ def filter_data(
         check=False,
     )
 
+    if cfg.regress_artifact is None:
+        picks = None
+    else:
+        # Need to figure out the correct picks to use
+        model = EOGRegression(**cfg.regress_artifact)
+        picks_regress = _picks_to_idx(
+            raw.info, model.picks, none="data", exclude=model.exclude
+        )
+        picks_artifact = _picks_to_idx(raw.info, model.picks_artifact)
+        picks_data = _picks_to_idx(raw.info, "data", exclude=())  # raw.filter default
+        picks = np.unique(np.r_[picks_regress, picks_artifact, picks_data])
+
     raw.load_data()
     notch_filter(
         raw=raw,
@@ -211,6 +228,7 @@ def filter_data(
         trans_bandwidth=cfg.notch_trans_bandwidth,
         notch_widths=cfg.notch_widths,
         run_type=run_type,
+        picks=picks,
     )
     bandpass_filter(
         raw=raw,
@@ -223,6 +241,7 @@ def filter_data(
         h_trans_bandwidth=cfg.h_trans_bandwidth,
         l_trans_bandwidth=cfg.l_trans_bandwidth,
         run_type=run_type,
+        picks=picks,
     )
     resample(
         raw=raw,
@@ -287,6 +306,7 @@ def get_config(
         notch_trans_bandwidth=config.notch_trans_bandwidth,
         notch_widths=config.notch_widths,
         raw_resample_sfreq=config.raw_resample_sfreq,
+        regress_artifact=config.regress_artifact,
         **_import_data_kwargs(config=config, subject=subject),
     )
     return cfg
diff --git a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
new file mode 100644
index 000000000..8a2b2a0f6
--- /dev/null
+++ b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py
@@ -0,0 +1,172 @@
+"""Temporal regression for artifact removal.
+
+Artifact channels (e.g., MEG reference sensors) are regressed out of the data.
+""" + +from types import SimpleNamespace +from typing import Optional + +import mne +from mne.io.pick import _picks_to_idx +from mne.preprocessing import EOGRegression + +from ..._config_utils import ( + get_runs_tasks, + get_sessions, + get_subjects, +) +from ..._import_data import _get_run_rest_noise_path, _get_run_type, _import_data_kwargs +from ..._logging import gen_log_kwargs, logger +from ..._parallel import get_parallel_backend, parallel_func +from ..._report import _add_raw, _open_report +from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs + + +def get_input_fnames_regress_artifact( + *, + cfg: SimpleNamespace, + subject: str, + session: Optional[str], + run: str, + task: Optional[str], +) -> dict: + """Get paths of files required by regress_artifact function.""" + out = _get_run_rest_noise_path( + cfg=cfg, + subject=subject, + session=session, + run=run, + task=task, + kind="filt", + mf_reference_run=cfg.mf_reference_run, + ) + assert len(out) + return out + + +@failsafe_run( + get_input_fnames=get_input_fnames_regress_artifact, +) +def run_regress_artifact( + *, + cfg: SimpleNamespace, + exec_params: SimpleNamespace, + subject: str, + session: Optional[str], + run: str, + task: Optional[str], + in_files: dict, +) -> dict: + model = EOGRegression(proj=False, **cfg.regress_artifact) + out_files = dict() + in_key = f"raw_task-{task}_run-{run}" + bids_path_in = in_files.pop(in_key) + out_files[in_key] = bids_path_in.copy().update(processing="regress") + run_type = _get_run_type(run=run, task=task) + msg = f"Reading {run_type} recording: " f"{bids_path_in.basename}" + logger.info(**gen_log_kwargs(message=msg)) + raw = mne.io.read_raw_fif(bids_path_in).load_data() + projs = raw.info["projs"] + raw.del_proj() + model.fit(raw) + all_types = raw.get_channel_types() + picks = _picks_to_idx(raw.info, model.picks, none="data", exclude=model.exclude) + ch_types = set(all_types[pick] for pick in picks) + del picks + out_files["regress"] = bids_path_in.copy().update( + processing=None, + split=None, + run=None, + suffix="regress", + extension=".h5", + ) + model.apply(raw, copy=False) + if projs: + raw.add_proj(projs) + raw.save(out_files[in_key], overwrite=True) + _update_for_splits(out_files, in_key) + model.save(out_files["regress"], overwrite=True) + assert len(in_files) == 0, in_files.keys() + + # Report + with _open_report( + cfg=cfg, + exec_params=exec_params, + subject=subject, + session=session, + run=run, + task=task, + ) as report: + msg = "Adding regressed raw data to report" + logger.info(**gen_log_kwargs(message=msg)) + figs, captions = list(), list() + for kind in ("mag", "grad", "eeg"): + if kind not in ch_types: + continue + figs.append(model.plot(ch_type=kind)) + captions.append(f"Run {run}: {kind}") + if figs: + report.add_figure( + fig=figs, + caption=captions, + title="Regression weights", + tags=("raw", f"run-{run}", "regression"), + replace=True, + ) + _add_raw( + cfg=cfg, + report=report, + bids_path_in=out_files[in_key], + title="Raw (regression)", + tags=("regression",), + raw=raw, + ) + return _prep_out_files(exec_params=exec_params, out_files=out_files) + + +def get_config( + *, + config: SimpleNamespace, + subject: str, +) -> SimpleNamespace: + cfg = SimpleNamespace( + regress_artifact=config.regress_artifact, + **_import_data_kwargs(config=config, subject=subject), + ) + return cfg + + +def main(*, config: SimpleNamespace) -> None: + """Run artifact regression.""" + if config.regress_artifact is None: + msg = "Skipping …" + 
logger.info(**gen_log_kwargs(message=msg, emoji="skip")) + return + + with get_parallel_backend(config.exec_params): + parallel, run_func = parallel_func( + run_regress_artifact, exec_params=config.exec_params + ) + + logs = parallel( + run_func( + cfg=get_config( + config=config, + subject=subject, + ), + exec_params=config.exec_params, + subject=subject, + session=session, + run=run, + task=task, + ) + for subject in get_subjects(config) + for session in get_sessions(config) + for run, task in get_runs_tasks( + config=config, + subject=subject, + session=session, + ) + ) + + save_logs(config=config, logs=logs) diff --git a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py index 00346df25..7bfef3c56 100644 --- a/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py +++ b/mne_bids_pipeline/steps/preprocessing/_06a_run_ica.py @@ -253,7 +253,7 @@ def get_input_fnames_run_ica( for run in cfg.runs: key = f"raw_run-{run}" in_files[key] = bids_basename.copy().update( - run=run, processing="filt", suffix="raw" + run=run, processing=cfg.processing, suffix="raw" ) _update_for_splits(in_files, key, single=True) return in_files @@ -614,6 +614,7 @@ def get_config( eog_channels=config.eog_channels, rest_epochs_duration=config.rest_epochs_duration, rest_epochs_overlap=config.rest_epochs_overlap, + processing="filt" if config.regress_artifact is None else "regress", **_bids_kwargs(config=config), ) return cfg diff --git a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py index 46b88ee90..7aa0e97de 100644 --- a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py +++ b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py @@ -47,7 +47,7 @@ def get_input_fnames_run_ssp( for run in cfg.runs: key = f"raw_run-{run}" in_files[key] = bids_basename.copy().update( - run=run, processing="filt", suffix="raw" + run=run, processing=cfg.processing, suffix="raw" ) _update_for_splits(in_files, key, single=True) return in_files @@ -66,7 +66,7 @@ def run_ssp( ) -> dict: import matplotlib.pyplot as plt - # compute SSP on first run of raw + # compute SSP on all runs of raw raw_fnames = [in_files.pop(f"raw_run-{run}") for run in cfg.runs] # when saving proj, use run=None @@ -229,6 +229,7 @@ def get_config( epochs_decim=config.epochs_decim, use_maxwell_filter=config.use_maxwell_filter, runs=get_runs(config=config, subject=subject), + processing="filt" if config.regress_artifact is None else "regress", **_bids_kwargs(config=config), ) return cfg diff --git a/mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py similarity index 100% rename from mne_bids_pipeline/steps/preprocessing/_05_make_epochs.py rename to mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py diff --git a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py similarity index 99% rename from mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py rename to mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py index c24d8e015..f4b999cc8 100644 --- a/mne_bids_pipeline/steps/preprocessing/_07a_apply_ica.py +++ b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py @@ -1,4 +1,4 @@ -"""Apply ICA and obtain the cleaned epochs. +"""Apply ICA and obtain the cleaned epochs and raw data. Blinks and ECG artifacts are automatically detected and the corresponding ICA components are removed from the data. 
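For readers skimming this patch, here is a minimal sketch of what the new `_05_regress_artifact.py` step boils down to: it fits MNE-Python's `EOGRegression` on projection-free data and subtracts the artifact-channel contribution in place. This is an illustration only, not part of the patch; the input file name is hypothetical, and the step's BIDS path handling, split handling, and report generation are omitted.

```python
# Sketch only, assuming a config like
# regress_artifact = dict(picks="meg", picks_artifact=["MISC 001", "MISC 002", "MISC 003"])
import mne
from mne.preprocessing import EOGRegression

# Hypothetical filtered raw file produced by the frequency-filter step
raw = mne.io.read_raw_fif("sub-01_task-phantom_proc-filt_raw.fif", preload=True)

projs = raw.info["projs"]
raw.del_proj()  # fit on projection-free data, as the step does (proj=False)
model = EOGRegression(
    proj=False, picks="meg", picks_artifact=["MISC 001", "MISC 002", "MISC 003"]
)
model.fit(raw)                # estimate weights from the reference channels
model.apply(raw, copy=False)  # subtract their contribution in place
if projs:
    raw.add_proj(projs)       # restore any projectors afterwards
```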
diff --git a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py similarity index 96% rename from mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py rename to mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py index 9b1a83fc9..b1eda9cd1 100644 --- a/mne_bids_pipeline/steps/preprocessing/_07b_apply_ssp.py +++ b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py @@ -1,4 +1,4 @@ -"""Apply SSP projections and obtain the cleaned epochs. +"""Apply SSP projections and obtain the cleaned epochs and raw data. Blinks and ECG artifacts are automatically detected and the corresponding SSP projections components are removed from the data. @@ -57,8 +57,6 @@ def apply_ssp( session: Optional[str], in_files: dict, ) -> dict: - # load epochs to reject ICA components - # compute SSP on first run of raw out_files = dict() out_files["epochs"] = ( in_files["epochs"].copy().update(processing="ssp", split=None, check=False) diff --git a/mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py similarity index 100% rename from mne_bids_pipeline/steps/preprocessing/_08_ptp_reject.py rename to mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py diff --git a/mne_bids_pipeline/steps/preprocessing/__init__.py b/mne_bids_pipeline/steps/preprocessing/__init__.py index 686b7cf27..07d65224a 100644 --- a/mne_bids_pipeline/steps/preprocessing/__init__.py +++ b/mne_bids_pipeline/steps/preprocessing/__init__.py @@ -5,12 +5,13 @@ _02_head_pos, _03_maxfilter, _04_frequency_filter, - _05_make_epochs, + _05_regress_artifact, _06a_run_ica, _06b_run_ssp, - _07a_apply_ica, - _07b_apply_ssp, - _08_ptp_reject, + _07_make_epochs, + _08a_apply_ica, + _08b_apply_ssp, + _09_ptp_reject, ) _STEPS = ( @@ -18,10 +19,11 @@ _02_head_pos, _03_maxfilter, _04_frequency_filter, - _05_make_epochs, + _05_regress_artifact, _06a_run_ica, _06b_run_ssp, - _07a_apply_ica, - _07b_apply_ssp, - _08_ptp_reject, + _07_make_epochs, + _08a_apply_ica, + _08b_apply_ssp, + _09_ptp_reject, ) diff --git a/mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py b/mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py new file mode 100644 index 000000000..ef3347a53 --- /dev/null +++ b/mne_bids_pipeline/tests/configs/config_MNE_phantom_KIT_data.py @@ -0,0 +1,28 @@ +""" +KIT phantom data. 
+ +https://mne.tools/dev/documentation/datasets.html#kit-phantom-dataset +""" + +study_name = "MNE-phantom-KIT-data" +bids_root = "~/mne_data/MNE-phantom-KIT-data" +deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/MNE-phantom-KIT-data" +task = "phantom" +ch_types = ["meg"] + +# Preprocessing +l_freq = None +h_freq = 40.0 +regress_artifact = dict( + picks="meg", picks_artifact=["MISC 001", "MISC 002", "MISC 003"] +) + +# Epochs +epochs_tmin = -0.08 +epochs_tmax = 0.18 +epochs_decim = 10 # 2000->200 Hz +baseline = (None, 0) +conditions = ["dip01", "dip13", "dip25", "dip37", "dip49"] + +# Decoding +decode = True # should be very good performance diff --git a/mne_bids_pipeline/tests/datasets.py b/mne_bids_pipeline/tests/datasets.py index f96a01042..c559f06ca 100644 --- a/mne_bids_pipeline/tests/datasets.py +++ b/mne_bids_pipeline/tests/datasets.py @@ -9,6 +9,7 @@ class DATASET_OPTIONS_T(TypedDict, total=False): openneuro: str # "" osf: str # "" web: str # "" + mne: str # "" include: list[str] # [] exclude: list[str] # [] hash: str # "" @@ -122,4 +123,7 @@ class DATASET_OPTIONS_T(TypedDict, total=False): "sub-emptyroom/ses-20000101", ], }, + "MNE-phantom-KIT-data": { + "mne": "phantom_kit", + }, } diff --git a/mne_bids_pipeline/tests/test_run.py b/mne_bids_pipeline/tests/test_run.py index 4eee1aa02..2e068ef70 100644 --- a/mne_bids_pipeline/tests/test_run.py +++ b/mne_bids_pipeline/tests/test_run.py @@ -124,6 +124,9 @@ class _TestOptionsT(TypedDict, total=False): "config": "config_ERP_CORE.py", "task": "P3", }, + "MNE-phantom-KIT-data": { + "config": "config_MNE_phantom_KIT_data.py", + }, } diff --git a/mne_bids_pipeline/tests/test_validation.py b/mne_bids_pipeline/tests/test_validation.py index c47432155..e99bfecf9 100644 --- a/mne_bids_pipeline/tests/test_validation.py +++ b/mne_bids_pipeline/tests/test_validation.py @@ -14,7 +14,7 @@ def test_validation(tmp_path, capsys): bad_text += f"bids_root = '{tmp_path}'\n" # no ch_types config_path.write_text(bad_text) - with pytest.raises(ValueError, match="Please specify ch_types"): + with pytest.raises(ValueError, match="Value should have at least 1 item"): _import_config(config_path=config_path) bad_text += "ch_types = ['eeg']\n" # conditions From a65f278d92eb7d36914d3c630209dce12a4c6a8a Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 30 Jan 2024 14:37:03 -0500 Subject: [PATCH 5/8] BUG: Fix several bugs (#839) --- docs/source/v1.6.md.inc | 4 ++ mne_bids_pipeline/_config_import.py | 2 +- mne_bids_pipeline/_config_utils.py | 40 ++++++++++--------- mne_bids_pipeline/_main.py | 1 - mne_bids_pipeline/_reject.py | 10 ++--- mne_bids_pipeline/_run.py | 14 ++++++- .../steps/freesurfer/_02_coreg_surfaces.py | 22 +++++++--- .../preprocessing/_05_regress_artifact.py | 1 - .../steps/preprocessing/_09_ptp_reject.py | 19 +++++++-- .../steps/source/_01_make_bem_surfaces.py | 4 +- .../steps/source/_02_make_bem_solution.py | 4 +- .../steps/source/_03_setup_source_space.py | 4 +- 12 files changed, 85 insertions(+), 40 deletions(-) diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc index afb7835c3..01bfd87e4 100644 --- a/docs/source/v1.6.md.inc +++ b/docs/source/v1.6.md.inc @@ -5,6 +5,7 @@ :new: New features & enhancements - Added [`regress_artifact`][mne_bids_pipeline._config.regress_artifact] to allow artifact regression (e.g., of MEG reference sensors in KIT systems) (#837 by @larsoner) +- Chosen `reject` parameters are now saved in the generated HTML reports (#839 by @larsoner) [//]: # (### :warning: Behavior changes) @@ -17,6 +18,9 @@ ### 
:bug: Bug fixes - Fix minor issues with path handling for cross-talk and calibration files (#834 by @larsoner) +- Fix bug where EEG `reject` params were not used for `ch_types = ["meg", "eeg"]` (#839 by @larsoner) +- Fix bug where implicit `mf_reference_run` could change across invocations of `mne_bids_pipeline`, breaking caching (#839 by @larsoner) +- Fix bug where `--no-cache` had no effect (#839 by @larsoner) ### :medical_symbol: Code health diff --git a/mne_bids_pipeline/_config_import.py b/mne_bids_pipeline/_config_import.py index db5487cb7..fa8fb6772 100644 --- a/mne_bids_pipeline/_config_import.py +++ b/mne_bids_pipeline/_config_import.py @@ -369,7 +369,7 @@ def _pydantic_validate( if config_path is not None: name += f" from {config_path}" model_config = ConfigDict( - arbitrary_types_allowed=False, + arbitrary_types_allowed=True, # needed in 2.6.0 to allow DigMontage for example validate_assignment=True, strict=True, # do not allow float for int for example extra="forbid", diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py index 321ccf0f0..7b555a2a4 100644 --- a/mne_bids_pipeline/_config_utils.py +++ b/mne_bids_pipeline/_config_utils.py @@ -16,9 +16,9 @@ from .typing import ArbitraryContrast try: - _keys_arbitrary_contrast = set(ArbitraryContrast.__required_keys__) + _set_keys_arbitrary_contrast = set(ArbitraryContrast.__required_keys__) except Exception: - _keys_arbitrary_contrast = set(ArbitraryContrast.__annotations__.keys()) + _set_keys_arbitrary_contrast = set(ArbitraryContrast.__annotations__.keys()) def get_fs_subjects_dir(config: SimpleNamespace) -> pathlib.Path: @@ -96,11 +96,14 @@ def get_subjects(config: SimpleNamespace) -> list[str]: else: s = config.subjects - subjects = set(s) - set(config.exclude_subjects) - # Drop empty-room subject. - subjects = subjects - set(["emptyroom"]) + # Preserve order and remove excluded subjects + subjects = [ + subject + for subject in s + if subject not in config.exclude_subjects and subject != "emptyroom" + ] - return sorted(subjects) + return subjects def get_sessions(config: SimpleNamespace) -> Union[list[None], list[str]]: @@ -176,7 +179,17 @@ def _get_runs_all_subjects_cached( def get_intersect_run(config: SimpleNamespace) -> list[str]: """Return the intersection of all the runs of all subjects.""" subj_runs = get_runs_all_subjects(config) - return list(set.intersection(*map(set, subj_runs.values()))) + # Do not use something like: + # list(set.intersection(*map(set, subj_runs.values()))) + # as it will not preserve order. Instead just be explicit and preserve order. + # We could use "sorted", but it's probably better to use the order provided by + # the user (if they want to put `runs=["02", "01"]` etc. 
it's better to use "02") + all_runs = list() + for runs in subj_runs.values(): + for run in runs: + if run not in all_runs: + all_runs.append(run) + return all_runs def get_runs( @@ -429,17 +442,6 @@ def _restrict_analyze_channels( return inst -def _get_scalp_in_files(cfg: SimpleNamespace) -> dict[str, pathlib.Path]: - subject_path = pathlib.Path(cfg.subjects_dir) / cfg.fs_subject - seghead = subject_path / "surf" / "lh.seghead" - in_files = dict() - if seghead.is_file(): - in_files["seghead"] = seghead - else: - in_files["t1"] = subject_path / "mri" / "T1.mgz" - return in_files - - def _get_bem_conductivity(cfg: SimpleNamespace) -> tuple[tuple[float], str]: if cfg.fs_subject in ("fsaverage", cfg.use_template_mri): conductivity = None # should never be used @@ -573,7 +575,7 @@ def _validate_contrasts(contrasts: SimpleNamespace) -> None: if len(contrast) != 2: raise ValueError("Contrasts' tuples MUST be two conditions") elif isinstance(contrast, dict): - if not _keys_arbitrary_contrast.issubset(set(contrast.keys())): + if not _set_keys_arbitrary_contrast.issubset(set(contrast.keys())): raise ValueError(f"Missing key(s) in contrast {contrast}") if len(contrast["conditions"]) != len(contrast["weights"]): raise ValueError( diff --git a/mne_bids_pipeline/_main.py b/mne_bids_pipeline/_main.py index 04ddabe1e..56d14a010 100755 --- a/mne_bids_pipeline/_main.py +++ b/mne_bids_pipeline/_main.py @@ -141,7 +141,6 @@ def main(): steps = (steps,) on_error = "debug" if debug else None - cache = "1" if cache else "0" processing_stages = [] processing_steps = [] diff --git a/mne_bids_pipeline/_reject.py b/mne_bids_pipeline/_reject.py index ca506239d..707984732 100644 --- a/mne_bids_pipeline/_reject.py +++ b/mne_bids_pipeline/_reject.py @@ -45,11 +45,11 @@ def _get_reject( # Only keep thresholds for channel types of interest reject = reject.copy() - if ch_types == ["eeg"]: - ch_types_to_remove = ("mag", "grad") - else: - ch_types_to_remove = ("eeg",) - + ch_types_to_remove = list() + if "meg" not in ch_types: + ch_types_to_remove.extend(("mag", "grad")) + if "eeg" not in ch_types: + ch_types_to_remove.append("eeg") for ch_type in ch_types_to_remove: try: del reject[ch_type] diff --git a/mne_bids_pipeline/_run.py b/mne_bids_pipeline/_run.py index c7e46267b..04deef839 100644 --- a/mne_bids_pipeline/_run.py +++ b/mne_bids_pipeline/_run.py @@ -378,11 +378,21 @@ def _prep_out_files( *, exec_params: SimpleNamespace, out_files: dict[str, BIDSPath], + check_relative: Optional[pathlib.Path] = None, ): + if check_relative is None: + check_relative = exec_params.deriv_root for key, fname in out_files.items(): + # Sanity check that we only ever write to the derivatives directory + fname = pathlib.Path(fname) + if not fname.is_relative_to(check_relative): + raise RuntimeError( + f"Output BIDSPath not relative to expected root {check_relative}:" + f"\n{fname}" + ) out_files[key] = _path_to_str_hash( key, - pathlib.Path(fname), + fname, method=exec_params.memory_file_method, kind="out", ) @@ -401,7 +411,7 @@ def _path_to_str_hash( assert isinstance(v, pathlib.Path), f'Bad type {type(v)}: {kind}_files["{k}"] = {v}' assert v.exists(), f'missing {kind}_files["{k}"] = {v}' if method == "mtime": - this_hash = v.lstat().st_mtime + this_hash = v.stat().st_mtime else: assert method == "hash" # guaranteed this_hash = hash_file_path(v) diff --git a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py index eb5f86151..b2e2f8090 100644 --- 
a/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py +++ b/mne_bids_pipeline/steps/freesurfer/_02_coreg_surfaces.py @@ -10,7 +10,6 @@ import mne.bem from ..._config_utils import ( - _get_scalp_in_files, get_fs_subject, get_fs_subjects_dir, get_subjects, @@ -22,6 +21,17 @@ fs_bids_app = Path(__file__).parent / "contrib" / "run.py" +def _get_scalp_in_files(cfg: SimpleNamespace) -> dict[str, Path]: + subject_path = Path(cfg.fs_subjects_dir) / cfg.fs_subject + seghead = subject_path / "surf" / "lh.seghead" + in_files = dict() + if seghead.is_file(): + in_files["seghead"] = seghead + else: + in_files["t1"] = subject_path / "mri" / "T1.mgz" + return in_files + + def get_input_fnames_coreg_surfaces( *, cfg: SimpleNamespace, @@ -32,7 +42,7 @@ def get_input_fnames_coreg_surfaces( def get_output_fnames_coreg_surfaces(*, cfg: SimpleNamespace, subject: str) -> dict: out_files = dict() - subject_path = Path(cfg.subjects_dir) / cfg.fs_subject + subject_path = Path(cfg.fs_subjects_dir) / cfg.fs_subject out_files["seghead"] = subject_path / "surf" / "lh.seghead" for key in ("dense", "medium", "sparse"): out_files[f"head-{key}"] = ( @@ -57,19 +67,21 @@ def make_coreg_surfaces( in_files.pop("t1" if "t1" in in_files else "seghead") mne.bem.make_scalp_surfaces( subject=cfg.fs_subject, - subjects_dir=cfg.subjects_dir, + subjects_dir=cfg.fs_subjects_dir, force=True, overwrite=True, ) out_files = get_output_fnames_coreg_surfaces(cfg=cfg, subject=subject) - return _prep_out_files(exec_params=exec_params, out_files=out_files) + return _prep_out_files( + exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir + ) def get_config(*, config, subject) -> SimpleNamespace: cfg = SimpleNamespace( subject=subject, fs_subject=get_fs_subject(config, subject), - subjects_dir=get_fs_subjects_dir(config), + fs_subjects_dir=get_fs_subjects_dir(config), ) return cfg diff --git a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py index 8a2b2a0f6..5ab1119a6 100644 --- a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py +++ b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py @@ -76,7 +76,6 @@ def run_regress_artifact( out_files["regress"] = bids_path_in.copy().update( processing=None, split=None, - run=None, suffix="regress", extension=".h5", ) diff --git a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py index 7f0bf0607..3584aa72f 100644 --- a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py +++ b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py @@ -187,6 +187,9 @@ def drop_ptp( psd = True else: psd = 30 + tags = ("epochs", "reject") + kind = cfg.reject if isinstance(cfg.reject, str) else "Rejection" + title = "Epochs: after cleaning" with _open_report( cfg=cfg, exec_params=exec_params, subject=subject, session=session ) as report: @@ -201,18 +204,28 @@ def drop_ptp( fig=reject_log.plot( orientation="horizontal", aspect="auto", show=False ), - title="Epochs: Autoreject cleaning", + title=f"{kind} cleaning", caption=caption, - tags=("epochs", "autoreject"), + section=title, + tags=tags, replace=True, ) del caption + else: + report.add_html( + html=f"{reject}", + title=f"{kind} thresholds", + section=title, + replace=True, + tags=tags, + ) report.add_epochs( epochs=epochs, - title="Epochs: after cleaning", + title=title, psd=psd, drop_log_ignore=(), + tags=tags, replace=True, ) return _prep_out_files(exec_params=exec_params, 
out_files=out_files) diff --git a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py index da2b64890..f77593107 100644 --- a/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py +++ b/mne_bids_pipeline/steps/source/_01_make_bem_surfaces.py @@ -112,7 +112,9 @@ def make_bem_surfaces( subject=subject, session=session, ) - return _prep_out_files(exec_params=exec_params, out_files=out_files) + return _prep_out_files( + exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir + ) def get_config( diff --git a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py index a09d063e2..33f7b870c 100644 --- a/mne_bids_pipeline/steps/source/_02_make_bem_solution.py +++ b/mne_bids_pipeline/steps/source/_02_make_bem_solution.py @@ -69,7 +69,9 @@ def make_bem_solution( out_files = get_output_fnames_make_bem_solution(cfg=cfg, subject=subject) mne.write_bem_surfaces(out_files["model"], bem_model, overwrite=True) mne.write_bem_solution(out_files["sol"], bem_sol, overwrite=True) - return _prep_out_files(exec_params=exec_params, out_files=out_files) + return _prep_out_files( + exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir + ) def get_config( diff --git a/mne_bids_pipeline/steps/source/_03_setup_source_space.py b/mne_bids_pipeline/steps/source/_03_setup_source_space.py index 64e7314ed..52c342dbf 100644 --- a/mne_bids_pipeline/steps/source/_03_setup_source_space.py +++ b/mne_bids_pipeline/steps/source/_03_setup_source_space.py @@ -55,7 +55,9 @@ def run_setup_source_space( in_files.clear() # all used by setup_source_space out_files = get_output_fnames_setup_source_space(cfg=cfg, subject=subject) mne.write_source_spaces(out_files["src"], src, overwrite=True) - return _prep_out_files(exec_params=exec_params, out_files=out_files) + return _prep_out_files( + exec_params=exec_params, out_files=out_files, check_relative=cfg.fs_subjects_dir + ) def get_config( From 2796d017ccc2797a09d29f771443184948b9834b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 5 Feb 2024 15:03:26 -0500 Subject: [PATCH 6/8] [pre-commit.ci] pre-commit autoupdate (#843) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d8ceaa9ff..6a8375286 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,7 +6,7 @@ files: ^(.*\.(py|yaml))$ exclude: ^(\.[^/]*cache/.*|.*/freesurfer/contrib/.*)$ repos: - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.14 + rev: v0.2.0 hooks: - id: ruff args: ["--fix"] From 92be6039ce7b644f1a049c255c49a742ff591de8 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 6 Feb 2024 11:20:29 -0500 Subject: [PATCH 7/8] ENH: Write out raw data and SSP events (#840) --- docs/source/v1.6.md.inc | 3 + mne_bids_pipeline/_config.py | 8 +- mne_bids_pipeline/_config_utils.py | 23 ++- .../preprocessing/_05_regress_artifact.py | 2 +- .../steps/preprocessing/_06b_run_ssp.py | 56 ++++--- .../steps/preprocessing/_07_make_epochs.py | 5 +- .../steps/preprocessing/_08a_apply_ica.py | 145 ++++++++++++++++-- .../steps/preprocessing/_08b_apply_ssp.py | 132 +++++++++++++--- .../steps/preprocessing/_09_ptp_reject.py | 2 +- .../steps/sensor/_06_make_cov.py | 4 +- 
.../steps/sensor/_99_group_average.py | 2 +- .../tests/configs/config_ds000248_base.py | 2 +- 12 files changed, 317 insertions(+), 67 deletions(-) diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc index 01bfd87e4..3c87dac23 100644 --- a/docs/source/v1.6.md.inc +++ b/docs/source/v1.6.md.inc @@ -6,6 +6,8 @@ - Added [`regress_artifact`][mne_bids_pipeline._config.regress_artifact] to allow artifact regression (e.g., of MEG reference sensors in KIT systems) (#837 by @larsoner) - Chosen `reject` parameters are now saved in the generated HTML reports (#839 by @larsoner) +- Added saving of clean raw data in addition to epochs (#840 by @larsoner) +- Added saving of detected blink and cardiac events used to calculate SSP projectors (#840 by @larsoner) [//]: # (### :warning: Behavior changes) @@ -21,6 +23,7 @@ - Fix bug where EEG `reject` params were not used for `ch_types = ["meg", "eeg"]` (#839 by @larsoner) - Fix bug where implicit `mf_reference_run` could change across invocations of `mne_bids_pipeline`, breaking caching (#839 by @larsoner) - Fix bug where `--no-cache` had no effect (#839 by @larsoner) +- Fix bug where raw, empty-room, and custom noise covariances were errantly calculated on data without ICA or SSP applied (#840 by @larsoner) ### :medical_symbol: Code health diff --git a/mne_bids_pipeline/_config.py b/mne_bids_pipeline/_config.py index e3c7626bb..041331da5 100644 --- a/mne_bids_pipeline/_config.py +++ b/mne_bids_pipeline/_config.py @@ -1161,14 +1161,14 @@ ways using the configuration options you can find below. """ -min_ecg_epochs: int = 5 +min_ecg_epochs: Annotated[int, Ge(1)] = 5 """ -Minimal number of ECG epochs needed to compute SSP or ICA rejection. +Minimal number of ECG epochs needed to compute SSP projectors. """ -min_eog_epochs: int = 5 +min_eog_epochs: Annotated[int, Ge(1)] = 5 """ -Minimal number of EOG epochs needed to compute SSP or ICA rejection. +Minimal number of EOG epochs needed to compute SSP projectors. 
""" diff --git a/mne_bids_pipeline/_config_utils.py b/mne_bids_pipeline/_config_utils.py index 7b555a2a4..784752028 100644 --- a/mne_bids_pipeline/_config_utils.py +++ b/mne_bids_pipeline/_config_utils.py @@ -484,7 +484,7 @@ def get_noise_cov_bids_path( task=cfg.task, acquisition=cfg.acq, run=None, - processing=cfg.proc, + processing="clean", recording=cfg.rec, space=cfg.space, suffix="cov", @@ -638,3 +638,24 @@ def _pl(x, *, non_pl="", pl="s"): """Determine if plural should be used.""" len_x = x if isinstance(x, (int, np.generic)) else len(x) return non_pl if len_x == 1 else pl + + +def _proj_path( + *, + cfg: SimpleNamespace, + subject: str, + session: Optional[str], +) -> BIDSPath: + return BIDSPath( + subject=subject, + session=session, + task=cfg.task, + acquisition=cfg.acq, + recording=cfg.rec, + space=cfg.space, + datatype=cfg.datatype, + root=cfg.deriv_root, + extension=".fif", + suffix="proj", + check=False, + ) diff --git a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py index 5ab1119a6..cb31df04d 100644 --- a/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py +++ b/mne_bids_pipeline/steps/preprocessing/_05_regress_artifact.py @@ -82,7 +82,7 @@ def run_regress_artifact( model.apply(raw, copy=False) if projs: raw.add_proj(projs) - raw.save(out_files[in_key], overwrite=True) + raw.save(out_files[in_key], overwrite=True, split_size=cfg._raw_split_size) _update_for_splits(out_files, in_key) model.save(out_files["regress"], overwrite=True) assert len(in_files) == 0, in_files.keys() diff --git a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py index 7aa0e97de..7ec75ef91 100644 --- a/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py +++ b/mne_bids_pipeline/steps/preprocessing/_06b_run_ssp.py @@ -7,13 +7,15 @@ from typing import Optional import mne +import numpy as np from mne import compute_proj_epochs, compute_proj_evoked -from mne.preprocessing import create_ecg_epochs, create_eog_epochs +from mne.preprocessing import find_ecg_events, find_eog_events from mne_bids import BIDSPath from ..._config_utils import ( _bids_kwargs, _pl, + _proj_path, get_runs, get_sessions, get_subjects, @@ -25,6 +27,11 @@ from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs +def _find_ecg_events(raw: mne.io.Raw, ch_name: Optional[str]) -> np.ndarray: + """Wrap find_ecg_events to use the same defaults as create_ecg_events.""" + return find_ecg_events(raw, ch_name=ch_name, l_freq=8, h_freq=16)[0] + + def get_input_fnames_run_ssp( *, cfg: SimpleNamespace, @@ -69,14 +76,7 @@ def run_ssp( # compute SSP on all runs of raw raw_fnames = [in_files.pop(f"raw_run-{run}") for run in cfg.runs] - # when saving proj, use run=None - out_files = dict() - out_files["proj"] = ( - raw_fnames[0] - .copy() - .update(run=None, suffix="proj", split=None, processing=None, check=False) - ) - + out_files = dict(proj=_proj_path(cfg=cfg, subject=subject, session=session)) msg = ( f"Input{_pl(raw_fnames)} ({len(raw_fnames)}): " f'{raw_fnames[0].basename}{_pl(raw_fnames, pl=" ...")}' @@ -93,7 +93,7 @@ def run_ssp( projs = dict() proj_kinds = ("ecg", "eog") rate_names = dict(ecg="heart", eog="blink") - epochs_fun = dict(ecg=create_ecg_epochs, eog=create_eog_epochs) + events_fun = dict(ecg=_find_ecg_events, eog=find_eog_events) minimums = dict(ecg=cfg.min_ecg_epochs, eog=cfg.min_eog_epochs) rejects = dict(ecg=cfg.ssp_reject_ecg, eog=cfg.ssp_reject_eog) avg = 
dict(ecg=cfg.ecg_proj_from_average, eog=cfg.eog_proj_from_average) @@ -111,17 +111,38 @@ def run_ssp( projs[kind] = [] if not any(n_projs[kind].values()): continue - proj_epochs = epochs_fun[kind]( - raw, - ch_name=ch_name[kind], - decim=cfg.epochs_decim, - ) - n_orig = len(proj_epochs.selection) + events = events_fun[kind](raw=raw, ch_name=ch_name[kind]) + n_orig = len(events) rate = n_orig / raw.times[-1] * 60 bpm_msg = f"{rate:5.1f} bpm" msg = f"Detected {rate_names[kind]} rate: {bpm_msg}" logger.info(**gen_log_kwargs(message=msg)) - # Enough to start + # Enough to create epochs + if len(events) < minimums[kind]: + msg = ( + f"No {kind.upper()} projectors computed: got " + f"{len(events)} original events < {minimums[kind]} {bpm_msg}" + ) + logger.warning(**gen_log_kwargs(message=msg)) + continue + out_files[f"events_{kind}"] = ( + out_files["proj"] + .copy() + .update(suffix=f"{kind}-eve", split=None, check=False, extension=".txt") + ) + mne.write_events(out_files[f"events_{kind}"], events, overwrite=True) + proj_epochs = mne.Epochs( + raw, + events=events, + event_id=events[0, 2], + tmin=-0.5, + tmax=0.5, + proj=False, + baseline=(None, None), + reject_by_annotation=True, + preload=True, + decim=cfg.epochs_decim, + ) if len(proj_epochs) >= minimums[kind]: reject_ = _get_reject( subject=subject, @@ -134,7 +155,6 @@ def run_ssp( proj_epochs.drop_bad(reject=reject_) # Still enough after rejection if len(proj_epochs) >= minimums[kind]: - proj_epochs.apply_baseline((None, None)) use = proj_epochs.average() if avg[kind] else proj_epochs fun = compute_proj_evoked if avg[kind] else compute_proj_epochs desc_prefix = ( diff --git a/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py index 0cebb033e..42bf721df 100644 --- a/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py +++ b/mne_bids_pipeline/steps/preprocessing/_07_make_epochs.py @@ -54,7 +54,7 @@ def get_input_fnames_epochs( extension=".fif", datatype=cfg.datatype, root=cfg.deriv_root, - processing="filt", + processing=cfg.processing, ).update(suffix="raw", check=False) # Generate a list of raw data paths (i.e., paths of individual runs) @@ -276,7 +276,7 @@ def _get_events(cfg, subject, session): acquisition=cfg.acq, recording=cfg.rec, space=cfg.space, - processing="filt", + processing=cfg.processing, suffix="raw", extension=".fif", datatype=cfg.datatype, @@ -322,6 +322,7 @@ def get_config( rest_epochs_overlap=config.rest_epochs_overlap, _epochs_split_size=config._epochs_split_size, runs=get_runs(config=config, subject=subject), + processing="filt" if config.regress_artifact is None else "regress", **_bids_kwargs(config=config), ) return cfg diff --git a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py index f4b999cc8..e53a4758f 100644 --- a/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py +++ b/mne_bids_pipeline/steps/preprocessing/_08a_apply_ica.py @@ -21,22 +21,23 @@ from mne_bids import BIDSPath from ..._config_utils import ( - _bids_kwargs, + get_runs_tasks, get_sessions, get_subjects, ) +from ..._import_data import _get_run_rest_noise_path, _import_data_kwargs from ..._logging import gen_log_kwargs, logger from ..._parallel import get_parallel_backend, parallel_func -from ..._report import _agg_backend, _open_report +from ..._report import _add_raw, _agg_backend, _open_report from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs -def get_input_fnames_apply_ica( +def 
_ica_paths(
     *,
     cfg: SimpleNamespace,
     subject: str,
     session: Optional[str],
 ):
     bids_basename = BIDSPath(
         subject=subject,
         session=session,
@@ -53,15 +54,56 @@
     in_files["components"] = bids_basename.copy().update(
         processing="ica", suffix="components", extension=".tsv"
     )
-    in_files["epochs"] = bids_basename.copy().update(suffix="epo", extension=".fif")
+    return in_files
+
+
+def _read_ica_and_exclude(
+    in_files: dict,
+) -> mne.preprocessing.ICA:
+    ica = read_ica(fname=in_files.pop("ica"))
+    tsv_data = pd.read_csv(in_files.pop("components"), sep="\t")
+    ica.exclude = tsv_data.loc[tsv_data["status"] == "bad", "component"].to_list()
+    return ica
+
+
+def get_input_fnames_apply_ica_epochs(
+    *,
+    cfg: SimpleNamespace,
+    subject: str,
+    session: Optional[str],
+) -> dict:
+    in_files = _ica_paths(cfg=cfg, subject=subject, session=session)
+    in_files["epochs"] = in_files["ica"].copy().update(suffix="epo", extension=".fif")
     _update_for_splits(in_files, "epochs", single=True)
     return in_files
 
 
+def get_input_fnames_apply_ica_raw(
+    *,
+    cfg: SimpleNamespace,
+    subject: str,
+    session: Optional[str],
+    run: str,
+    task: Optional[str],
+) -> dict:
+    in_files = _get_run_rest_noise_path(
+        cfg=cfg,
+        subject=subject,
+        session=session,
+        run=run,
+        task=task,
+        kind="filt",
+        mf_reference_run=cfg.mf_reference_run,
+    )
+    assert len(in_files)
+    in_files.update(_ica_paths(cfg=cfg, subject=subject, session=session))
+    return in_files
+
+
 @failsafe_run(
-    get_input_fnames=get_input_fnames_apply_ica,
+    get_input_fnames=get_input_fnames_apply_ica_epochs,
 )
-def apply_ica(
+def apply_ica_epochs(
     *,
     cfg: SimpleNamespace,
     exec_params: SimpleNamespace,
@@ -85,11 +127,7 @@
     # Load ICA.
     msg = f"Reading ICA: {in_files['ica']}"
     logger.debug(**gen_log_kwargs(message=msg))
-    ica = read_ica(fname=in_files.pop("ica"))
-
-    # Select ICs to remove.
-    tsv_data = pd.read_csv(in_files.pop("components"), sep="\t")
-    ica.exclude = tsv_data.loc[tsv_data["status"] == "bad", "component"].to_list()
+    ica = _read_ica_and_exclude(in_files)
 
     # Load epochs.
msg = f'Input: {in_files["epochs"].basename}' @@ -168,16 +206,65 @@ def apply_ica( return _prep_out_files(exec_params=exec_params, out_files=out_files) +@failsafe_run( + get_input_fnames=get_input_fnames_apply_ica_raw, +) +def apply_ica_raw( + *, + cfg: SimpleNamespace, + exec_params: SimpleNamespace, + subject: str, + session: Optional[str], + run: str, + task: Optional[str], + in_files: dict, +) -> dict: + ica = _read_ica_and_exclude(in_files) + in_key = list(in_files)[0] + assert in_key.startswith("raw"), in_key + raw_fname = in_files.pop(in_key) + assert len(in_files) == 0, in_files + out_files = dict() + out_files[in_key] = raw_fname.copy().update(processing="clean") + msg = f"Writing {out_files[in_key].basename} …" + logger.info(**gen_log_kwargs(message=msg)) + raw = mne.io.read_raw_fif(raw_fname, preload=True) + ica.apply(raw) + raw.save(out_files[in_key], overwrite=True, split_size=cfg._raw_split_size) + _update_for_splits(out_files, in_key) + # Report + with _open_report( + cfg=cfg, + exec_params=exec_params, + subject=subject, + session=session, + run=run, + task=task, + ) as report: + msg = "Adding cleaned raw data to report" + logger.info(**gen_log_kwargs(message=msg)) + _add_raw( + cfg=cfg, + report=report, + bids_path_in=out_files[in_key], + title="Raw (clean)", + tags=("clean",), + raw=raw, + ) + return _prep_out_files(exec_params=exec_params, out_files=out_files) + + def get_config( *, config: SimpleNamespace, + subject: str, ) -> SimpleNamespace: cfg = SimpleNamespace( baseline=config.baseline, ica_reject=config.ica_reject, - ch_types=config.ch_types, + processing="filt" if config.regress_artifact is None else "regress", _epochs_split_size=config._epochs_split_size, - **_bids_kwargs(config=config), + **_import_data_kwargs(config=config, subject=subject), ) return cfg @@ -190,17 +277,45 @@ def main(*, config: SimpleNamespace) -> None: return with get_parallel_backend(config.exec_params): - parallel, run_func = parallel_func(apply_ica, exec_params=config.exec_params) + # Epochs + parallel, run_func = parallel_func( + apply_ica_epochs, exec_params=config.exec_params + ) logs = parallel( run_func( cfg=get_config( config=config, + subject=subject, + ), + exec_params=config.exec_params, + subject=subject, + session=session, + ) + for subject in get_subjects(config) + for session in get_sessions(config) + ) + # Raw + parallel, run_func = parallel_func( + apply_ica_raw, exec_params=config.exec_params + ) + logs += parallel( + run_func( + cfg=get_config( + config=config, + subject=subject, ), exec_params=config.exec_params, subject=subject, session=session, + run=run, + task=task, ) for subject in get_subjects(config) for session in get_sessions(config) + for run, task in get_runs_tasks( + config=config, + subject=subject, + session=session, + ) ) save_logs(config=config, logs=logs) diff --git a/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py index b1eda9cd1..e6fad4b8f 100644 --- a/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py +++ b/mne_bids_pipeline/steps/preprocessing/_08b_apply_ssp.py @@ -9,47 +9,37 @@ from typing import Optional import mne -from mne_bids import BIDSPath from ..._config_utils import ( - _bids_kwargs, + _proj_path, + get_runs_tasks, get_sessions, get_subjects, ) +from ..._import_data import _get_run_rest_noise_path, _import_data_kwargs from ..._logging import gen_log_kwargs, logger from ..._parallel import get_parallel_backend, parallel_func +from ..._report import _add_raw, _open_report 
from ..._run import _prep_out_files, _update_for_splits, failsafe_run, save_logs -def get_input_fnames_apply_ssp( +def get_input_fnames_apply_ssp_epochs( *, cfg: SimpleNamespace, subject: str, session: Optional[str], ) -> dict: - bids_basename = BIDSPath( - subject=subject, - session=session, - task=cfg.task, - acquisition=cfg.acq, - recording=cfg.rec, - space=cfg.space, - datatype=cfg.datatype, - root=cfg.deriv_root, - extension=".fif", - check=False, - ) in_files = dict() - in_files["epochs"] = bids_basename.copy().update(suffix="epo", check=False) + in_files["proj"] = _proj_path(cfg=cfg, subject=subject, session=session) + in_files["epochs"] = in_files["proj"].copy().update(suffix="epo", check=False) _update_for_splits(in_files, "epochs", single=True) - in_files["proj"] = bids_basename.copy().update(suffix="proj", check=False) return in_files @failsafe_run( - get_input_fnames=get_input_fnames_apply_ssp, + get_input_fnames=get_input_fnames_apply_ssp_epochs, ) -def apply_ssp( +def apply_ssp_epochs( *, cfg: SimpleNamespace, exec_params: SimpleNamespace, @@ -81,13 +71,85 @@ def apply_ssp( return _prep_out_files(exec_params=exec_params, out_files=out_files) +def get_input_fnames_apply_ssp_raw( + *, + cfg: SimpleNamespace, + subject: str, + session: Optional[str], + run: str, + task: Optional[str], +) -> dict: + in_files = _get_run_rest_noise_path( + cfg=cfg, + subject=subject, + session=session, + run=run, + task=task, + kind="filt", + mf_reference_run=cfg.mf_reference_run, + ) + assert len(in_files) + in_files["proj"] = _proj_path(cfg=cfg, subject=subject, session=session) + return in_files + + +@failsafe_run( + get_input_fnames=get_input_fnames_apply_ssp_raw, +) +def apply_ssp_raw( + *, + cfg: SimpleNamespace, + exec_params: SimpleNamespace, + subject: str, + session: Optional[str], + run: str, + task: Optional[str], + in_files: dict, +) -> dict: + projs = mne.read_proj(in_files.pop("proj")) + in_key = list(in_files.keys())[0] + assert in_key.startswith("raw"), in_key + raw_fname = in_files.pop(in_key) + assert len(in_files) == 0, in_files.keys() + raw = mne.io.read_raw_fif(raw_fname) + raw.add_proj(projs) + out_files = dict() + out_files[in_key] = raw_fname.copy().update(processing="clean") + msg = f"Writing {out_files[in_key].basename} …" + logger.info(**gen_log_kwargs(message=msg)) + raw.save(out_files[in_key], overwrite=True, split_size=cfg._raw_split_size) + _update_for_splits(out_files, in_key) + # Report + with _open_report( + cfg=cfg, + exec_params=exec_params, + subject=subject, + session=session, + run=run, + task=task, + ) as report: + msg = "Adding cleaned raw data to report" + logger.info(**gen_log_kwargs(message=msg)) + _add_raw( + cfg=cfg, + report=report, + bids_path_in=out_files[in_key], + title="Raw (clean)", + tags=("clean",), + raw=raw, + ) + return _prep_out_files(exec_params=exec_params, out_files=out_files) + + def get_config( *, config: SimpleNamespace, + subject: str, ) -> SimpleNamespace: cfg = SimpleNamespace( + processing="filt" if config.regress_artifact is None else "regress", _epochs_split_size=config._epochs_split_size, - **_bids_kwargs(config=config), + **_import_data_kwargs(config=config, subject=subject), ) return cfg @@ -100,11 +162,15 @@ def main(*, config: SimpleNamespace) -> None: return with get_parallel_backend(config.exec_params): - parallel, run_func = parallel_func(apply_ssp, exec_params=config.exec_params) + # Epochs + parallel, run_func = parallel_func( + apply_ssp_epochs, exec_params=config.exec_params + ) logs = parallel( run_func( 
cfg=get_config( config=config, + subject=subject, ), exec_params=config.exec_params, subject=subject, @@ -113,4 +179,28 @@ def main(*, config: SimpleNamespace) -> None: for subject in get_subjects(config) for session in get_sessions(config) ) + # Raw + parallel, run_func = parallel_func( + apply_ssp_raw, exec_params=config.exec_params + ) + logs += parallel( + run_func( + cfg=get_config( + config=config, + subject=subject, + ), + exec_params=config.exec_params, + subject=subject, + session=session, + run=run, + task=task, + ) + for subject in get_subjects(config) + for session in get_sessions(config) + for run, task in get_runs_tasks( + config=config, + subject=subject, + session=session, + ) + ) save_logs(config=config, logs=logs) diff --git a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py index 3584aa72f..d08469b3c 100644 --- a/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py +++ b/mne_bids_pipeline/steps/preprocessing/_09_ptp_reject.py @@ -187,7 +187,7 @@ def drop_ptp( psd = True else: psd = 30 - tags = ("epochs", "reject") + tags = ("epochs", "clean") kind = cfg.reject if isinstance(cfg.reject, str) else "Rejection" title = "Epochs: after cleaning" with _open_report( diff --git a/mne_bids_pipeline/steps/sensor/_06_make_cov.py b/mne_bids_pipeline/steps/sensor/_06_make_cov.py index a9c211df4..5a210d45f 100644 --- a/mne_bids_pipeline/steps/sensor/_06_make_cov.py +++ b/mne_bids_pipeline/steps/sensor/_06_make_cov.py @@ -71,7 +71,7 @@ def get_input_fnames_cov( run=None, recording=cfg.rec, space=cfg.space, - processing="filt", + processing="clean", suffix="raw", extension=".fif", datatype=cfg.datatype, @@ -173,7 +173,7 @@ def retrieve_custom_cov( task=cfg.task, acquisition=cfg.acq, run=None, - processing=cfg.proc, + processing="clean", recording=cfg.rec, space=cfg.space, suffix="ave", diff --git a/mne_bids_pipeline/steps/sensor/_99_group_average.py b/mne_bids_pipeline/steps/sensor/_99_group_average.py index 7ac19e7de..63e4e6ea2 100644 --- a/mne_bids_pipeline/steps/sensor/_99_group_average.py +++ b/mne_bids_pipeline/steps/sensor/_99_group_average.py @@ -107,7 +107,7 @@ def average_evokeds( task=cfg.task, acquisition=cfg.acq, run=None, - processing=cfg.proc, + processing="clean", recording=cfg.rec, space=cfg.space, suffix="ave", diff --git a/mne_bids_pipeline/tests/configs/config_ds000248_base.py b/mne_bids_pipeline/tests/configs/config_ds000248_base.py index 6ffd9644e..9888e1cee 100644 --- a/mne_bids_pipeline/tests/configs/config_ds000248_base.py +++ b/mne_bids_pipeline/tests/configs/config_ds000248_base.py @@ -23,7 +23,7 @@ def noise_cov(bp): # Use pre-stimulus period as noise source - bp = bp.copy().update(processing="clean", suffix="epo") + bp = bp.copy().update(suffix="epo") if not bp.fpath.exists(): bp.update(split="01") epo = mne.read_epochs(bp) From fe56c011c8810f5d1acf04b81b147ed08295cd6f Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 7 Feb 2024 09:01:42 -0500 Subject: [PATCH 8/8] BUG: Fix bug with Maxwell step when find_noise_channels_meg=False (#847) --- docs/source/v1.6.md.inc | 1 + mne_bids_pipeline/_import_data.py | 3 ++- mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py | 5 +++-- mne_bids_pipeline/tests/configs/config_ds003392.py | 5 +++-- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/docs/source/v1.6.md.inc b/docs/source/v1.6.md.inc index 3c87dac23..a38943374 100644 --- a/docs/source/v1.6.md.inc +++ b/docs/source/v1.6.md.inc @@ -23,6 +23,7 @@ - Fix bug where EEG `reject` params 
were not used for `ch_types = ["meg", "eeg"]` (#839 by @larsoner)
 - Fix bug where implicit `mf_reference_run` could change across invocations of `mne_bids_pipeline`, breaking caching (#839 by @larsoner)
 - Fix bug where `--no-cache` had no effect (#839 by @larsoner)
+- Fix bug where the Maxwell filtering step would fail if [`find_noisy_channels_meg = False`][mne_bids_pipeline._config.find_noisy_channels_meg] was used (#847 by @larsoner)
 - Fix bug where raw, empty-room, and custom noise covariances were errantly calculated on data without ICA or SSP applied (#840 by @larsoner)
 
 ### :medical_symbol: Code health
diff --git a/mne_bids_pipeline/_import_data.py b/mne_bids_pipeline/_import_data.py
index be892576b..c3c319f44 100644
--- a/mne_bids_pipeline/_import_data.py
+++ b/mne_bids_pipeline/_import_data.py
@@ -683,10 +683,11 @@ def _get_run_rest_noise_path(
 
 
 def _get_mf_reference_run_path(
+    *,
     cfg: SimpleNamespace,
     subject: str,
     session: Optional[str],
-    add_bads: bool,
+    add_bads: Optional[bool] = None,
 ) -> dict:
     return _get_run_path(
         cfg=cfg,
diff --git a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
index c5b58e2b6..5e5e30318 100644
--- a/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
+++ b/mne_bids_pipeline/steps/preprocessing/_03_maxfilter.py
@@ -64,7 +64,7 @@ def get_input_fnames_esss(
         mf_reference_run=cfg.mf_reference_run,
         **kwargs,
     )
-    in_files.update(_get_mf_reference_run_path(add_bads=True, **kwargs))
+    in_files.update(_get_mf_reference_run_path(**kwargs))
     return in_files
 
 
@@ -241,7 +241,8 @@ def get_input_fnames_maxwell_filter(
     )
 
     # reference run (used for `destination` and also bad channels for noise)
-    in_files.update(_get_mf_reference_run_path(add_bads=True, **kwargs))
+    # use add_bads=None here to mean "add if autobad is turned on"
+    in_files.update(_get_mf_reference_run_path(**kwargs))
 
     is_rest_noise = run is None and task in ("noise", "rest")
     if is_rest_noise:
diff --git a/mne_bids_pipeline/tests/configs/config_ds003392.py b/mne_bids_pipeline/tests/configs/config_ds003392.py
index 756d36fbc..3f225e50c 100644
--- a/mne_bids_pipeline/tests/configs/config_ds003392.py
+++ b/mne_bids_pipeline/tests/configs/config_ds003392.py
@@ -6,8 +6,9 @@
 subjects = ["01"]
 task = "localizer"
 
-find_flat_channels_meg = True
-find_noisy_channels_meg = True
+# usually a good idea to use True, but we know no bads are detected for this dataset
+find_flat_channels_meg = False
+find_noisy_channels_meg = False
 use_maxwell_filter = True
 ch_types = ["meg"]
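To round out the final fix above, a small illustrative sketch of the `add_bads=None` convention: `None` now means "defer to whether automated bad-channel detection is enabled", so the Maxwell filtering step no longer requires bad-channel sidecars when `find_flat_channels_meg` and `find_noisy_channels_meg` are both `False`. The helper below is hypothetical; in the pipeline the deferral is resolved internally when the run paths are assembled.

```python
# Hypothetical helper illustrating the add_bads=None convention from the patch.
from types import SimpleNamespace
from typing import Optional


def resolve_add_bads(add_bads: Optional[bool], cfg: SimpleNamespace) -> bool:
    if add_bads is None:  # "add if autobad is turned on"
        return bool(cfg.find_flat_channels_meg or cfg.find_noisy_channels_meg)
    return add_bads


# With the ds003392 test config above (both detection options now False),
# the Maxwell-filter reference run is requested without bads files:
cfg = SimpleNamespace(find_flat_channels_meg=False, find_noisy_channels_meg=False)
assert resolve_add_bads(None, cfg) is False
```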