From c6ad37b467c4b01fdef233509e11bf460b7dae77 Mon Sep 17 00:00:00 2001 From: Gensollen Date: Fri, 23 Feb 2024 14:47:22 +0100 Subject: [PATCH] [FIX] Fixes to `DWIDTI` and `DWIConnectome` pipelines (#1083) * some fixes and improvements to dwi dti * load specific modules rather than everything * add bids filename * improve error messages * simplify pipeline * add statistics to valid suffixes * bad extension spec * simplify statistics_on_atlas * fix test * update tol a bit * update test again * simplify tests * update regexp for preproc brainmask * add diffmodel to suffixes * add missing entity * add suffix parcellation * relax test * update unit tests * small fix in pet query * refactor atlases * use enumerations for atlas names * specify modules for linux based tests also * modify enumeration names * monkeypatch FSLDIR * small fix * small fix again * add unit tests * monkeypatch FSLDIR again * monkeypatch FSLDIR again 2 * refactor tests to avoid FSL errors * fix remaining test * fix bad name * rework documentation * load specific modules rather than everything --- .github/workflows/test_pipelines_anat.yml | 14 +- .../test_pipelines_anat_freesurfer.yml | 4 +- .github/workflows/test_pipelines_dwi.yml | 8 +- clinica/pipelines/dwi_dti/pipeline.py | 36 +- clinica/pipelines/dwi_dti/utils.py | 74 +- .../machine_learning/classification_cli.py | 49 +- clinica/pipelines/machine_learning/input.py | 9 +- .../machine_learning/region_based_io.py | 16 +- .../pet_volume/pet_volume_pipeline.py | 4 +- .../pipelines/pet_volume/pet_volume_utils.py | 17 +- .../t1_volume_parcellation_pipeline.py | 4 +- .../t1_volume_parcellation_utils.py | 16 +- clinica/pydra/pet_volume/pipeline.py | 4 +- clinica/utils/atlas.py | 692 +++++++++--------- clinica/utils/bids.py | 173 +++++ clinica/utils/dwi.py | 4 +- clinica/utils/input_files.py | 115 ++- clinica/utils/inputs.py | 103 +-- clinica/utils/statistics.py | 63 +- docs/Atlases.md | 92 +-- .../dwi/preprocessing/test_phase_diff.py | 1 + .../pipelines/dwi/test_pipelines.py | 51 +- test/unittests/pydra/test_query.py | 34 +- test/unittests/utils/test_atlas.py | 251 +++++++ test/unittests/utils/test_input_files.py | 8 +- test/unittests/utils/test_utils_inputs.py | 14 +- 26 files changed, 1125 insertions(+), 731 deletions(-) create mode 100644 clinica/utils/bids.py create mode 100644 test/unittests/utils/test_atlas.py diff --git a/.github/workflows/test_pipelines_anat.yml b/.github/workflows/test_pipelines_anat.yml index b6eebf841..9a24e8bf0 100644 --- a/.github/workflows/test_pipelines_anat.yml +++ b/.github/workflows/test_pipelines_anat.yml @@ -26,7 +26,7 @@ jobs: source ~/miniconda3/etc/profile.d/conda.sh conda activate "${{ github.workspace }}"/env source "$(brew --prefix)/opt/modules/init/bash" - module load clinica.all + module load clinica/ants/2.4.4 make install cd test poetry run pytest --verbose \ @@ -36,7 +36,7 @@ jobs: --junitxml=./test-reports/non_regression_anat_t1_linear_mac.xml \ --disable-warnings \ ./nonregression/pipelines/anat/test_t1_linear.py - + test-t1-volume-MacOS: runs-on: - self-hosted @@ -51,7 +51,8 @@ jobs: source ~/miniconda3/etc/profile.d/conda.sh conda activate "${{ github.workspace }}"/env source "$(brew --prefix)/opt/modules/init/bash" - module load clinica.all + module load clinica/matlab/2017a + module load clinica/spm12/r7771 make install cd test poetry run pytest --verbose \ @@ -83,7 +84,7 @@ jobs: source /builds/miniconda/etc/profile.d/conda.sh conda activate "${{ github.workspace }}"/env source /usr/local/Modules/init/profile.sh - module 
load clinica.all + module load clinica/ants/2.4.4 make install cd test poetry run pytest --verbose \ @@ -93,7 +94,7 @@ jobs: --junitxml=./test-reports/non_regression_anat_t1_linear_linux.xml \ --disable-warnings \ ./nonregression/pipelines/anat/test_t1_linear.py - + test-t1-volume-Linux: runs-on: - self-hosted @@ -115,7 +116,8 @@ jobs: source /builds/miniconda/etc/profile.d/conda.sh conda activate "${{ github.workspace }}"/env source /usr/local/Modules/init/profile.sh - module load clinica.all + module load clinica/matlab/2017a + module load clinica/spm12/r7771 make install cd test poetry run pytest --verbose \ diff --git a/.github/workflows/test_pipelines_anat_freesurfer.yml b/.github/workflows/test_pipelines_anat_freesurfer.yml index 88fd5500d..d8a32485b 100644 --- a/.github/workflows/test_pipelines_anat_freesurfer.yml +++ b/.github/workflows/test_pipelines_anat_freesurfer.yml @@ -26,7 +26,7 @@ jobs: source ~/miniconda3/etc/profile.d/conda.sh conda activate "${{ github.workspace }}"/env source "$(brew --prefix)/opt/modules/init/bash" - module load clinica.all + module load clinica/freesurfer/6.0.0 make install cd test poetry run pytest --verbose \ @@ -58,7 +58,7 @@ jobs: source /builds/miniconda/etc/profile.d/conda.sh conda activate "${{ github.workspace }}"/env source /usr/local/Modules/init/profile.sh - module load clinica.all + module load clinica/freesurfer/6.0.0 make install cd test poetry run pytest --verbose \ diff --git a/.github/workflows/test_pipelines_dwi.yml b/.github/workflows/test_pipelines_dwi.yml index d35743e4b..09f6a8e47 100644 --- a/.github/workflows/test_pipelines_dwi.yml +++ b/.github/workflows/test_pipelines_dwi.yml @@ -26,7 +26,9 @@ jobs: source ~/miniconda3/etc/profile.d/conda.sh conda activate "${{ github.workspace }}"/env source "$(brew --prefix)/opt/modules/init/bash" - module load clinica.all + module load clinica/fsl/6.0.5 + module load clinica/ants/2.4.4 + module load clinica/freesurfer/6.0.0 make install cd test poetry run pytest --verbose \ @@ -58,7 +60,9 @@ jobs: source /builds/miniconda/etc/profile.d/conda.sh conda activate "${{ github.workspace }}"/env source /usr/local/Modules/init/profile.sh - module load clinica.all + module load clinica/fsl/6.0.5 + module load clinica/ants/2.4.4 + module load clinica/freesurfer/6.0.0 make install cd test poetry run pytest --verbose \ diff --git a/clinica/pipelines/dwi_dti/pipeline.py b/clinica/pipelines/dwi_dti/pipeline.py index 70ba5be0c..49fe33bb7 100644 --- a/clinica/pipelines/dwi_dti/pipeline.py +++ b/clinica/pipelines/dwi_dti/pipeline.py @@ -252,7 +252,6 @@ def _build_core_nodes(self): from nipype.interfaces.mrtrix3 import TensorMetrics from clinica.utils.check_dependency import check_environment_variable - from clinica.utils.dwi import extract_bids_identifier_from_filename from .utils import ( get_ants_transforms, @@ -262,17 +261,6 @@ def _build_core_nodes(self): statistics_on_atlases, ) - # Nodes creation - # ============== - get_bids_identifier = npe.Node( - interface=nutil.Function( - input_names=["dwi_filename"], - output_names=["bids_identifier"], - function=extract_bids_identifier_from_filename, - ), - name="0-Get_BIDS_Identifier", - ) - get_caps_filenames = npe.Node( interface=nutil.Function( input_names=["caps_dwi_filename"], @@ -342,7 +330,7 @@ def _build_core_nodes(self): scalar_analysis = npe.Node( interface=nutil.Function( - input_names=["in_registered_map", "name_map", "prefix_file"], + input_names=["in_registered_map", "name_map", "dwi_preprocessed_file"], output_names=["atlas_statistics_list"], 
function=statistics_on_atlases, ), @@ -393,12 +381,6 @@ def _build_core_nodes(self): print_begin_message, [("preproc_dwi", "in_bids_or_caps_file")], ), - # Get BIDS/CAPS identifier from filename - ( - self.input_node, - get_bids_identifier, - [("preproc_dwi", "caps_dwi_filename")], - ), # Convert FSL gradient files (bval/bvec) to MRtrix format ( self.input_node, @@ -483,9 +465,9 @@ def _build_core_nodes(self): ), # Generate regional TSV files ( - get_bids_identifier, + self.input_node, scalar_analysis_fa, - [("bids_identifier", "prefix_file")], + [("preproc_dwi", "dwi_preprocessed_file")], ), ( thres_norm_fa, @@ -493,9 +475,9 @@ def _build_core_nodes(self): [("out_file", "in_registered_map")], ), ( - get_bids_identifier, + self.input_node, scalar_analysis_md, - [("bids_identifier", "prefix_file")], + [("preproc_dwi", "dwi_preprocessed_file")], ), ( thres_norm_md, @@ -503,9 +485,9 @@ def _build_core_nodes(self): [("out_file", "in_registered_map")], ), ( - get_bids_identifier, + self.input_node, scalar_analysis_ad, - [("bids_identifier", "prefix_file")], + [("preproc_dwi", "dwi_preprocessed_file")], ), ( thres_norm_ad, @@ -513,9 +495,9 @@ def _build_core_nodes(self): [("out_file", "in_registered_map")], ), ( - get_bids_identifier, + self.input_node, scalar_analysis_rd, - [("bids_identifier", "prefix_file")], + [("preproc_dwi", "dwi_preprocessed_file")], ), ( thres_norm_rd, diff --git a/clinica/pipelines/dwi_dti/utils.py b/clinica/pipelines/dwi_dti/utils.py index 87b014278..d34b536de 100644 --- a/clinica/pipelines/dwi_dti/utils.py +++ b/clinica/pipelines/dwi_dti/utils.py @@ -1,47 +1,41 @@ -def statistics_on_atlases(in_registered_map, name_map, prefix_file=None): +def statistics_on_atlases( + in_registered_map: str, name_map: str, dwi_preprocessed_file: str +) -> list: """Computes a list of statistics files for each atlas. - Args: - in_registered_map (str): Map already registered on atlases. - name_map (str): Name of the registered map in CAPS format. - prefix_file (Opt[str]): - _space-_map-_statistics.tsv + Parameters + ---------- + in_registered_map : str + Map already registered on atlases. + + name_map : str + Name of the registered map in CAPS format. - Returns: + dwi_preprocessed_file : str + The preprocessed DWI file name which contains the entities to be + used for building the statistics file names. + + Returns + ------- + list of str : List of paths leading to the statistics TSV files. 
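+
+    Examples
+    --------
+    A minimal sketch (file names are illustrative; the JHU atlases require FSL):
+
+    >>> statistics_on_atlases(
+    ...     "registered_fa.nii.gz",
+    ...     "FA",
+    ...     "sub-01_ses-M000_space-T1w_desc-preproc_dwi.nii.gz",
+    ... )  # doctest: +SKIP
+    ['.../sub-01_ses-M000_space-JHUDTI81_desc-preproc_res-1x1x1_map-FA_statistics.tsv', ...]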
""" from pathlib import Path - from nipype.utils.filemanip import split_filename - - from clinica.utils.atlas import ( - AtlasAbstract, - JHUDTI811mm, - JHUTracts01mm, - JHUTracts251mm, - ) + from clinica.utils.atlas import atlas_factory + from clinica.utils.bids import BIDSFileName from clinica.utils.statistics import statistics_on_atlas - in_atlas_list = [JHUDTI811mm(), JHUTracts01mm(), JHUTracts251mm()] - atlas_statistics_list = [] - for atlas in in_atlas_list: - if not isinstance(atlas, AtlasAbstract): - raise TypeError("Atlas element must be an AtlasAbstract type") - - if prefix_file: - filename = ( - f"{prefix_file}_space-{atlas.get_name_atlas()}" - f"_res-{atlas.get_spatial_resolution()}_map-{name_map}_statistics.tsv" - ) - else: - _, base, _ = split_filename(in_registered_map) - filename = ( - f"{base}_space-{atlas.get_name_atlas()}" - f"_res-{atlas.get_spatial_resolution()}_map-{name_map}_statistics.tsv" - ) - - out_atlas_statistics = str((Path.cwd() / filename).resolve()) + for atlas_name in ("JHUDTI81", "JHUTracts0", "JHUTracts25"): + atlas = atlas_factory(atlas_name) + source = BIDSFileName.from_name(dwi_preprocessed_file) + source.update_entity("space", atlas.name) + source.update_entity("res", atlas.spatial_resolution) + source.update_entity("map", name_map) + source.suffix = "statistics" + source.extension = ".tsv" + out_atlas_statistics = str((Path.cwd() / source.name).resolve()) statistics_on_atlas(in_registered_map, atlas, out_atlas_statistics) atlas_statistics_list.append(out_atlas_statistics) @@ -53,7 +47,7 @@ def get_caps_filenames(caps_dwi_filename: str): import re m = re.search( - r"(sub-[a-zA-Z0-9]+)_(ses-[a-zA-Z0-9]+).*_dwi_space-[a-zA-Z0-9]+", + r"(sub-[a-zA-Z0-9]+)_(ses-[a-zA-Z0-9]+).*_space-[a-zA-Z0-9]+_desc-preproc", caps_dwi_filename, ) if not m: @@ -62,7 +56,7 @@ def get_caps_filenames(caps_dwi_filename: str): ) caps_prefix = m.group(0) - bids_source = f"{m.group(1)}_{m.group(2)}_dwi" + bids_source = f"{m.group(1)}_{m.group(2)}" out_dti = f"{caps_prefix}_model-DTI_diffmodel.nii.gz" out_fa = f"{caps_prefix}_FA.nii.gz" @@ -115,20 +109,22 @@ def rename_into_caps( ) -def print_begin_pipeline(in_bids_or_caps_file): +def print_begin_pipeline(in_bids_or_caps_file: str): from clinica.utils.filemanip import get_subject_id from clinica.utils.ux import print_begin_image print_begin_image(get_subject_id(in_bids_or_caps_file)) -def print_end_pipeline(in_bids_or_caps_file, final_file_1, final_file_2): +def print_end_pipeline(in_bids_or_caps_file: str, final_file_1: str, final_file_2: str): from clinica.utils.filemanip import get_subject_id from clinica.utils.ux import print_end_image print_end_image(get_subject_id(in_bids_or_caps_file)) -def get_ants_transforms(in_affine_transformation, in_bspline_transformation): +def get_ants_transforms( + in_affine_transformation: str, in_bspline_transformation: str +) -> list: """Combine transformations for antsApplyTransforms interface.""" return [in_bspline_transformation, in_affine_transformation] diff --git a/clinica/pipelines/machine_learning/classification_cli.py b/clinica/pipelines/machine_learning/classification_cli.py index 88164311f..2b3b44284 100644 --- a/clinica/pipelines/machine_learning/classification_cli.py +++ b/clinica/pipelines/machine_learning/classification_cli.py @@ -5,7 +5,7 @@ from clinica import option from clinica.pipelines import cli_param from clinica.pipelines.engine import clinica_pipeline -from clinica.utils.atlas import T1_VOLUME_ATLASES +from clinica.utils.atlas import T1AndPetVolumeAtlasName 
pipeline_name = "machinelearning-classification"

@@ -47,7 +47,7 @@
 @cli_param.option_group.option(
     "-atlas",
     "--atlas",
-    type=click.Choice(T1_VOLUME_ATLASES),
+    type=click.Choice(T1AndPetVolumeAtlasName),
     help="One of the atlases generated by t1-volume or pet-volume pipeline.",
 )
 @option.global_option_group
@@ -69,21 +69,47 @@ def cli(
 ) -> None:
     """Classification based on machine learning using scikit-learn.
 
+    Parameters
+    ----------
+    caps_directory : str
 
-    GROUP_LABEL is a string defining the group label for the current analysis, which helps you keep track of different analyses.
+    group_label : str
+        String defining the group label for the current analysis, which helps you keep track of different analyses.
 
-    The third positional argument defines the type of features for classification. It can be 'RegionBased' or 'VoxelBased'.
+    orig_input_data : str
+        Defines the type of features for classification.
+        It can be 'RegionBased' or 'VoxelBased'.
 
-    The fourth positional argument defines the studied modality ('T1w' or 'PET')
+    image_type : str
+        Defines the studied modality ('T1w' or 'PET').
 
-    The fifth positional argument defines the algorithm. It can be 'DualSVM', 'LogisticRegression' or 'RandomForest'.
+    algorithm : str
+        Defines the algorithm. It can be 'DualSVM', 'LogisticRegression' or 'RandomForest'.
 
-    The sixth positional argument defines the validation method. It can be 'RepeatedHoldOut' or 'RepeatedKFoldCV'.
+    validation : str
+        Defines the validation method. It can be 'RepeatedHoldOut' or 'RepeatedKFoldCV'.
 
-    SUBJECTS_VISITS_TSV is a TSV file containing the participant_id and the session_id columns
+    subjects_visits_tsv : str
+        TSV file containing the participant_id and the session_id columns.
 
-    DIAGNOSES_TSV is a TSV file where the diagnosis for each participant (identified by a participant ID) is reported (e.g. AD, CN). It allows the algorithm to perform the dual classification (between the two labels reported).
+    diagnoses_tsv : str
+        TSV file where the diagnosis for each participant (identified by a participant ID) is reported (e.g. AD, CN).
+        It allows the algorithm to perform the dual classification (between the two labels reported).
 
+    output_directory : str
+        The output folder path.
+
+    acq_label : str, optional
+
+    suvr_reference_region : str, optional
+
+    atlas : str, optional
+
+    n_procs : int, optional
+        The number of processes to be used by the pipeline.
+
+    Notes
+    -----
     See https://aramislab.paris.inria.fr/clinica/docs/public/latest/Pipelines/MachineLearning_Classification/
     """
     from clinica.utils.exceptions import ClinicaException
@@ -115,10 +141,11 @@
             "Clinica will now exit."
         )
 
-    if algorithm in ["LogisticRegression", "RandomForest"]:
+    if algorithm in ("LogisticRegression", "RandomForest"):
         if orig_input_data != "RegionBased" or validation != "RepeatedHoldOut":
             raise ClinicaException(
-                "LogisticRegression or RandomForest algorithm can only work on region-based featured or RepeatedHoldOut algorithm. "
+                "The LogisticRegression and RandomForest algorithms can only work "
+                "with region-based features and the RepeatedHoldOut validation. "
                 "Clinica will now exit."
) diff --git a/clinica/pipelines/machine_learning/input.py b/clinica/pipelines/machine_learning/input.py index 53288e65a..1a6578dfa 100644 --- a/clinica/pipelines/machine_learning/input.py +++ b/clinica/pipelines/machine_learning/input.py @@ -248,15 +248,10 @@ def get_default_parameters(): class CAPSRegionBasedInput(CAPSInput): def __init__(self, input_params): - from clinica.utils.atlas import VOLUME_ATLASES + from clinica.utils.atlas import AtlasName super().__init__(input_params) - - if self._input_params["atlas"] not in VOLUME_ATLASES: - raise ValueError( - f"Incorrect atlas name (given value: {self._input_params['atlas']}). " - f"It must be one of {VOLUME_ATLASES}" - ) + AtlasName(self._input_params["atlas"]) def get_images(self): """ diff --git a/clinica/pipelines/machine_learning/region_based_io.py b/clinica/pipelines/machine_learning/region_based_io.py index 2004d9cf2..419200aa3 100644 --- a/clinica/pipelines/machine_learning/region_based_io.py +++ b/clinica/pipelines/machine_learning/region_based_io.py @@ -68,7 +68,7 @@ def features_weights(image_list, dual_coefficients, sv_indices, scaler=None): return weights -def weights_to_nifti(weights, atlas, output_filename): +def weights_to_nifti(weights, atlas: str, output_filename: str): """ Args: @@ -79,18 +79,10 @@ def weights_to_nifti(weights, atlas, output_filename): Returns: """ - from clinica.utils.atlas import AtlasAbstract + from clinica.utils.atlas import atlas_factory - atlas_path = None - atlas_classes = AtlasAbstract.__subclasses__() - for atlas_class in atlas_classes: - if atlas_class.get_name_atlas() == atlas: - atlas_path = atlas_class.get_atlas_labels() - - if not atlas_path: - raise ValueError("Atlas path not found for atlas name " + atlas) - - atlas_image = nib.load(atlas_path) + atlas = atlas_factory(atlas) + atlas_image = nib.load(atlas.labels) atlas_data = atlas_image.get_fdata(dtype="float32") labels = list(set(atlas_data.ravel())) output_image_weights = np.array(atlas_data, dtype="f") diff --git a/clinica/pipelines/pet_volume/pet_volume_pipeline.py b/clinica/pipelines/pet_volume/pet_volume_pipeline.py index 7d1d8a89c..eaa9a5d2b 100644 --- a/clinica/pipelines/pet_volume/pet_volume_pipeline.py +++ b/clinica/pipelines/pet_volume/pet_volume_pipeline.py @@ -19,7 +19,7 @@ class PETVolume(PETPipeline): def _check_pipeline_parameters(self) -> None: """Check pipeline parameters.""" - from clinica.utils.atlas import PET_VOLUME_ATLASES + from clinica.utils.atlas import T1AndPetVolumeAtlasName from clinica.utils.group import check_group_label super()._check_pipeline_parameters() @@ -30,7 +30,7 @@ def _check_pipeline_parameters(self) -> None: self.parameters.setdefault("mask_threshold", 0.3) self.parameters.setdefault("pvc_mask_tissues", [1, 2, 3]) self.parameters.setdefault("smooth", [8]) - self.parameters.setdefault("atlases", PET_VOLUME_ATLASES) + self.parameters.setdefault("atlases", T1AndPetVolumeAtlasName) def _check_custom_dependencies(self) -> None: """Check dependencies that can not be listed in the `info.json` file.""" diff --git a/clinica/pipelines/pet_volume/pet_volume_utils.py b/clinica/pipelines/pet_volume/pet_volume_utils.py index c6a9c02d0..2493332f4 100644 --- a/clinica/pipelines/pet_volume/pet_volume_utils.py +++ b/clinica/pipelines/pet_volume/pet_volume_utils.py @@ -257,26 +257,19 @@ def atlas_statistics(in_image: str, in_atlas_list: list) -> list: atlas_statistics : List List of paths to TSV files. 
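+
+    Examples
+    --------
+    A minimal sketch (file names are illustrative; atlas resources must be available):
+
+    >>> atlas_statistics("sub-01_ses-M000_pet.nii.gz", ["AAL2", "AICHA"])  # doctest: +SKIP
+    [PosixPath('.../sub-01_ses-M000_pet_space-AAL2_statistics.tsv'), ...]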
""" - from os import getcwd - from os.path import abspath, join + from pathlib import Path from nipype.utils.filemanip import split_filename - from clinica.utils.atlas import AtlasAbstract from clinica.utils.statistics import statistics_on_atlas orig_dir, base, ext = split_filename(str(in_image)) - atlas_classes = AtlasAbstract.__subclasses__() atlas_statistics_list = [] for atlas in in_atlas_list: - for atlas_class in atlas_classes: - if atlas_class.get_name_atlas() == atlas: - out_atlas_statistics = abspath( - join(getcwd(), base + "_space-" + atlas + "_statistics.tsv") - ) - statistics_on_atlas(str(in_image), atlas_class(), out_atlas_statistics) - atlas_statistics_list.append(out_atlas_statistics) - break + out_atlas_statistics = Path.cwd() / f"{base}_space-{atlas}_statistics.tsv" + statistics_on_atlas(in_image, atlas, out_atlas_statistics) + atlas_statistics_list.append(out_atlas_statistics) + break return atlas_statistics_list diff --git a/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_pipeline.py b/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_pipeline.py index 560802735..0e7a118de 100644 --- a/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_pipeline.py +++ b/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_pipeline.py @@ -16,12 +16,12 @@ def _check_custom_dependencies(self) -> None: def _check_pipeline_parameters(self) -> None: """Check pipeline parameters.""" - from clinica.utils.atlas import T1_VOLUME_ATLASES + from clinica.utils.atlas import T1AndPetVolumeAtlasName from clinica.utils.group import check_group_label self.parameters.setdefault("group_label", None) check_group_label(self.parameters["group_label"]) - self.parameters.setdefault("atlases", T1_VOLUME_ATLASES) + self.parameters.setdefault("atlases", T1AndPetVolumeAtlasName) self.parameters.setdefault("modulate", True) def get_input_fields(self) -> List[str]: diff --git a/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_utils.py b/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_utils.py index 948b873d2..93bddb318 100644 --- a/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_utils.py +++ b/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_utils.py @@ -11,11 +11,10 @@ def atlas_statistics(in_image, atlas_list): Returns: List of paths to TSV files """ - from os.path import abspath, join + from pathlib import Path from nipype.utils.filemanip import split_filename - from clinica.utils.atlas import AtlasAbstract from clinica.utils.filemanip import get_subject_id from clinica.utils.statistics import statistics_on_atlas from clinica.utils.ux import print_end_image @@ -23,15 +22,12 @@ def atlas_statistics(in_image, atlas_list): subject_id = get_subject_id(in_image) orig_dir, base, ext = split_filename(in_image) - atlas_classes = AtlasAbstract.__subclasses__() atlas_statistics_list = [] for atlas in atlas_list: - for atlas_class in atlas_classes: - if atlas_class.get_name_atlas() == atlas: - out_atlas_statistics = abspath( - join(f"./{base}_space-{atlas}_map-graymatter_statistics.tsv") - ) - statistics_on_atlas(in_image, atlas_class(), out_atlas_statistics) - atlas_statistics_list.append(out_atlas_statistics) + out_atlas_statistics = Path( + f"./{base}_space-{atlas}_map-graymatter_statistics.tsv" + ).resolve() + statistics_on_atlas(in_image, atlas, out_atlas_statistics) + atlas_statistics_list.append(out_atlas_statistics) print_end_image(subject_id) return atlas_statistics_list diff --git 
a/clinica/pydra/pet_volume/pipeline.py b/clinica/pydra/pet_volume/pipeline.py index 30f24894b..dafaf7a6f 100644 --- a/clinica/pydra/pet_volume/pipeline.py +++ b/clinica/pydra/pet_volume/pipeline.py @@ -33,7 +33,7 @@ def _check_pipeline_parameters(parameters: dict) -> dict: dict : Cleaned dictionary of parameters. """ - from clinica.utils.atlas import PET_VOLUME_ATLASES + from clinica.utils.atlas import T1AndPetVolumeAtlasName from clinica.utils.group import check_group_label parameters.setdefault("group_label", None) @@ -46,7 +46,7 @@ def _check_pipeline_parameters(parameters: dict) -> dict: parameters.setdefault("mask_threshold", 0.3) parameters.setdefault("pvc_mask_tissues", [1, 2, 3]) parameters.setdefault("smooth", 8.0) - parameters.setdefault("atlases", PET_VOLUME_ATLASES) + parameters.setdefault("atlases", T1AndPetVolumeAtlasName) return parameters diff --git a/clinica/utils/atlas.py b/clinica/utils/atlas.py index 98b09d033..7214440cb 100644 --- a/clinica/utils/atlas.py +++ b/clinica/utils/atlas.py @@ -12,83 +12,118 @@ """ import abc +from enum import Enum +from pathlib import Path +from typing import Union + +import nibabel as nib +import numpy as np +from nibabel import Nifti1Header + + +class T1AndPetVolumeAtlasName(str, Enum): + """Possible names for T1 / PET atlases.""" + + AAL2 = "AAL2" + AICHA = "AICHA" + HAMMERS = "Hammers" + LPBA40 = "LPBA40" + NEUROMORPHOMETRICS = "Neuromorphometrics" + + +class AtlasName(str, Enum): + """Possible names for atlases.""" + + AAL2 = "AAL2" + AICHA = "AICHA" + HAMMERS = "Hammers" + LPBA40 = "LPBA40" + NEUROMORPHOMETRICS = "Neuromorphometrics" + JHUDTI81 = "JHUDTI81" + JHUTract0 = "JHUTracts0" + JHUTract25 = "JHUTracts25" + JHUTracts50 = "JHUTracts50" + + +def _get_resolution_along_axis(label_image_header: Nifti1Header, axis: int) -> str: + voxels_labels = label_image_header.get_zooms() + if not 0 <= axis < len(voxels_labels): + raise ValueError( + f"The label image has dimension {len(voxels_labels)} and " + f"axis {axis} is therefore not valid. Please use a value " + f"between 0 and {len(voxels_labels) - 1}." + ) + if int(voxels_labels[axis]) == voxels_labels[axis]: + return str(int(voxels_labels[axis])) + return str(voxels_labels[axis]) -T1_VOLUME_ATLASES = [ - "AAL2", - "AICHA", - "Hammers", - "LPBA40", - "Neuromorphometrics", -] -PET_VOLUME_ATLASES = [ - "AAL2", - "AICHA", - "Hammers", - "LPBA40", - "Neuromorphometrics", -] +class BaseAtlas: + """Base class for Atlas handling.""" -DWI_DTI_ATLASES = [ - "JHUDTI81", - "JHUTract0", - "JHUTract25", -] + __metaclass__ = abc.ABCMeta -VOLUME_ATLASES = list(set(T1_VOLUME_ATLASES + PET_VOLUME_ATLASES + DWI_DTI_ATLASES)) + def __init__(self, name: str, roi_filename: str): + self.name = name + self.roi_filename = roi_filename + self.atlas_dir = None + self.atlas_filename = None + @property + @abc.abstractmethod + def expected_checksum(self) -> str: + raise NotImplementedError -class AtlasAbstract: - """Abstract class for Atlas handling. + @property + def atlas_folder(self) -> Path: + return Path(__file__).parent.parent / "resources" / "atlases" - Naming convention for children classes of AtlasAbstract: - [][] - """ + @property + def tsv_roi(self) -> Path: + """Path to the parcellation TSV file. - __metaclass__ = abc.ABCMeta + The TSV file must contain the `roi_value` and `roi_name` columns: - @staticmethod - @abc.abstractmethod - def get_name_atlas(): - """Return the name of the atlas (as defined in BIDS/CAPS specifications).""" + roi_value roi_name + 0 Background + 2001 Precentral_L + [...] [...] 
+ 9170 Vermis_10 + """ + return self.atlas_folder / self.roi_filename - def get_spatial_resolution(self): + @property + def spatial_resolution(self) -> str: """Return the spatial resolution of the atlas (in format "XxXxX" e.g. 1.5x1.5x1.5).""" - import nibabel as nib - - img_labels = nib.load(self.get_atlas_labels()) - voxels_labels = img_labels.header.get_zooms() - # Will display integers without decimals - if int(voxels_labels[0]) == voxels_labels[0]: - s_x = str(int(voxels_labels[0])) - else: - s_x = str(voxels_labels[0]) - if int(voxels_labels[1]) == voxels_labels[1]: - s_y = str(int(voxels_labels[1])) - else: - s_y = str(voxels_labels[1]) - if int(voxels_labels[2]) == voxels_labels[2]: - s_z = str(int(voxels_labels[2])) - else: - s_z = str(voxels_labels[2]) - - return f"{s_x}x{s_y}x{s_z}" - - @staticmethod - @abc.abstractmethod - def get_atlas_labels(): - """Return the image with the different labels/ROIs.""" + img_labels = nib.load(self.labels) + return "x".join( + _get_resolution_along_axis(img_labels.header, axis=axis) + for axis in range(3) + ) - @staticmethod - @abc.abstractmethod - def get_tsv_roi(): - """Return the TSV file containing the ROI (regions of interest) of the atlas.""" + @property + def labels(self) -> Path: + """Path to the parcellation in NIfTI format. - def get_index(self): - import nibabel as nib - import numpy as np + Raises + ------ + IOError : + If the checksum of the parcellation found is different from + the expected checksum. + """ + from .inputs import compute_sha256_hash + + atlas_labels = self.atlas_dir / self.atlas_filename + if (checksum := compute_sha256_hash(atlas_labels)) != self.expected_checksum: + raise IOError( + f"{atlas_labels} has an SHA256 checksum ({checksum}) " + f"differing from expected ({self.expected_checksum}), " + f"file may be corrupted and changed with newer version of FSL." + ) + return atlas_labels - img_labels = nib.load(self.get_atlas_labels()) + def get_index(self) -> np.ndarray: + img_labels = nib.load(self.labels) img_labels = img_labels.get_fdata(dtype="float32") labels = list(set(img_labels.ravel())) index_vector = np.zeros(len(labels)) @@ -97,354 +132,319 @@ def get_index(self): return index_vector -class JHUDTI811mm(AtlasAbstract): - def __init__(self): - AtlasAbstract.__init__(self) +class FSLAtlas(BaseAtlas): + """FSL atlases look for the labels in the FSL folder (requires FSL).""" - @staticmethod - def get_name_atlas(): - return "JHUDTI81" + def __init__(self, name: str, roi_filename: str, atlas_filename: str): + from .check_dependency import check_environment_variable - @staticmethod - def get_atlas_labels(): - import os + super().__init__(name, roi_filename) + self.atlas_filename = atlas_filename + fsl_dir = Path(check_environment_variable("FSLDIR", "FSL")) + self.atlas_dir = fsl_dir / "data" / "atlases" / "JHU" - import nipype.interfaces.fsl as fsl - from .check_dependency import check_environment_variable - from .inputs import _sha256 +class JHUDTI811mm(FSLAtlas): + """JHUDTI811mm atlas. - fsl_dir = check_environment_variable("FSLDIR", "FSL") - atlas_labels = os.path.join( - fsl_dir, "data", "atlases", "JHU", "JHU-ICBM-labels-1mm.nii.gz" + This atlas contains 48 white matter tract labels that were created by manually + segmenting a standard-space average of diffusion MRI tensor maps from 81 subjects. 
+ + References + ---------- + https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Atlases + https://www.sciencedirect.com/science/article/abs/pii/S105381190700688X?via%3Dihub + """ + + def __init__(self): + super().__init__( + name="JHUDTI81", + roi_filename="atlas-JHUDTI81_dseg.tsv", + atlas_filename="JHU-ICBM-labels-1mm.nii.gz", ) - # Adding checksum for updated file with version 6.0.5 of fsl - fsl_atlas_checksums = { - "old": "fac584ec75ff2a8631710d3345df96733ed87d9bde3387f5b462f8d22914ed69", - "new": "3c3f5d2f1250a3df60982acff35a75b99fd549a05d5f8124a63f78221aa0ec16", - } + @property + def expected_checksum(self) -> str: + import nipype.interfaces.fsl as fsl if ["5", "0", "5"] <= fsl.Info.version().split(".") < ["6", "0", "5"]: - expected_checksum = fsl_atlas_checksums["old"] - else: - expected_checksum = fsl_atlas_checksums["new"] + return "fac584ec75ff2a8631710d3345df96733ed87d9bde3387f5b462f8d22914ed69" + return "3c3f5d2f1250a3df60982acff35a75b99fd549a05d5f8124a63f78221aa0ec16" - if _sha256(atlas_labels) != expected_checksum: - raise IOError( - f"{atlas_labels} has an SHA256 checksum ({_sha256(atlas_labels)}) " - f"differing from expected ({expected_checksum}), " - f"file may be corrupted and changed with newer version of FSL." - ) - return atlas_labels - @staticmethod - def get_tsv_roi(): - from os.path import join, realpath, split +class JHUTracts01mm(FSLAtlas): + """JHUTracts01mm atlas. - return join( - split(realpath(__file__))[0], - "..", - "resources", - "atlases", - "atlas-JHUDTI81_dseg.tsv", - ) + This atlas contains 20 white matter tract labels that were identified probabilistically + by averaging the results of deterministic tractography run on 28 subjects. + Threshold used is 0%. + References + ---------- + https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Atlases + https://shop.elsevier.com/books/mri-atlas-of-human-white-matter/mori/978-0-444-51741-8 + """ -class JHUTracts01mm(AtlasAbstract): def __init__(self): - AtlasAbstract.__init__(self) + super().__init__( + name="JHUTracts0", + roi_filename="atlas-JHUTract_dseg.tsv", + atlas_filename="JHU-ICBM-tracts-maxprob-thr0-1mm.nii.gz", + ) - @staticmethod - def get_name_atlas(): - return "JHUTracts0" + @property + def expected_checksum(self) -> str: + return "eb1de9413a46b02d2b5c7b77852097c6f42c8a5d55a5dbdef949c2e63b95354e" - @staticmethod - def get_atlas_labels(): - import os - from .check_dependency import check_environment_variable - from .inputs import _sha256 +class JHUTracts251mm(FSLAtlas): + """JHUTracts251mm atlas. - fsl_dir = check_environment_variable("FSLDIR", "FSL") - atlas_labels = os.path.join( - fsl_dir, "data", "atlases", "JHU", "JHU-ICBM-tracts-maxprob-thr0-1mm.nii.gz" - ) - expected_checksum = ( - "eb1de9413a46b02d2b5c7b77852097c6f42c8a5d55a5dbdef949c2e63b95354e" - ) - if _sha256(atlas_labels) != expected_checksum: - raise IOError( - f"{atlas_labels} has an SHA256 checksum ({_sha256(atlas_labels)}) " - f"differing from expected ({expected_checksum}), " - f"file may be corrupted and changed with newer version of FSL." - ) - return atlas_labels + This atlas contains 20 white matter tract labels that were identified probabilistically + by averaging the results of deterministic tractography run on 28 subjects. + Threshold used is 25%. 
- @staticmethod - def get_tsv_roi(): - from os.path import join, realpath, split + References + ---------- + https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Atlases + https://shop.elsevier.com/books/mri-atlas-of-human-white-matter/mori/978-0-444-51741-8 + """ - return join( - split(realpath(__file__))[0], - "..", - "resources", - "atlases", - "atlas-JHUTract_dseg.tsv", + def __init__(self): + super().__init__( + name="JHUTracts25", + roi_filename="atlas-JHUTract_dseg.tsv", + atlas_filename="JHU-ICBM-tracts-maxprob-thr25-1mm.nii.gz", ) + @property + def expected_checksum(self) -> str: + return "7cd85fa2be1918fc83173e9bc0746031fd4c08d70d6c81b7b9224b5d3da6d8a6" -class JHUTracts251mm(AtlasAbstract): - def __init__(self): - AtlasAbstract.__init__(self) - @staticmethod - def get_name_atlas(): - return "JHUTracts25" +class JHUTracts501mm(FSLAtlas): + """JHUTracts501mm atlas. - @staticmethod - def get_atlas_labels(): - import os + This atlas contains 20 white matter tract labels that were identified probabilistically + by averaging the results of deterministic tractography run on 28 subjects. + Threshold used is 50%. - from .check_dependency import check_environment_variable - from .inputs import _sha256 - - fsl_dir = check_environment_variable("FSLDIR", "FSL") - atlas_labels = os.path.join( - fsl_dir, - "data", - "atlases", - "JHU", - "JHU-ICBM-tracts-maxprob-thr25-1mm.nii.gz", - ) - expected_checksum = ( - "7cd85fa2be1918fc83173e9bc0746031fd4c08d70d6c81b7b9224b5d3da6d8a6" - ) - if _sha256(atlas_labels) != expected_checksum: - raise IOError( - f"{atlas_labels} has an SHA256 checksum ({_sha256(atlas_labels)}) " - f"differing from expected ({expected_checksum}), " - f"file may be corrupted and changed with newer version of FSL." - ) - return atlas_labels - - @staticmethod - def get_tsv_roi(): - from os.path import join, realpath, split + References + ---------- + https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Atlases + https://shop.elsevier.com/books/mri-atlas-of-human-white-matter/mori/978-0-444-51741-8 + """ - return join( - split(realpath(__file__))[0], - "..", - "resources", - "atlases", - "atlas-JHUTract_dseg.tsv", + def __init__(self): + super().__init__( + name="JHUTracts50", + roi_filename="atlas-JHUTract_dseg.tsv", + atlas_filename="JHU-ICBM-tracts-maxprob-thr50-1mm.nii.gz", ) + @property + def expected_checksum(self) -> str: + return "20ff0216d770686838de26393c0bdac38c8104760631a1a2b5f518bc0bbb470a" -class JHUTracts501mm(AtlasAbstract): - def __init__(self): - AtlasAbstract.__init__(self) - @staticmethod - def get_name_atlas(): - return "JHUTracts50" +class LocalAtlas(BaseAtlas): + """Local atlases will look for labels in the local 'resources' folder. - @staticmethod - def get_atlas_labels(): - import os + More precisely, the labels and TSV files associated with the atlas + are located in the folder `/resources/atlases/`. + """ - from .check_dependency import check_environment_variable - from .inputs import _sha256 - - fsl_dir = check_environment_variable("FSLDIR", "FSL") - atlas_labels = os.path.join( - fsl_dir, - "data", - "atlases", - "JHU", - "JHU-ICBM-tracts-maxprob-thr50-1mm.nii.gz", - ) - expected_checksum = ( - "20ff0216d770686838de26393c0bdac38c8104760631a1a2b5f518bc0bbb470a" - ) - if _sha256(atlas_labels) != expected_checksum: - raise IOError( - f"{atlas_labels} has an SHA256 checksum ({_sha256(atlas_labels)}) " - f"differing from expected ({expected_checksum}), " - f"file may be corrupted and changed with newer version of FSL." 
- ) - return atlas_labels + def __init__(self, name: str, roi_filename: str, atlas_filename: str): + super().__init__(name, roi_filename) + self.atlas_filename = atlas_filename + self.atlas_dir = Path(__file__).parent.parent / "resources" / "atlases" - @staticmethod - def get_tsv_roi(): - from os.path import join, realpath, split - return join( - split(realpath(__file__))[0], - "..", - "resources", - "atlases", - "atlas-JHUTract_dseg.tsv", - ) +class AAL2(LocalAtlas): + """AAL2 atlas. + + Anatomical atlas based on a single subject. It is the updated version of AAL, which is + probably the most widely used cortical parcellation map in the neuroimaging literature. + It was built using manual tracing on the spatially normalized single-subject high-resolution + T1 volume in MNI space. It is composed of 120 regions covering the whole cortex as well as + the main subcortical structures. + References + ---------- + https://www.gin.cnrs.fr/en/tools/aal/ + https://www.sciencedirect.com/science/article/abs/pii/S1053811901909784?via%3Dihub + """ -class AAL2(AtlasAbstract): def __init__(self): - AtlasAbstract.__init__(self) - - @staticmethod - def get_name_atlas(): - return "AAL2" - - @staticmethod - def get_atlas_labels(): - from os.path import join, realpath, split - - return join( - split(realpath(__file__))[0], - "..", - "resources", - "atlases", - "atlas-AAL2_dseg.nii.gz", + super().__init__( + name="AAL2", + roi_filename="atlas-AAL2_dseg.tsv", + atlas_filename="atlas-AAL2_dseg.nii.gz", ) - @staticmethod - def get_tsv_roi(): - from os.path import join, realpath, split + @property + def expected_checksum(self) -> str: + return "f6bc698f778a4b383abd3ce355bfd4505c4aa14708e4a7848f8ee928c2b56b37" - return join( - split(realpath(__file__))[0], - "..", - "resources", - "atlases", - "atlas-AAL2_dseg.tsv", - ) +class AICHA(LocalAtlas): + """AICHA atlas. + + Functional atlas based on multiple subjects. It was built using parcellation of group-level + functional connectivity profiles computed from resting-state fMRI data of 281 healthy subjects. + It is composed of 384 regions covering the whole cortex as well as the main subcortical structures. 
+
+    References
+    ----------
+    https://www.gin.cnrs.fr/en/tools/aicha/
+    https://www.sciencedirect.com/science/article/abs/pii/S0165027015002678?via%3Dihub
+    """
 
-class Hammers(AtlasAbstract):
     def __init__(self):
-        AtlasAbstract.__init__(self)
+        super().__init__(
+            name="AICHA",
+            roi_filename="atlas-AICHA_dseg.tsv",
+            atlas_filename="atlas-AICHA_dseg.nii.gz",
+        )
+
+    @property
+    def expected_checksum(self) -> str:
+        return "cab554d5f546720e60f61f536f82c3d355b31fadb5a4d3ce6a050a606d7ef761"
+
+
+class RemoteAtlas(BaseAtlas):
+    """Remote atlases will download the labels from the aramislab server."""
 
-    @staticmethod
-    def get_name_atlas():
+    def __init__(self, name: str, roi_filename: str, atlas_filename: str):
+        super().__init__(name, roi_filename)
+        self.atlas_filename = atlas_filename
 
-        return "Hammers"
+    @property
+    def labels(self) -> Path:
         from clinica.utils.inputs import RemoteFileStructure, get_file_from_server
 
-    @staticmethod
-    def get_atlas_labels():
-        from clinica.utils.inputs import RemoteFileStructure, get_file_from_server
-
-        hammers_parc = RemoteFileStructure(
-            filename="atlas-Hammers_dseg.nii.gz",
-            url="https://aramislab.paris.inria.fr/files/software/cat12/CAT12-Atlases/",
-            checksum="c034a7bce2dcab390a0b72f4e7d04769eb3fe5b990d0e18d89b0ce73339a5376",
-        )
-        return get_file_from_server(hammers_parc)
-
-    @staticmethod
-    def get_tsv_roi():
-        from os.path import join, realpath, split
-
-        return join(
-            split(realpath(__file__))[0],
-            "..",
-            "resources",
-            "atlases",
-            "atlas-Hammers_dseg.tsv",
+        return get_file_from_server(
+            RemoteFileStructure(
+                filename=self.atlas_filename,
+                url="https://aramislab.paris.inria.fr/files/software/cat12/CAT12-Atlases/",
+                checksum=self.expected_checksum,
+            )
         )
 
 
-class LPBA40(AtlasAbstract):
-    def __init__(self):
-        AtlasAbstract.__init__(self)
+class Hammers(RemoteAtlas):
+    """Hammers atlas.
 
-    @staticmethod
-    def get_name_atlas():
+    Anatomical atlas based on multiple subjects. It was built using manual tracing on anatomical
+    MRI from 30 healthy subjects. The individual subjects parcellations were then registered to MNI
+    space to generate a probabilistic atlas as well as a maximum probability map. The latter was
+    used in the present work. It is composed of 69 regions covering the whole cortex as well
+    as the main subcortical structures.
 
-        return "LPBA40"
+    References
+    ----------
+    https://neuro-jena.github.io/cat//index.html#DOWNLOAD
+    https://onlinelibrary.wiley.com/doi/epdf/10.1002/hbm.10123
+    """
 
-    @staticmethod
-    def get_atlas_labels():
-        from clinica.utils.inputs import RemoteFileStructure, get_file_from_server
+    def __init__(self):
+        super().__init__(
+            name="Hammers",
+            roi_filename="atlas-Hammers_dseg.tsv",
+            atlas_filename="atlas-Hammers_dseg.nii.gz",
+        )
 
-        lpba40_parc = RemoteFileStructure(
-            filename="atlas-LPBA40_dseg.nii.gz",
-            url="https://aramislab.paris.inria.fr/files/software/cat12/CAT12-Atlases/",
-            checksum="20826b572bbbdbcdbf28bbd3801dc0c2fed28d1e54bc4fd5027e64ccc6d50374",
-        )
-        return get_file_from_server(lpba40_parc)
-
-    @staticmethod
-    def get_tsv_roi():
-        from os.path import join, realpath, split
-
-        return join(
-            split(realpath(__file__))[0],
-            "..",
-            "resources",
-            "atlases",
-            "atlas-LPBA40_dseg.tsv",
+    @property
+    def expected_checksum(self) -> str:
+        return "c034a7bce2dcab390a0b72f4e7d04769eb3fe5b990d0e18d89b0ce73339a5376"
+
+
+class LPBA40(RemoteAtlas):
+    """LPBA40 atlas.
+
+    Anatomical atlas based on multiple subjects. It was built using manual tracing on anatomical
+    MRI from 40 healthy subjects. The individual subjects parcellations were then registered to MNI
+    space to generate a maximum probability map. It is composed of 56 regions covering the whole
+    cortex as well as the main subcortical structures.
+
+    References
+    ----------
+    https://neuro-jena.github.io/cat//index.html#DOWNLOAD
+    https://www.sciencedirect.com/science/article/abs/pii/S1053811907008099?via%3Dihub
+    """
 
-class AICHA(AtlasAbstract):
     def __init__(self):
-        AtlasAbstract.__init__(self)
-
-    @staticmethod
-    def get_name_atlas():
-        return "AICHA"
-
-    @staticmethod
-    def get_atlas_labels():
-        from os.path import join, realpath, split
-
-        return join(
-            split(realpath(__file__))[0],
-            "..",
-            "resources",
-            "atlases",
-            "atlas-AICHA_dseg.nii.gz",
+        super().__init__(
+            name="LPBA40",
+            roi_filename="atlas-LPBA40_dseg.tsv",
+            atlas_filename="atlas-LPBA40_dseg.nii.gz",
         )
 
-    @staticmethod
-    def get_tsv_roi():
-        from os.path import join, realpath, split
+    @property
+    def expected_checksum(self) -> str:
+        return "20826b572bbbdbcdbf28bbd3801dc0c2fed28d1e54bc4fd5027e64ccc6d50374"
 
-        return join(
-            split(realpath(__file__))[0],
-            "..",
-            "resources",
-            "atlases",
-            "atlas-AICHA_dseg.tsv",
-        )
 
+class Neuromorphometrics(RemoteAtlas):
+    """Neuromorphometrics atlas.
+
+    Anatomical atlas based on multiple subjects. It was built using manual tracing on anatomical
+    MRI from 30 healthy subjects. The individual subjects parcellations were then registered to
+    MNI space to generate a maximum probability map. It is composed of 140 regions covering the
+    whole cortex as well as the main subcortical structures. Data were made available for the
+    “MICCAI 2012 Grand Challenge and Workshop on Multi-Atlas Labeling”.
+
+    References
+    ----------
+    https://neuro-jena.github.io/cat//index.html#DOWNLOAD
+    http://masiweb.vuse.vanderbilt.edu/workshop2012/index.php/Challenge_Details
+    """
 
-class Neuromorphometrics(AtlasAbstract):
     def __init__(self):
-        AtlasAbstract.__init__(self)
+        super().__init__(
+            name="Neuromorphometrics",
+            roi_filename="atlas-Neuromorphometrics_dseg.tsv",
+            atlas_filename="atlas-Neuromorphometrics_dseg.nii.gz",
+        )
 
-    @staticmethod
-    def get_name_atlas():
+    @property
+    def expected_checksum(self) -> str:
+        return "19a50136cd2f8a14357a19ad8a1dc4a2ecb6beb3fc16cb5441f4f2ebaf64a9a5"
 
-        return "Neuromorphometrics"
 
-    @staticmethod
-    def get_atlas_labels():
-        from clinica.utils.inputs import RemoteFileStructure, get_file_from_server
-
-        neuromorphometrics_parc = RemoteFileStructure(
-            filename="atlas-Neuromorphometrics_dseg.nii.gz",
-            url="https://aramislab.paris.inria.fr/files/software/cat12/CAT12-Atlases/",
-            checksum="19a50136cd2f8a14357a19ad8a1dc4a2ecb6beb3fc16cb5441f4f2ebaf64a9a5",
-        )
-        return get_file_from_server(neuromorphometrics_parc)
-
-    @staticmethod
-    def get_tsv_roi():
-        from os.path import join, realpath, split
-
-        return join(
-            split(realpath(__file__))[0],
-            "..",
-            "resources",
-            "atlases",
-            "atlas-Neuromorphometrics_dseg.tsv",
-        )
 
+def atlas_factory(atlas_name: Union[str, AtlasName, BaseAtlas]) -> BaseAtlas:
+    """Factory method for atlases.
+
+    Parameters
+    ----------
+    atlas_name : str or AtlasName or atlas instance
+        If an atlas instance, the instance is returned.
+        If a string, the corresponding atlas will be returned.
+
+    Returns
+    -------
+    BaseAtlas :
+        The atlas instance corresponding to the provided name.
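+
+    Examples
+    --------
+    >>> atlas_factory("AAL2").name
+    'AAL2'
+
+    FSL-based atlases additionally require the FSLDIR environment variable:
+
+    >>> atlas_factory("JHUTracts25").name  # doctest: +SKIP
+    'JHUTracts25'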
+ """ + if isinstance(atlas_name, BaseAtlas): + return atlas_name + if isinstance(atlas_name, str): + atlas_name = AtlasName(atlas_name) + if atlas_name == AtlasName.AAL2: + return AAL2() + if atlas_name == AtlasName.AICHA: + return AICHA() + if atlas_name == AtlasName.HAMMERS: + return Hammers() + if atlas_name == AtlasName.LPBA40: + return LPBA40() + if atlas_name == AtlasName.NEUROMORPHOMETRICS: + return Neuromorphometrics() + if atlas_name == AtlasName.JHUDTI81: + return JHUDTI811mm() + if atlas_name == AtlasName.JHUTract0: + return JHUTracts01mm() + if atlas_name == AtlasName.JHUTract25: + return JHUTracts251mm() + if atlas_name == AtlasName.JHUTracts50: + return JHUTracts501mm() diff --git a/clinica/utils/bids.py b/clinica/utils/bids.py new file mode 100644 index 000000000..c737e3e24 --- /dev/null +++ b/clinica/utils/bids.py @@ -0,0 +1,173 @@ +from dataclasses import dataclass +from enum import Enum +from os import PathLike +from pathlib import Path +from typing import Dict, Tuple, Union + + +class Extension(str, Enum): + """Possible extensions in BIDS file names.""" + + NIIGZ = ".nii.gz" + NII = ".nii" + JSON = ".json" + TSV = ".tsv" + MAT = ".mat" + BVAL = ".bval" + BVEC = ".bvec" + + +class Suffix(str, Enum): + """Possible suffixes in BIDS file names.""" + + DWI = "dwi" + PET = "pet" + T1W = "t1w" + T2W = "t2w" + FLAIR = "flair" + AFFINE = "affine" + PROBABILITY = "probability" + DEFORMATION = "deformation" + PHASEDIFF = "phasediff" + MAGNITUDE1 = "magnitude1" + BRAINMASK = "brainmask" + STATISTICS = "statistics" + DIFFMODEL = "diffmodel" + PARCELLATION = "parcellation" + + +class BIDSLabel(str): + """A BIDS label is a short string which does not contain symbols + used for separating entities in BIDS terminology. + """ + + _min_size = 1 + _max_size = 100 + _forbidden_symbols = ("-", "_", ".") + + def __new__(cls, string): + if not cls._min_size <= len(string) <= cls._max_size: + raise ValueError( + f"A string must be between {cls._min_size} and {cls._max_size} to be a valid BIDS label." + ) + if any([symbol in string for symbol in cls._forbidden_symbols]): + raise ValueError( + f"Provided string '{string}' is not a valid BIDS label because " + f"it contains at least one of these characters: {cls._forbidden_symbols}." 
+ ) + instance = super().__new__(cls, string) + return instance + + +@dataclass +class BIDSFileName: + """Class modeling a file name following the BIDS specifications.""" + + _subject: BIDSLabel + _session: BIDSLabel + _suffix: Suffix + _extension: Extension + entities: Dict[BIDSLabel, BIDSLabel] + + @property + def subject(self) -> str: + return self._subject + + @subject.setter + def subject(self, subject: str): + self._subject = BIDSLabel(subject) + + @property + def session(self) -> str: + return self._session + + @session.setter + def session(self, session: str): + self._session = BIDSLabel(session) + + @property + def suffix(self) -> str: + return self._suffix.value + + @suffix.setter + def suffix(self, suffix: Union[str, Suffix]): + self._suffix = Suffix(suffix) + + @property + def extension(self) -> str: + return self._extension.value + + @extension.setter + def extension(self, extension: Union[str, Extension]): + self._extension = Extension(extension) + + @property + def sub_ses_id(self) -> str: + return f"sub-{self.subject}_ses-{self.session}" + + @property + def name(self) -> str: + if self.entities: + txt = "_".join([f"{k}-{v}" for k, v in self.entities.items()]) + return f"{self.sub_ses_id}_{txt}_{self.suffix}{self.extension}" + return f"{self.sub_ses_id}_{self.suffix}{self.extension}" + + @classmethod + def from_name(cls, filename: Union[str, PathLike]): + filename, extension = split_name_from_extension(filename) + entities, suffix = _tokenize_filename_no_ext(filename) + subject = entities.pop("sub") + session = entities.pop("ses") + return cls( + BIDSLabel(subject), + BIDSLabel(session), + Suffix(suffix), + Extension(extension), + {BIDSLabel(k): BIDSLabel(v) for k, v in entities.items()}, + ) + + def update_entity(self, entity_name: str, entity_value: str): + self.entities[BIDSLabel(entity_name)] = BIDSLabel(entity_value) + + def delete_entity(self, entity_name: str): + entity_name = BIDSLabel(entity_name) + if entity_name in self.entities: + self.entities.pop(entity_name) + + +def _tokenize_filename_no_ext( + filename_without_extension: str, +) -> Tuple[Dict[str, str], str]: + if "_" not in filename_without_extension: + raise ValueError( + f"BIDS file names have entities separated by '_'. " + f"You provided {filename_without_extension}." + ) + tokens = filename_without_extension.split("_") + if len(tokens) < 3: + raise ValueError( + f"A valid BIDS filename should have at least 'sub-XXX_ses-YYY_suffix'. " + f"You provided {filename_without_extension}." + ) + suffix = tokens.pop() + if "-" in suffix: + raise ValueError( + f"When tokenizing the filename {filename_without_extension}, the suffix " + f"found was '{suffix}'. It is invalid because it should not contain a '-' symbol." + ) + if not all(["-" in token for token in tokens]): + raise ValueError( + "The BIDS entities should be key-value pairs separated by a '-' symbol." + f"The entities found are: {tokens}." + ) + entities = {k: v for k, v in [s.split("-") for s in tokens]} + return entities, suffix + + +def split_name_from_extension(filename: Union[str, PathLike]) -> Tuple[str, str]: + extension = "" + filename = Path(filename) + while "." 
diff --git a/clinica/utils/dwi.py b/clinica/utils/dwi.py
index 631446a74..9938baa43 100644
--- a/clinica/utils/dwi.py
+++ b/clinica/utils/dwi.py
@@ -706,7 +706,13 @@ def extract_bids_identifier_from_filename(caps_dwi_filename: str) -> str:
         raise ValueError(
             f"Could not extract the BIDS identifier from the DWI input filename {caps_dwi_filename}."
         )
-    return m.group(0).rstrip("_dwi")
+    # str.rstrip("_dwi") strips *characters* from the right, not the "_dwi" suffix,
+    # and could eat trailing 'd', 'w', 'i' or '_' belonging to the identifier.
+    identifier = m.group(0)
+    if identifier.endswith("_dwi"):
+        identifier = identifier[: -len("_dwi")]
+
+    return identifier
 
 
 def rename_files(in_caps_dwi: str, mapping: dict) -> tuple:
diff --git a/clinica/utils/input_files.py b/clinica/utils/input_files.py
index d902f4f29..968c3d576 100644
--- a/clinica/utils/input_files.py
+++ b/clinica/utils/input_files.py
@@ -342,18 +342,16 @@ def wrapper_aggregator(*args, **kwargs):
 
 @aggregator
 def t1_volume_native_tpm(tissue_number):
-    import os
+    from pathlib import Path
 
     from .spm import INDEX_TISSUE_MAP
 
     return {
-        "pattern": os.path.join(
-            "t1",
-            "spm",
-            "segmentation",
-            "native_space",
-            f"*_*_T1w_segm-{INDEX_TISSUE_MAP[tissue_number]}_probability.nii*",
-        ),
+        "pattern": Path("t1")
+        / "spm"
+        / "segmentation"
+        / "native_space"
+        / f"*_*_T1w_segm-{INDEX_TISSUE_MAP[tissue_number]}_probability.nii*",
         "description": f"Tissue probability map {INDEX_TISSUE_MAP[tissue_number]} in native space",
         "needed_pipeline": "t1-volume-tissue-segmentation",
     }
@@ -361,18 +359,16 @@ def t1_volume_native_tpm(tissue_number):
 
 @aggregator
 def t1_volume_dartel_input_tissue(tissue_number):
-    import os
+    from pathlib import Path
 
     from .spm import INDEX_TISSUE_MAP
 
     return {
-        "pattern": os.path.join(
-            "t1",
-            "spm",
-            "segmentation",
-            "dartel_input",
-            f"*_*_T1w_segm-{INDEX_TISSUE_MAP[tissue_number]}_dartelinput.nii*",
-        ),
+        "pattern": Path("t1")
+        / "spm"
+        / "segmentation"
+        / "dartel_input"
+        / f"*_*_T1w_segm-{INDEX_TISSUE_MAP[tissue_number]}_dartelinput.nii*",
         "description": f"Dartel input for tissue probability map {INDEX_TISSUE_MAP[tissue_number]} from T1w MRI",
         "needed_pipeline": "t1-volume-tissue-segmentation",
     }
@@ -380,7 +376,7 @@ def t1_volume_dartel_input_tissue(tissue_number):
 
 @aggregator
 def t1_volume_native_tpm_in_mni(tissue_number, modulation):
-    import os
+    from pathlib import Path
 
     from .spm import INDEX_TISSUE_MAP
 
@@ -388,13 +384,11 @@ def t1_volume_native_tpm_in_mni(tissue_number, modulation):
     description_modulation = "with" if modulation else "without"
 
     return {
-        "pattern": os.path.join(
-            "t1",
-            "spm",
-            "segmentation",
-            "normalized_space",
-            f"*_*_T1w_segm-{INDEX_TISSUE_MAP[tissue_number]}_space-Ixi549Space_modulated-{pattern_modulation}_probability.nii*",
-        ),
+        "pattern": Path("t1")
+        / "spm"
+        / "segmentation"
+        / "normalized_space"
+        / f"*_*_T1w_segm-{INDEX_TISSUE_MAP[tissue_number]}_space-Ixi549Space_modulated-{pattern_modulation}_probability.nii*",
         "description": (
             f"Tissue probability map {INDEX_TISSUE_MAP[tissue_number]} based on "
             f"native MRI in MNI space (Ixi549) {description_modulation} modulation."
@@ -426,7 +420,14 @@ def t1_volume_template_tpm_in_mni(group_label, tissue_number, modulation, fwhm=N
     dict : Information dict to be passed to clinica_file_reader.
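+
+    Examples
+    --------
+    A sketch of the returned query (only the keys are asserted here; the group label is illustrative):
+
+    >>> sorted(t1_volume_template_tpm_in_mni("UnitTest", 1, True))
+    ['description', 'needed_pipeline', 'pattern']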
""" - import os + from pathlib import Path from .spm import INDEX_TISSUE_MAP @@ -436,13 +430,11 @@ def t1_volume_template_tpm_in_mni(group_label, tissue_number, modulation, fwhm=N fwhm_description = f"with {fwhm}mm smoothing" if fwhm else "with no smoothing" return { - "pattern": os.path.join( - "t1", - "spm", - "dartel", - f"group-{group_label}", - f"*_T1w_segm-{INDEX_TISSUE_MAP[tissue_number]}_space-Ixi549Space_modulated-{pattern_modulation}{fwhm_key_value}_probability.nii*", - ), + "pattern": Path("t1") + / "spm" + / "dartel" + / f"group-{group_label}" + / f"*_T1w_segm-{INDEX_TISSUE_MAP[tissue_number]}_space-Ixi549Space_modulated-{pattern_modulation}{fwhm_key_value}_probability.nii*", "description": ( f"Tissue probability map {INDEX_TISSUE_MAP[tissue_number]} based " f"on {group_label} template in MNI space (Ixi549) {description_modulation} modulation and {fwhm_description}." @@ -452,16 +444,14 @@ def t1_volume_template_tpm_in_mni(group_label, tissue_number, modulation, fwhm=N def t1_volume_deformation_to_template(group_label): - import os + from pathlib import Path information = { - "pattern": os.path.join( - "t1", - "spm", - "dartel", - f"group-{group_label}", - f"sub-*_ses-*_T1w_target-{group_label}_transformation-forward_deformation.nii*", - ), + "pattern": Path("t1") + / "spm" + / "dartel" + / f"group-{group_label}" + / f"sub-*_ses-*_T1w_target-{group_label}_transformation-forward_deformation.nii*", "description": f"Deformation from native space to group template {group_label} space.", "needed_pipeline": "t1-volume-create-dartel", } @@ -470,14 +460,12 @@ def t1_volume_deformation_to_template(group_label): @aggregator def t1_volume_i_th_iteration_group_template(group_label, i): - import os + from pathlib import Path information = { - "pattern": os.path.join( - f"group-{group_label}", - "t1", - f"group-{group_label}_iteration-{i}_template.nii*", - ), + "pattern": Path(f"group-{group_label}") + / "t1" + / f"group-{group_label}_iteration-{i}_template.nii*", "description": f"Iteration #{i} of Dartel template {group_label}", "needed_pipeline": "t1-volume or t1-volume-create-dartel", } @@ -485,12 +473,12 @@ def t1_volume_i_th_iteration_group_template(group_label, i): def t1_volume_final_group_template(group_label): - import os + from pathlib import Path information = { - "pattern": os.path.join( - f"group-{group_label}", "t1", f"group-{group_label}_template.nii*" - ), + "pattern": Path(f"group-{group_label}") + / "t1" + / f"group-{group_label}_template.nii*", "description": f"T1w template file of group {group_label}", "needed_pipeline": "t1-volume or t1-volume-create-dartel", } @@ -538,7 +526,7 @@ def custom_group(pattern, description): } DWI_PREPROC_BRAINMASK = { - "pattern": "dwi/preprocessing/sub-*_ses-*_dwi_space-*_brainmask.nii*", + "pattern": "dwi/preprocessing/sub-*_ses-*_space-*_brainmask.nii*", "description": "b0 brainmask", "needed_pipeline": "dwi-preprocessing-using-t1 or dwi-preprocessing-using-fieldmap", } @@ -613,7 +601,7 @@ def bids_pet_nii( dict : The query dictionary to get PET scans. 
""" - import os + from pathlib import Path trc = "" if tracer is None else f"_trc-{tracer.value}" rec = "" if reconstruction is None else f"_rec-{reconstruction.value}" @@ -624,7 +612,7 @@ def bids_pet_nii( description += f" and reconstruction method {reconstruction.value}" return { - "pattern": os.path.join("pet", f"*{trc}{rec}_pet.nii*"), + "pattern": Path("pet") / f"*{trc}{rec}_pet.nii*", "description": description, } @@ -640,7 +628,7 @@ def pet_volume_normalized_suvr_pet( use_pvc_data, fwhm=0, ): - import os + from pathlib import Path if use_brainmasked_image: mask_key_value = "_mask-brain" @@ -666,13 +654,10 @@ def pet_volume_normalized_suvr_pet( suvr_key_value = f"_suvr-{suvr_reference_region}" information = { - "pattern": os.path.join( - "pet", - "preprocessing", - f"group-{group_label}", - f"*_trc-{acq_label}_pet" - f"_space-Ixi549Space{pvc_key_value}{suvr_key_value}{mask_key_value}{fwhm_key_value}_pet.nii*", - ), + "pattern": Path("pet") + / "preprocessing" + / f"group-{group_label}" + / f"*_trc-{acq_label}_pet_space-Ixi549Space{pvc_key_value}{suvr_key_value}{mask_key_value}{fwhm_key_value}_pet.nii*", "description": ( f"{mask_description} SUVR map (using {suvr_reference_region} region) of {acq_label}-PET " f"{pvc_description} and {fwhm_description} in Ixi549Space space based on {group_label} DARTEL template" @@ -686,7 +671,7 @@ def pet_volume_normalized_suvr_pet( def pet_linear_nii(acq_label, suvr_reference_region, uncropped_image): - import os + from pathlib import Path if uncropped_image: description = "" @@ -694,10 +679,8 @@ def pet_linear_nii(acq_label, suvr_reference_region, uncropped_image): description = "_desc-Crop" information = { - "pattern": os.path.join( - "pet_linear", - f"*_trc-{acq_label}_pet_space-MNI152NLin2009cSym{description}_res-1x1x1_suvr-{suvr_reference_region}_pet.nii.gz", - ), + "pattern": Path("pet_linear") + / f"*_trc-{acq_label}_pet_space-MNI152NLin2009cSym{description}_res-1x1x1_suvr-{suvr_reference_region}_pet.nii.gz", "description": "", "needed_pipeline": "pet-linear", } diff --git a/clinica/utils/inputs.py b/clinica/utils/inputs.py index 93bc9a7ae..8325e4037 100644 --- a/clinica/utils/inputs.py +++ b/clinica/utils/inputs.py @@ -5,7 +5,7 @@ from collections import namedtuple from functools import partial from pathlib import Path -from typing import Callable, Dict, List, Optional, Tuple +from typing import Callable, Dict, List, Optional, Tuple, Union RemoteFileStructure = namedtuple("RemoteFileStructure", ["filename", "url", "checksum"]) @@ -80,36 +80,53 @@ def _list_subjects_sub_folders( return subjects_sub_folders -def _common_checks(directory: os.PathLike, folder_type: str) -> None: +def _validate_folder_existence( + directory: Union[str, os.PathLike], folder_type: str +) -> Path: """Utility function which performs checks common to BIDS and CAPS folder structures. Parameters ---------- - directory : PathLike + directory : PathLike or str Directory to check. folder_type : {"BIDS", "CAPS"} The type of directory. + + Returns + ------- + Path : + The directory as a Path. """ from clinica.utils.exceptions import ClinicaBIDSError, ClinicaCAPSError - if not isinstance(directory, (os.PathLike, str)): - raise ValueError( - f"Argument you provided to check_{folder_type.lower()}_folder() is not a string." + try: + directory = Path(directory) + except TypeError: + raise TypeError( + f"Argument you provided to check_{folder_type.lower()}_folder() is not a valid folder name." 
) - error = ClinicaBIDSError if folder_type == "BIDS" else ClinicaCAPSError - - if not os.path.isdir(directory): - raise error( + if not directory.is_dir(): + raise (ClinicaBIDSError if folder_type == "BIDS" else ClinicaCAPSError)( f"The {folder_type} directory you gave is not a folder.\n" "Error explanations:\n" f"\t- Clinica expected the following path to be a folder: {directory}\n" "\t- If you gave relative path, did you run Clinica on the good folder?" ) + return directory + + +_validate_bids_folder_existence = partial( + _validate_folder_existence, folder_type="BIDS" +) +_validate_caps_folder_existence = partial( + _validate_folder_existence, folder_type="CAPS" +) + -def check_bids_folder(bids_directory: os.PathLike) -> None: +def check_bids_folder(bids_directory: Union[str, os.PathLike]) -> None: """Check if provided `bids_directory` is a BIDS folder. Parameters @@ -133,8 +150,7 @@ def check_bids_folder(bids_directory: os.PathLike) -> None: """ from clinica.utils.exceptions import ClinicaBIDSError - bids_directory = Path(bids_directory) - _common_checks(bids_directory, "BIDS") + bids_directory = _validate_bids_folder_existence(bids_directory) if (bids_directory / "subjects").is_dir(): raise ClinicaBIDSError( @@ -155,7 +171,7 @@ def check_bids_folder(bids_directory: os.PathLike) -> None: ) -def check_caps_folder(caps_directory: os.PathLike) -> None: +def check_caps_folder(caps_directory: Union[str, os.PathLike]) -> None: """Check if provided `caps_directory`is a CAPS folder. Parameters @@ -181,8 +197,7 @@ def check_caps_folder(caps_directory: os.PathLike) -> None: """ from clinica.utils.exceptions import ClinicaCAPSError - caps_directory = Path(caps_directory) - _common_checks(caps_directory, "CAPS") + caps_directory = _validate_caps_folder_existence(caps_directory) sub_folders = [f for f in caps_directory.iterdir() if f.name.startswith("sub-")] if len(sub_folders) > 0: @@ -471,7 +486,7 @@ def _check_information(information: Dict) -> None: "'information' can only contain the keys 'pattern', 'description' and 'needed_pipeline'" ) - if item["pattern"][0] == "/": + if isinstance(item["pattern"], str) and item["pattern"][0] == "/": raise ValueError( "pattern argument cannot start with char: / (does not work in os.path.join function). " "If you want to indicate the exact name of the file, use the format " @@ -491,7 +506,7 @@ def _check_information(information: Dict) -> None: "'information' can only contain the keys 'pattern', 'description' and 'needed_pipeline'" ) - if information["pattern"][0] == "/": + if isinstance(information["pattern"], str) and information["pattern"][0] == "/": raise ValueError( "pattern argument cannot start with char: / (does not work in os.path.join function). " "If you want to indicate the exact name of the file, use the format " @@ -884,11 +899,12 @@ def _format_and_raise_group_reader_errors( raise ClinicaCAPSError(error_string) -def _sha256(path): +def compute_sha256_hash(file_path: Path) -> str: """Calculate the sha256 hash of the file at path.""" + sha256hash = hashlib.sha256() chunk_size = 8192 - with open(path, "rb") as f: + with open(file_path, "rb") as f: while True: buffer = f.read(chunk_size) if not buffer: @@ -897,7 +913,9 @@ def _sha256(path): return sha256hash.hexdigest() -def fetch_file(remote: RemoteFileStructure, dirname: Optional[str]) -> str: +def fetch_file( + remote: RemoteFileStructure, output_folder: Union[str, os.PathLike] +) -> Path: """Download a specific file and save it into the resources folder of the package. 
     Parameters
@@ -905,15 +923,15 @@ def fetch_file(remote: RemoteFileStructure, dirname: Optional[str]) -> str:
     remote : RemoteFileStructure
         Structure containing url, filename and checksum.
 
-    dirname : str
+    output_folder : str or PathLike
         Absolute path where the file will be downloaded.
+        The downloaded file keeps the name of the remote file.
 
     Returns
     -------
-    file_path : str
-        Absolute file path.
+    file_path : Path
+        The path to the downloaded file.
     """
-    import os.path
     import shutil
     import ssl
     from urllib.error import URLError
 
     from clinica.utils.stream import cprint
 
-    if not os.path.exists(dirname):
-        cprint(msg="Path to the file does not exist", lvl="warning")
+    output_folder = Path(output_folder)
+    if not output_folder.exists():
+        cprint(
+            msg=f"The path {output_folder} to store the downloaded file does not exist",
+            lvl="warning",
+        )
         cprint(msg="Stop Clinica and handle this error", lvl="warning")
-
-    file_path = os.path.join(dirname, remote.filename)
+    file_path = output_folder / remote.filename
     # Download the file from `url` and save it locally under `file_name`:
     gcontext = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT)
     gcontext.load_default_certs()
@@ -947,8 +968,7 @@ def fetch_file(remote: RemoteFileStructure, dirname: Optional[str]) -> str:
     except OSError as err:
         cprint(msg="OS error: {0}".format(err), lvl="error")
 
-    checksum = _sha256(file_path)
-    if remote.checksum != checksum:
+    if (checksum := compute_sha256_hash(file_path)) != remote.checksum:
         raise IOError(
             f"{file_path} has an SHA256 checksum ({checksum}) from expected "
             f"({remote.checksum}), file may be corrupted."
@@ -959,7 +979,7 @@ def fetch_file(remote: RemoteFileStructure, dirname: Optional[str]) -> str:
 def get_file_from_server(
     remote_file: RemoteFileStructure,
     cache_path: Optional[str] = None,
-) -> str:
+) -> Path:
     """Download file from server.
 
     Parameters
     ----------
     remote_file : RemoteFileStructure
         Structure containing url, filename and checksum.
 
     cache_path : str, optional
         Path to cache.
 
     Returns
     -------
-    local_file : str
-        Path to the downloaded file.
+    local_file : Path
+        The path to the downloaded file.
     """
-    import os
-    from pathlib import Path
-
     from clinica.utils.stream import cprint
 
-    home = str(Path.home())
     if cache_path:
-        cache_clinica = os.path.join(home, ".cache", cache_path)
+        cache_clinica = Path.home() / ".cache" / cache_path
     else:
-        cache_clinica = os.path.join(home, ".cache", "clinica", "data")
-
-    os.makedirs(cache_clinica, exist_ok=True)
+        cache_clinica = Path.home() / ".cache" / "clinica" / "data"
 
-    local_file = os.path.join(cache_clinica, remote_file.filename)
+    cache_clinica.mkdir(exist_ok=True, parents=True)
+    local_file = cache_clinica / remote_file.filename
 
-    if not (os.path.exists(local_file)):
+    if not local_file.exists():
         try:
             local_file = fetch_file(remote_file, cache_clinica)
         except IOError as err:
diff --git a/clinica/utils/statistics.py b/clinica/utils/statistics.py
index 22f1f59e9..9788819ec 100644
--- a/clinica/utils/statistics.py
+++ b/clinica/utils/statistics.py
@@ -2,54 +2,67 @@
 
 Currently, it contains one function to generate TSV file containing mean map based on a parcellation.
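The reworked download helpers now speak `pathlib.Path` end to end. A hypothetical usage sketch — the filename, base URL and checksum below are placeholders, not a real Clinica resource, and `fetch_file` is assumed, as before this patch, to append `remote.filename` to `remote.url` when downloading:

```python
from clinica.utils.inputs import RemoteFileStructure, get_file_from_server

remote = RemoteFileStructure(
    filename="atlas-Example_dseg.nii.gz",  # placeholder file name
    url="https://example.org/atlases/",    # placeholder base URL
    checksum="0" * 64,                     # placeholder SHA256 digest
)
# Downloads into ~/.cache/clinica/data (created if needed) and returns a
# pathlib.Path; if the file is already cached, no download is attempted.
# A mismatch between the SHA256 of the downloaded file and remote.checksum
# raises IOError.
local_file = get_file_from_server(remote)
```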
""" +from os import PathLike +from pathlib import Path +from typing import Optional, Union +from clinica.utils.atlas import AtlasName, BaseAtlas -def statistics_on_atlas(in_normalized_map, in_atlas, out_file=None): + +def statistics_on_atlas( + in_normalized_map: Union[str, PathLike], + atlas: Union[str, AtlasName, BaseAtlas], + out_file: Optional[Union[str, PathLike]] = None, +) -> str: """Compute statistics of a map on an atlas. Given an atlas image with a set of ROIs, this function computes the mean of a normalized map (e.g. GM segmentation, FA map from DTI, etc.) on each ROI. - Args: - in_normalized_map (str): File containing a scalar image registered - on the atlas. - in_atlas (:obj: AbstractClass): An atlas with a set of ROI. These ROI - are used to compute statistics. - out_file (Optional[str]): Name of the output file. + Parameters + ---------- + in_normalized_map : str + File containing a scalar image registered on the atlas. - Returns: - out_file (str): TSV file containing the statistics (content of the - columns: label, mean scalar, std of the scalar', number of voxels). - """ - import os.path as op + atlas : BaseAtlas or AtlasName or str + An atlas with a set of ROI. These ROI are used to compute statistics. + If a string is given, it is assumed to be the name of the atlas to be used. + out_file : str, optional + Name of the output file. + + Returns + ------- + out_file : str + TSV file containing the statistics (content of the columns: label, + mean scalar, std of the scalar', number of voxels). + """ import nibabel as nib import numpy as np - import pandas + import pandas as pd - from clinica.utils.atlas import AtlasAbstract from clinica.utils.stream import cprint - if not isinstance(in_atlas, AtlasAbstract): - raise Exception("Atlas element must be an AtlasAbstract type") + from .atlas import atlas_factory + atlas = atlas_factory(atlas) + in_normalized_map = Path(in_normalized_map) if not out_file: - fname, ext = op.splitext(op.basename(in_normalized_map)) + filename, ext = in_normalized_map.stem, in_normalized_map.suffix if ext == ".gz": - fname, _ = op.splitext(fname) - out_file = op.abspath(f"{fname}_statistics_{in_atlas.get_name_atlas()}.tsv") + filename = Path(filename).stem + out_file = Path(f"{filename}_statistics_{atlas.name}.tsv").resolve() - atlas_labels = nib.load(in_atlas.get_atlas_labels()) + atlas_labels = nib.load(atlas.labels) atlas_labels_data = atlas_labels.get_fdata(dtype="float32") img = nib.load(in_normalized_map) img_data = img.get_fdata(dtype="float32") - atlas_correspondence = pandas.read_csv(in_atlas.get_tsv_roi(), sep="\t") + atlas_correspondence = pd.read_csv(atlas.tsv_roi, sep="\t") label_name = list(atlas_correspondence.roi_name) - label_value = list( - atlas_correspondence.roi_value - ) # TODO create roi_value column in lut_*.txt and remove irrelevant RGB information + # TODO create roi_value column in lut_*.txt and remove irrelevant RGB information + label_value = list(atlas_correspondence.roi_value) mean_signal_value = [] for label in label_value: @@ -59,7 +72,7 @@ def statistics_on_atlas(in_normalized_map, in_atlas, out_file=None): mean_signal_value.append(np.sum(masked_data) / np.sum(current_mask_label)) try: - data = pandas.DataFrame( + data = pd.DataFrame( {"label_name": label_name, "mean_scalar": mean_signal_value} ) data.to_csv(out_file, sep="\t", index=True, encoding="utf-8") diff --git a/docs/Atlases.md b/docs/Atlases.md index 4e91e592a..84d84eada 100644 --- a/docs/Atlases.md +++ b/docs/Atlases.md @@ -34,12 +34,12 @@ It is composed of 
69 regions covering the whole cortex as well as he main subcor is an anatomical atlas based on multiple subjects. It was built using manual tracing on anatomical MRI from 40 healthy subjects. The individual subjects parcellations were then registered to MNI space to generate a maximum probability map. -It is composed of 56 regions covering the whole cortex as well as he main subcortical structures. +It is composed of 56 regions covering the whole cortex as well as the main subcortical structures. - [Neuromorphometrics](http://www.neuro.uni-jena.de/cat/index.html#DOWNLOAD) is an anatomical atlas based on multiple subjects. It was built using manual tracing on anatomical MRI from 30 healthy subjects. The individual subjects parcellations were then registered to MNI space to generate a maximum probability map. -It is composed of 140 regions covering the whole cortex as well as he main subcortical structures. +It is composed of 140 regions covering the whole cortex as well as the main subcortical structures. Data were made available for the “[MICCAI 2012 Grand Challenge and Workshop on Multi-Atlas Labeling](http://masiweb.vuse.vanderbilt.edu/workshop2012/index.php/Challenge_Details)”. The main difference between LBPA40, Hammers and Neuromorphometrics atlases is the degree of detail (i.e. the number of regions) of the anatomical parcellation. @@ -73,76 +73,28 @@ It was built on anatomical MRI of 24 healthy subjects from which 74 cortical ROI !!! tip Easily access the papers cited on this page on [Zotero](https://www.zotero.org/groups/2240070/clinica_aramislab/items/collectionKey/JPGDLCMZ). -## Tutorial: How to add a new volume atlas to Clinica? +## How to add a new volume atlas to Clinica? -It is possible to run the [`t1-volume`](../T1_Volume) and [`pet-volume`](../PET_Volume) pipelines using a custom parcellation. +It is possible to run the [`t1-volume`](../T1_Volume) and [`pet-volume`](../PET_Volume) pipelines using a custom parcellation. To do so: - Install Clinica following the [developer instructions](../Installation/#install-clinica); -- In the `/clinica/utils/atlas.py` file, modify the following two elements: - - The label of the volume atlas that will be stored in CAPS filename(s): - - ```python - T1_VOLUME_ATLASES = [ - "AAL2", - "AICHA", - "Hammers", - "LPBA40", - "Neuromorphometrics", - ] - ``` - - Simply define a new label that will be your new volume. - `T1_VOLUME_ATLASES` is used by all the command-line interfaces using atlases from the [`t1-volume`](../T1_Volume) pipeline so you do not need to modify the pipelines' CLI to make this new region appear. - The same rationale applies to `PET_VOLUME_ATLASES`. - - - Create a new class inherited from `AtlasAbstract` and fill the three compulsory methods. - If we take for instance the AAL2 parcellation: - - ```python - class AAL2(AtlasAbstract): - def __init__(self): - AtlasAbstract.__init__(self) - - @staticmethod - def get_name_atlas(): - return "AAL2" - - @staticmethod - def get_atlas_labels(): - from os.path import join, split, realpath - - return join( - split(realpath(__file__))[0], - "..", - "resources", - "atlases", - "atlas-AAL2_dseg.nii.gz", - ) - - @staticmethod - def get_tsv_roi(): - from os.path import join, split, realpath - - return join( - split(realpath(__file__))[0], - "..", - "resources", - "atlases", - "atlas-AAL2_dseg.tsv", - ) - ``` - - The string returned by the `get_name_atlas()` method must match the label given in the `{T1|PET}_VOLUME_ATLASES` list. 
- The `get_atlas_labels()` method must return the path to the parcellation in NIfTI format while the `get_tsv_roi()` method must return the path to a TSV file. - In this example, the labels and TSV files associated with the `AAL2` atlas are located at `/resources/atlases/atlas-AAL2_dseg.{nii.gz|tsv}`. - Finally, the TSV file must contain the `roi_value` and `roi_name` columns and looks like: - - ```text - roi_value roi_name - 0   Background - 2001  Precentral_L - [...] [...] - 9170  Vermis_10 - ``` + - In the `/clinica/utils/atlas.py` file, modify the following two elements: + - The label of the volume atlas that will be stored in CAPS filename(s): + + ```python + class T1AndPetVolumeAtlasName(str, Enum): + """Possible names for T1 / PET atlases.""" + + AAL2 = "AAL2" + AICHA = "AICHA" + HAMMERS = "Hammers" + LPBA40 = "LPBA40" + NEUROMORPHOMETRICS = "Neuromorphometrics" + ``` + + Simply define a new label by adding a new variant to this enumeration. + The `T1AndPetVolumeAtlasName` enumeration is used by all the command-line interfaces using atlases from the [`t1-volume`](../T1_Volume) and [`pet-volume`](../PET_Volume) pipelines, so you do not need to modify the pipelines' CLI to make this new region appear. + + - Create a new class inheriting from `BaseAtlas` and implementing the required interface. diff --git a/test/nonregression/pipelines/dwi/preprocessing/test_phase_diff.py b/test/nonregression/pipelines/dwi/preprocessing/test_phase_diff.py index 402bae0f7..adc948973 100644 --- a/test/nonregression/pipelines/dwi/preprocessing/test_phase_diff.py +++ b/test/nonregression/pipelines/dwi/preprocessing/test_phase_diff.py @@ -59,6 +59,7 @@ def test_dwi_compute_reference_b0(cmdopt, tmp_path): phase_encoding_direction = bids_dir_to_fsl_dir(phase_encoding_direction) wf = compute_reference_b0( + base_dir=str(tmp_dir), b_value_threshold=5.0, use_cuda=False, initrand=False, diff --git a/test/nonregression/pipelines/dwi/test_pipelines.py b/test/nonregression/pipelines/dwi/test_pipelines.py index 22da85c5f..bdd341a77 100644 --- a/test/nonregression/pipelines/dwi/test_pipelines.py +++ b/test/nonregression/pipelines/dwi/test_pipelines.py @@ -8,6 +8,7 @@ import numpy as np import pandas as pd import pytest +from numpy.testing import assert_array_almost_equal @pytest.mark.slow @@ -30,6 +31,7 @@ def run_dwi_dti( input_dir: Path, output_dir: Path, ref_dir: Path, working_dir: Path ) -> None: from clinica.pipelines.dwi_dti.pipeline import DwiDti + from clinica.utils.bids import BIDSFileName from clinica.utils.dwi import DTIBasedMeasure caps_dir = output_dir / "caps" @@ -46,31 +48,24 @@ def run_dwi_dti( pipeline.build() pipeline.run(plugin="MultiProc", plugin_args={"n_procs": 4}, bypass_check=True) - subject_id = "sub-PREVDEMALS0010025PG" - entities = "ses-M000_dwi_space-JHUDTI81_res-1x1x1" + filename = BIDSFileName.from_name( + "sub-01_ses-M000_space-JHUDTI81_desc-preproc_res-1x1x1_statistics.tsv" + ) output = ( caps_dir / "subjects" - / subject_id + / "sub-01" / "ses-M000" / "dwi" / "dti_based_processing" / "atlas_statistics" ) for measure in DTIBasedMeasure: - out_csv = pd.read_csv( - output / f"{subject_id}_{entities}_map-{measure.value}_statistics.tsv", - sep="\t", - ) - ref_csv = pd.read_csv( - ref_dir / f"{subject_id}_{entities}_map-{measure.value}_statistics.tsv", - sep="\t", - ) - assert np.allclose( - np.array(out_csv.mean_scalar), - np.array(ref_csv.mean_scalar), - rtol=0.025, - equal_nan=True, + filename.update_entity("map", measure.value) + out_csv = pd.read_csv(output / filename.name, sep="\t") + 
ref_csv = pd.read_csv(ref_dir / filename.name, sep="\t") + assert_array_almost_equal( + np.array(out_csv.mean_scalar), np.array(ref_csv.mean_scalar), decimal=2 ) @@ -78,6 +73,7 @@ def run_dwi_connectome( input_dir: Path, output_dir: Path, ref_dir: Path, working_dir: Path ) -> None: from clinica.pipelines.dwi_connectome.pipeline import DwiConnectome + from clinica.utils.bids import BIDSFileName caps_dir = output_dir / "caps" @@ -94,27 +90,28 @@ def run_dwi_connectome( pipeline.build() pipeline.run(plugin="MultiProc", plugin_args={"n_procs": 4}, bypass_check=True) - session_id = "ses-M000" - subject_id = "sub-PREVDEMALS0010025PG" - suffix = "dwi_space-b0_model-CSD_diffmodel.nii.gz" + filename = BIDSFileName.from_name( + "sub-01_ses-M000_space-b0_desc-preproc_model-CSD_diffmodel.nii.gz" + ) output_folder = ( caps_dir / "subjects" - / subject_id - / session_id + / "sub-01" + / "ses-M000" / "dwi" / "connectome_based_processing" ) - out_fod_file = output_folder / f"{subject_id}_{session_id}_{suffix}" - ref_fod_file = ref_dir / f"{subject_id}_{session_id}_{suffix}" + out_fod_file = output_folder / filename.name + ref_fod_file = ref_dir / filename.name assert similarity_measure(out_fod_file, ref_fod_file, 0.97) for atlas in ("desikan", "destrieux"): + filename.update_entity("atlas", atlas) + filename.delete_entity("model") + filename.suffix = "parcellation" assert similarity_measure( - output_folder - / f"{subject_id}_{session_id}_dwi_space-b0_atlas-{atlas}_parcellation.nii.gz", - ref_dir - / f"{subject_id}_{session_id}_dwi_space-b0_atlas-{atlas}_parcellation.nii.gz", + output_folder / filename.name, + ref_dir / filename.name, 0.955, ) diff --git a/test/unittests/pydra/test_query.py b/test/unittests/pydra/test_query.py index d7846bc76..f04122b2e 100644 --- a/test/unittests/pydra/test_query.py +++ b/test/unittests/pydra/test_query.py @@ -1,3 +1,5 @@ +from pathlib import Path + import pytest from clinica.pydra.query import BIDSQuery, CAPSFileQuery, CAPSGroupQuery, Query @@ -39,12 +41,20 @@ def test_caps_file_query(): assert q.query == { "mask_tissues": [ { - "pattern": "t1/spm/segmentation/normalized_space/*_*_T1w_segm-graymatter_space-Ixi549Space_modulated-off_probability.nii*", + "pattern": Path("t1") + / "spm" + / "segmentation" + / "normalized_space" + / "*_*_T1w_segm-graymatter_space-Ixi549Space_modulated-off_probability.nii*", "description": "Tissue probability map graymatter based on native MRI in MNI space (Ixi549) without modulation.", "needed_pipeline": "t1-volume-tissue-segmentation", }, { - "pattern": "t1/spm/segmentation/normalized_space/*_*_T1w_segm-whitematter_space-Ixi549Space_modulated-off_probability.nii*", + "pattern": Path("t1") + / "spm" + / "segmentation" + / "normalized_space" + / "*_*_T1w_segm-whitematter_space-Ixi549Space_modulated-off_probability.nii*", "description": "Tissue probability map whitematter based on native MRI in MNI space (Ixi549) without modulation.", "needed_pipeline": "t1-volume-tissue-segmentation", }, @@ -61,18 +71,30 @@ def test_caps_file_query(): assert q.query == { "mask_tissues": [ { - "pattern": "t1/spm/segmentation/normalized_space/*_*_T1w_segm-graymatter_space-Ixi549Space_modulated-off_probability.nii*", + "pattern": Path("t1") + / "spm" + / "segmentation" + / "normalized_space" + / "*_*_T1w_segm-graymatter_space-Ixi549Space_modulated-off_probability.nii*", "description": "Tissue probability map graymatter based on native MRI in MNI space (Ixi549) without modulation.", "needed_pipeline": "t1-volume-tissue-segmentation", }, { - "pattern": 
"t1/spm/segmentation/normalized_space/*_*_T1w_segm-whitematter_space-Ixi549Space_modulated-off_probability.nii*", + "pattern": Path("t1") + / "spm" + / "segmentation" + / "normalized_space" + / "*_*_T1w_segm-whitematter_space-Ixi549Space_modulated-off_probability.nii*", "description": "Tissue probability map whitematter based on native MRI in MNI space (Ixi549) without modulation.", "needed_pipeline": "t1-volume-tissue-segmentation", }, ], "flow_fields": { - "pattern": "t1/spm/dartel/group-UnitTest/sub-*_ses-*_T1w_target-UnitTest_transformation-forward_deformation.nii*", + "pattern": Path("t1") + / "spm" + / "dartel" + / "group-UnitTest" + / "sub-*_ses-*_T1w_target-UnitTest_transformation-forward_deformation.nii*", "description": "Deformation from native space to group template UnitTest space.", "needed_pipeline": "t1-volume-create-dartel", }, @@ -84,7 +106,7 @@ def test_caps_group_query(): assert len(q) == 1 assert q.query == { "dartel_template": { - "pattern": "group-UnitTest/t1/group-UnitTest_template.nii*", + "pattern": Path("group-UnitTest") / "t1" / "group-UnitTest_template.nii*", "description": "T1w template file of group UnitTest", "needed_pipeline": "t1-volume or t1-volume-create-dartel", } diff --git a/test/unittests/utils/test_atlas.py b/test/unittests/utils/test_atlas.py new file mode 100644 index 000000000..aa4288c1f --- /dev/null +++ b/test/unittests/utils/test_atlas.py @@ -0,0 +1,251 @@ +import re + +import nibabel as nib +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from clinica.utils.atlas import ( + AAL2, + AICHA, + LPBA40, + Hammers, + JHUDTI811mm, + JHUTracts01mm, + JHUTracts251mm, + JHUTracts501mm, + Neuromorphometrics, +) + + +def test_atlas_factory_error(): + from clinica.utils.atlas import atlas_factory + + with pytest.raises( + ValueError, + match="'foo' is not a valid AtlasName", + ): + atlas_factory("foo") + + +@pytest.mark.parametrize( + "atlas_name,atlas", + [ + ("AAL2", AAL2), + ("AICHA", AICHA), + ("Hammers", Hammers), + ("LPBA40", LPBA40), + ("Neuromorphometrics", Neuromorphometrics), + ("JHUDTI81", JHUDTI811mm), + ("JHUTracts0", JHUTracts01mm), + ("JHUTracts25", JHUTracts251mm), + ("JHUTracts50", JHUTracts501mm), + ], +) +def test_atlas_factory(tmp_path, monkeypatch, atlas_name, atlas): + from clinica.utils.atlas import atlas_factory + + monkeypatch.setenv("FSLDIR", str(tmp_path)) + assert isinstance(atlas_factory(atlas_name), atlas) + + +@pytest.mark.parametrize( + ( + "atlas,expected_name,expected_checksum,expected_atlas_filename," + "expected_roi_filename,expected_resolution,expected_size" + ), + [ + ( + Neuromorphometrics(), + "Neuromorphometrics", + "19a50136cd2f8a14357a19ad8a1dc4a2ecb6beb3fc16cb5441f4f2ebaf64a9a5", + "atlas-Neuromorphometrics_dseg.nii.gz", + "atlas-Neuromorphometrics_dseg.tsv", + "1.5x1.5x1.5", + 141, + ), + ( + LPBA40(), + "LPBA40", + "20826b572bbbdbcdbf28bbd3801dc0c2fed28d1e54bc4fd5027e64ccc6d50374", + "atlas-LPBA40_dseg.nii.gz", + "atlas-LPBA40_dseg.tsv", + "1.5x1.5x1.5", + 57, + ), + ( + Hammers(), + "Hammers", + "c034a7bce2dcab390a0b72f4e7d04769eb3fe5b990d0e18d89b0ce73339a5376", + "atlas-Hammers_dseg.nii.gz", + "atlas-Hammers_dseg.tsv", + "1.5x1.5x1.5", + 69, + ), + ( + AICHA(), + "AICHA", + "cab554d5f546720e60f61f536f82c3d355b31fadb5a4d3ce6a050a606d7ef761", + "atlas-AICHA_dseg.nii.gz", + "atlas-AICHA_dseg.tsv", + "1.5x1.5x1.5", + 385, + ), + ( + AAL2(), + "AAL2", + "f6bc698f778a4b383abd3ce355bfd4505c4aa14708e4a7848f8ee928c2b56b37", + "atlas-AAL2_dseg.nii.gz", + "atlas-AAL2_dseg.tsv", + 
"1.5x1.5x1.5", + 121, + ), + ], + ids=( + "Neuromorphometrics", + "LPBA40", + "Hammers", + "AICHA", + "AAL2", + ), +) +def test_atlases( + atlas, + expected_name, + expected_checksum, + expected_atlas_filename, + expected_roi_filename, + expected_resolution, + expected_size, +): + assert atlas.name == expected_name + assert atlas.expected_checksum == expected_checksum + assert atlas.atlas_filename == expected_atlas_filename + assert atlas.roi_filename == expected_roi_filename + assert atlas.tsv_roi.exists() + assert atlas.spatial_resolution == expected_resolution + assert atlas.atlas_folder.exists() + assert_array_equal(atlas.get_index(), np.arange(expected_size)) + + +@pytest.fixture +def atlas(expected_name, tmp_path, monkeypatch): + from clinica.utils.atlas import atlas_factory + + monkeypatch.setenv("FSLDIR", str(tmp_path)) + return atlas_factory(expected_name) + + +@pytest.mark.parametrize( + "expected_name,expected_checksum,expected_atlas_filename,expected_roi_filename,expected_resolution,expected_size", + [ + ( + "JHUTracts50", + "20ff0216d770686838de26393c0bdac38c8104760631a1a2b5f518bc0bbb470a", + "JHU-ICBM-tracts-maxprob-thr50-1mm.nii.gz", + "atlas-JHUTract_dseg.tsv", + "1x1x1", + 18, + ), + ( + "JHUTracts25", + "7cd85fa2be1918fc83173e9bc0746031fd4c08d70d6c81b7b9224b5d3da6d8a6", + "JHU-ICBM-tracts-maxprob-thr25-1mm.nii.gz", + "atlas-JHUTract_dseg.tsv", + "1x1x1", + 21, + ), + ( + "JHUTracts0", + "eb1de9413a46b02d2b5c7b77852097c6f42c8a5d55a5dbdef949c2e63b95354e", + "JHU-ICBM-tracts-maxprob-thr0-1mm.nii.gz", + "atlas-JHUTract_dseg.tsv", + "1x1x1", + 21, + ), + ( + "JHUDTI81", + "3c3f5d2f1250a3df60982acff35a75b99fd549a05d5f8124a63f78221aa0ec16", + "JHU-ICBM-labels-1mm.nii.gz", + "atlas-JHUDTI81_dseg.tsv", + "1x1x1", + 51, + ), + ], + ids=("JHUTracts50", "JHUTracts25", "JHUTracts0", "JHUDTI81"), +) +def test_atlases_fsl( + tmp_path, + atlas, + expected_name, + expected_checksum, + expected_atlas_filename, + expected_roi_filename, + expected_resolution, + expected_size, + mocker, +): + mocker.patch("nipype.interfaces.fsl.Info.version", return_value="6.0.5") + mocked_fsl_dir = tmp_path / "data" / "atlases" / "JHU" + mocked_fsl_dir.mkdir(parents=True) + (mocked_fsl_dir / expected_atlas_filename).touch() + + assert atlas.name == expected_name + assert atlas.expected_checksum == expected_checksum + assert atlas.atlas_filename == expected_atlas_filename + assert atlas.roi_filename == expected_roi_filename + assert atlas.tsv_roi.exists() + assert atlas.atlas_folder.exists() + + +@pytest.mark.parametrize( + "expected_name", + [ + "AAL2", + "JHUDTI81", + "JHUTracts0", + "JHUTracts25", + "JHUTracts50", + "AICHA", + ], + ids=["AAL2", "JHUDTI81", "JHUTracts0", "JHUTracts25", "JHUTracts50", "AICHA"], +) +def test_atlas_checksum_error(atlas, expected_name, mocker): + mocker.patch("nipype.interfaces.fsl.Info.version", return_value="6.0.5") + mocker.patch("clinica.utils.inputs.compute_sha256_hash", return_value="123") + + with pytest.raises( + IOError, + match=re.escape("has an SHA256 checksum (123) differing from expected"), + ): + atlas.labels + + +@pytest.fixture +def test_image() -> nib.Nifti1Image: + rng = np.random.RandomState(42) + affine = np.zeros((4, 4), float) + np.fill_diagonal(affine, [1, 3, 2.33, 1]) + return nib.Nifti1Image(rng.random((2, 2, 2)), affine=affine) + + +@pytest.mark.parametrize("axis,expected_resolution", [(0, "1"), (1, "3"), (2, "2.33")]) +def test_get_resolution_along_axis(test_image, axis, expected_resolution): + from clinica.utils.atlas import 
_get_resolution_along_axis + + assert ( + _get_resolution_along_axis(test_image.header, axis=axis) == expected_resolution + ) + + +def test_get_resolution_along_axis_error(test_image): + from clinica.utils.atlas import _get_resolution_along_axis + + with pytest.raises( + ValueError, + match=( + "The label image has dimension 3 and axis 3 is therefore not valid. " + "Please use a value between 0 and 2." + ), + ): + _get_resolution_along_axis(test_image.header, axis=3) diff --git a/test/unittests/utils/test_input_files.py b/test/unittests/utils/test_input_files.py index a84596c70..4627cf3d8 100644 --- a/test/unittests/utils/test_input_files.py +++ b/test/unittests/utils/test_input_files.py @@ -1,4 +1,5 @@ import os +from pathlib import Path import pytest @@ -44,7 +45,7 @@ def test_bids_pet_nii_empty(): from clinica.utils.input_files import bids_pet_nii assert bids_pet_nii() == { - "pattern": os.path.join("pet", f"*_pet.nii*"), + "pattern": Path("pet") / "*_pet.nii*", "description": "PET data", } @@ -52,9 +53,8 @@ def test_bids_pet_nii_empty(): @pytest.fixture def expected_bids_pet_query(tracer, reconstruction): return { - "pattern": os.path.join( - "pet", f"*_trc-{tracer.value}_rec-{reconstruction.value}_pet.nii*" - ), + "pattern": Path("pet") + / f"*_trc-{tracer.value}_rec-{reconstruction.value}_pet.nii*", "description": f"PET data with {tracer.value} tracer and reconstruction method {reconstruction.value}", } diff --git a/test/unittests/utils/test_utils_inputs.py b/test/unittests/utils/test_utils_inputs.py index 11083bd04..946fb184d 100644 --- a/test/unittests/utils/test_utils_inputs.py +++ b/test/unittests/utils/test_utils_inputs.py @@ -261,23 +261,21 @@ def test_determine_caps_or_bids(tmp_path): @pytest.mark.parametrize("folder_type", ["BIDS", "CAPS"]) -def test_common_checks(folder_type): +def test_validate_folder_existence(folder_type): from clinica.utils.exceptions import ClinicaBIDSError, ClinicaCAPSError - from clinica.utils.inputs import _common_checks + from clinica.utils.inputs import _validate_folder_existence with pytest.raises( - ValueError, + TypeError, match="Argument you provided to ", ): - _common_checks(1, folder_type) # noqa - - error = ClinicaBIDSError if folder_type == "BIDS" else ClinicaCAPSError + _validate_folder_existence(1, folder_type) # noqa with pytest.raises( - error, + ClinicaBIDSError if folder_type == "BIDS" else ClinicaCAPSError, match=f"The {folder_type} directory you gave is not a folder.", ): - _common_checks(Path("fooooo"), folder_type) + _validate_folder_existence(Path("fooooo"), folder_type) def test_check_bids_folder(tmp_path):
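The reworked `Atlases.md` tutorial above ends with "Create a new class inheriting from `BaseAtlas` and implementing the required interface". To make that step concrete, here is a sketch; `BaseAtlas`'s exact abstract interface is not shown in this patch, so the attribute names below (`name`, `atlas_filename`, `roi_filename`, `expected_checksum`) are inferred from the new `test_atlas.py` and may differ from the real base class:

```python
from clinica.utils.atlas import BaseAtlas


class MyParcellation(BaseAtlas):
    """Hypothetical atlas whose files ship in clinica/resources/atlases/."""

    name = "MyParcellation"
    # NIfTI label image and TSV file with `roi_value` / `roi_name` columns:
    atlas_filename = "atlas-MyParcellation_dseg.nii.gz"
    roi_filename = "atlas-MyParcellation_dseg.tsv"
    # SHA256 of the label image; per test_atlas_checksum_error above, it is
    # verified when `labels` is accessed (placeholder value here):
    expected_checksum = "0" * 64
```

Adding a matching variant to `T1AndPetVolumeAtlasName` (e.g. `MYPARCELLATION = "MyParcellation"`) then exposes the atlas to the `t1-volume` and `pet-volume` command lines without touching the pipelines' CLI code.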