RF - Input to volume stats scripts #1068

Open · wants to merge 9 commits into master
120 changes: 62 additions & 58 deletions scripts/scil_volume_stats_in_ROI.py
@@ -3,11 +3,11 @@

"""
Compute the statistics (mean, std) of scalar maps, which can represent
diffusion metrics, in a ROI. Prints the results.
diffusion metrics, in ROIs. Prints the results.

The mask can either be a binary mask, or a weighting mask. If the mask is
a weighting mask it should either contain floats between 0 and 1 or should be
normalized with --normalize_weights. IMPORTANT: if the mask contains weights
The ROIs can either be binary masks or weighting masks. If the ROIs are
weighting masks, they should either contain floats between 0 and 1 or be
normalized with --normalize_weights. IMPORTANT: if the ROIs contain weights
(and not 0 and 1 exclusively), the standard deviation will also be weighted.
"""

@@ -33,9 +33,9 @@ def _build_arg_parser():
p = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)

p.add_argument('in_mask',
help='Mask volume filename.\nCan be a binary mask or a '
'weighted mask.')
p.add_argument('in_rois', nargs='+',
help='ROI volume filenames.\nCan be binary masks or '
'weighted masks.')

g = p.add_argument_group('Metrics input options')
gg = g.add_mutually_exclusive_group(required=True)
@@ -74,59 +74,63 @@ def main():
if not os.path.exists(args.metrics_dir):
parser.error("Metrics directory does not exist: {}"
.format(args.metrics_dir))
assert_inputs_exist(parser, args.in_mask)
assert_inputs_exist(parser, args.in_rois)

tmp_file_list = glob.glob(os.path.join(args.metrics_dir, '*nii.gz'))
args.metrics_file_list = [os.path.join(args.metrics_dir, f)
for f in tmp_file_list]
args.metrics_file_list = glob.glob(os.path.join(args.metrics_dir, '*nii.gz'))
else:
assert_inputs_exist(parser, [args.in_mask] + args.metrics_file_list)
assert_headers_compatible(parser,
[args.in_mask] + args.metrics_file_list)

# Loading
mask_data = nib.load(args.in_mask).get_fdata(dtype=np.float32)
if len(mask_data.shape) > 3:
parser.error('Mask should be a 3D image.')
if np.min(mask_data) < 0:
parser.error('Mask should not contain negative values.')
roi_name = split_name_with_nii(os.path.basename(args.in_mask))[0]

# Discussion about the way the normalization is done.
# https://github.com/scilus/scilpy/pull/202#discussion_r411355609
# Summary:
# 1) We don't want to normalize with data = (data-min) / (max-min) because
# it zeroes out the minimal values of the array. This is not a large error
# source, but not preferable.
# 2) data = data / max(data) or data = data / sum(data): in practice, when
# we use them in numpy using their weights argument, leads to the same
# result.
if args.normalize_weights:
mask_data /= np.max(mask_data)
elif args.bin:
mask_data[np.where(mask_data > 0.0)] = 1.0
elif np.min(mask_data) < 0.0 or np.max(mask_data) > 1.0:
parser.error('Mask data should only contain values between 0 and 1. '
'Try --normalize_weights.')

# Load and process all metrics files.
json_stats = {roi_name: {}}
for f in args.metrics_file_list:
metric_img = nib.load(f)
metric_name = split_name_with_nii(os.path.basename(f))[0]
if len(metric_img.shape) == 3:
data = metric_img.get_fdata(dtype=np.float64)
if np.any(np.isnan(data)):
logging.warning("Metric '{}' contains some NaN. Ignoring "
"voxels with NaN."
.format(os.path.basename(f)))
mean, std = weighted_mean_std(mask_data, data)
json_stats[roi_name][metric_name] = {'mean': mean,
'std': std}
else:
parser.error(
'Metric {} is not a 3D image ({}D shape).'
.format(f, len(metric_img.shape)))
assert_inputs_exist(parser, args.in_rois + args.metrics_file_list)
assert_headers_compatible(parser, args.in_rois + args.metrics_file_list)

# Computing stats for all ROIs and metrics files
json_stats = {}
for roi_filename in args.in_rois:
roi_data = nib.load(roi_filename).get_fdata(dtype=np.float32)
if len(roi_data.shape) > 3:
parser.error('ROI {} should be a 3D image.'.format(roi_filename))
if np.min(roi_data) < 0:
parser.error('ROI {} should not contain negative values.'
.format(roi_filename))
roi_name = split_name_with_nii(os.path.basename(roi_filename))[0]

# Discussion about the way the normalization is done.
# https://github.com/scilus/scilpy/pull/202#discussion_r411355609
# Summary:
# 1) We don't want to normalize with data = (data-min) / (max-min)
# because it zeroes out the minimal values of the array. This is
# not a large error source, but not preferable.
# 2) data = data / max(data) or data = data / sum(data): in practice,
# when we use them in numpy using their weights argument, leads to the
# same result.
if args.normalize_weights:
roi_data /= np.max(roi_data)
elif args.bin:
roi_data[np.where(roi_data > 0.0)] = 1.0
elif np.min(roi_data) < 0.0 or np.max(roi_data) > 1.0:
parser.error('ROI {} data should only contain values between 0 '
'and 1. Try --normalize_weights.'
.format(roi_filename))

# Load and process all metrics files.
json_stats[roi_name] = {}
for f in args.metrics_file_list:
metric_img = nib.load(f)
metric_name = split_name_with_nii(os.path.basename(f))[0]
if len(metric_img.shape) == 3:
data = metric_img.get_fdata(dtype=np.float64)
if np.any(np.isnan(data)):
logging.warning("Metric '{}' contains some NaN. Ignoring "
"voxels with NaN."
.format(os.path.basename(f)))
mean, std = weighted_mean_std(roi_data, data)
json_stats[roi_name][metric_name] = {'mean': mean,
'std': std}
else:
parser.error(
'Metric {} is not a 3D image ({}D shape).'
.format(f, len(metric_img.shape)))

if len(args.in_rois) == 1:
json_stats = json_stats[roi_name]

# Print results
print(json.dumps(json_stats, indent=args.indent, sort_keys=args.sort_keys))
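A quick illustration of point 2 in the normalization comment above: numpy's weighted routines divide by the sum of the weights, so dividing the weights by their max or by their sum yields the same statistics. This is a standalone check, not part of the script.

```python
import numpy as np

rng = np.random.default_rng(0)
data = rng.random(1000)
weights = rng.random(1000)   # arbitrary non-negative weights

# np.average normalizes by the sum of the weights, so rescaling them by a
# positive constant (their max or their sum) leaves the result unchanged.
m_raw = np.average(data, weights=weights)
m_max = np.average(data, weights=weights / weights.max())
m_sum = np.average(data, weights=weights / weights.sum())
assert np.isclose(m_raw, m_max) and np.isclose(m_raw, m_sum)
```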
54 changes: 42 additions & 12 deletions scripts/scil_volume_stats_in_labels.py
@@ -11,15 +11,18 @@
"""

import argparse
import glob
import json
import logging
import os

import nibabel as nib
import numpy as np

from scilpy.image.labels import get_data_as_labels, get_stats_in_label
from scilpy.io.utils import (add_overwrite_arg, add_verbose_arg,
from scilpy.io.utils import (add_json_args, add_overwrite_arg, add_verbose_arg,
assert_inputs_exist, assert_headers_compatible)
from scilpy.utils.filenames import split_name_with_nii


def _build_arg_parser():
@@ -30,9 +33,17 @@ def _build_arg_parser():
p.add_argument('in_labels_lut',
help='Path of the LUT file corresponding to labels,'
'used to name the regions of interest.')
p.add_argument('in_map',
help='Path of the input map file. Expecting a 3D file.')

g = p.add_argument_group('Metrics input options')
gg = g.add_mutually_exclusive_group(required=True)
gg.add_argument('--metrics_dir', metavar='dir',
help='Name of the directory containing metrics files: '
'we will \nload all nifti files.')
gg.add_argument('--metrics', dest='metrics_file_list', nargs='+',
metavar='file',
help='Metrics nifti filenames. List of the names of the '
'metrics files, \nin nifti format.')
add_json_args(p)
add_verbose_arg(p)
add_overwrite_arg(p)

@@ -45,21 +56,40 @@ def main():
logging.getLogger().setLevel(logging.getLevelName(args.verbose))

# Verifications
assert_inputs_exist(parser,
[args.in_labels, args.in_map, args.in_labels_lut])
assert_headers_compatible(parser, [args.in_labels, args.in_map])

# Get input list. Either all files in dir or given list.
if args.metrics_dir:
if not os.path.exists(args.metrics_dir):
parser.error("Metrics directory does not exist: {}"
.format(args.metrics_dir))
assert_inputs_exist(parser, [args.in_labels, args.in_labels_lut])

args.metrics_file_list = glob.glob(os.path.join(args.metrics_dir, '*nii.gz'))
else:
assert_inputs_exist(parser, [args.in_labels] + args.metrics_file_list)
assert_headers_compatible(parser,
[args.in_labels] + args.metrics_file_list)

# Loading
label_data = get_data_as_labels(nib.load(args.in_labels))
with open(args.in_labels_lut) as f:
label_dict = json.load(f)
map_data = nib.load(args.in_map).get_fdata(dtype=np.float32)
if len(map_data.shape) > 3:
parser.error('Mask should be a 3D image.')

# Process
out_dict = get_stats_in_label(map_data, label_data, label_dict)
print(json.dumps(out_dict))
# Computing stats for all metrics files
json_stats = {}
for metric_filename in args.metrics_file_list:
metric_data = nib.load(metric_filename).get_fdata(dtype=np.float32)
metric_name = split_name_with_nii(os.path.basename(metric_filename))[0]
if len(metric_data.shape) > 3:
parser.error('Metric {} should be a 3D image.'
.format(metric_filename))

# Process
out_dict = get_stats_in_label(metric_data, label_data, label_dict)
json_stats[metric_name] = out_dict

if len(args.metrics_file_list) == 1:
json_stats = json_stats[metric_name]
print(json.dumps(json_stats, indent=args.indent, sort_keys=args.sort_keys))


if __name__ == "__main__":
30 changes: 27 additions & 3 deletions scripts/tests/test_volume_stats_in_ROI.py
@@ -19,10 +19,34 @@ def test_help_option(script_runner):

def test_execution_tractometry(script_runner, monkeypatch):
monkeypatch.chdir(os.path.expanduser(tmp_dir.name))
in_mask = os.path.join(SCILPY_HOME, 'tractometry',
'IFGWM.nii.gz')
in_roi = os.path.join(SCILPY_HOME, 'tractometry',
'IFGWM.nii.gz')
in_ref = os.path.join(SCILPY_HOME, 'tractometry',
'mni_masked.nii.gz')

# Test with a single ROI input
ret = script_runner.run('scil_volume_stats_in_ROI.py',
in_roi, '--metrics', in_ref)
assert ret.success

# Test with multiple ROIs input
ret = script_runner.run('scil_volume_stats_in_ROI.py',
in_roi, in_roi, in_roi, '--metrics', in_ref)
assert ret.success

# Test with multiple metrics input
ret = script_runner.run('scil_volume_stats_in_ROI.py',
in_roi, '--metrics', in_ref, in_ref, in_ref)
assert ret.success

# Test with multiple metrics and ROIs input
ret = script_runner.run('scil_volume_stats_in_ROI.py',
in_roi, in_roi, '--metrics', in_ref, in_ref)
assert ret.success

# Test with a metric folder
metrics_dir = os.path.join(SCILPY_HOME, 'plot')
in_roi = os.path.join(SCILPY_HOME, 'plot', 'mask_wm.nii.gz')
ret = script_runner.run('scil_volume_stats_in_ROI.py',
in_mask, '--metrics', in_ref)
in_roi, '--metrics_dir', metrics_dir)
assert ret.success
19 changes: 17 additions & 2 deletions scripts/tests/test_volume_stats_in_labels.py
@@ -17,9 +17,24 @@ def test_help_option(script_runner):

def test_execution(script_runner, monkeypatch):
monkeypatch.chdir(os.path.expanduser(tmp_dir.name))
in_map = os.path.join(SCILPY_HOME, 'plot', 'fa.nii.gz')
in_metric = os.path.join(SCILPY_HOME, 'plot', 'fa.nii.gz')
in_atlas = os.path.join(SCILPY_HOME, 'plot', 'atlas_brainnetome.nii.gz')
atlas_lut = os.path.join(SCILPY_HOME, 'plot', 'atlas_brainnetome.json')

# Test with a single metric
ret = script_runner.run('scil_volume_stats_in_labels.py',
in_atlas, atlas_lut, "--metrics", in_metric)
assert ret.success

# Test with multiple metrics
ret = script_runner.run('scil_volume_stats_in_labels.py',
in_atlas, atlas_lut, "--metrics",
in_metric, in_metric, in_metric)
assert ret.success

# Test with a metric folder
metrics_dir = os.path.join(SCILPY_HOME, 'plot')
ret = script_runner.run('scil_volume_stats_in_labels.py',
in_atlas, atlas_lut, in_map)
in_atlas, atlas_lut, "--metrics_dir",
metrics_dir)
assert ret.success