Volumes scripts #947

Merged: 14 commits, Apr 9, 2024
47 changes: 46 additions & 1 deletion scilpy/image/labels.py
@@ -24,7 +24,7 @@ def get_data_as_labels(in_img):
curr_type = in_img.get_data_dtype()

if np.issubdtype(curr_type, np.signedinteger) or \
np.issubdtype(curr_type, np.unsignedinteger):
np.issubdtype(curr_type, np.unsignedinteger):
return np.asanyarray(in_img.dataobj).astype(np.uint16)
else:
basename = os.path.basename(in_img.get_filename())
@@ -300,3 +300,48 @@ def dilate_labels(data, vox_size, distance, nbr_processes,
data = data.reshape(img_shape)

return data


def get_stats_in_label(map_data, label_data, label_lut):
"""
Get statistics about a map for each label in an atlas.

Parameters
----------
map_data: np.ndarray
The map from which to get statistics.
label_data: np.ndarray
The loaded atlas.
label_lut: dict
The loaded label LUT (look-up table).

Returns
-------
out_dict: dict
A dict with one key per label name; each value is a dict of the
computed statistics for that label.
"""
(label_indices, label_names) = zip(*label_lut.items())

out_dict = {}
for label, name in zip(label_indices, label_names):
label = int(label)
if label != 0:
curr_data = map_data[np.where(label_data == label)]
nb_vx_roi = np.count_nonzero(label_data == label)
nb_seed_vx = np.count_nonzero(curr_data)

if nb_seed_vx != 0:
mean_seed = np.sum(curr_data) / nb_seed_vx
max_seed = np.max(curr_data)
std_seed = np.sqrt(np.mean(abs(curr_data[curr_data != 0] -
mean_seed) ** 2))

out_dict[name] = {'ROI-idx': label,
'ROI-name': str(name),
'nb-vx-roi': int(nb_vx_roi),
'nb-vx-seed': int(nb_seed_vx),
'max': int(max_seed),
'mean': float(mean_seed),
'std': float(std_seed)}
return out_dict
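
For illustration, a minimal usage sketch of get_stats_in_label on synthetic data; the array shapes and LUT entries below are made up, and the LUT is assumed to map label values to label names, as the zip(*label_lut.items()) unpacking above expects.

# Usage sketch (synthetic data, hypothetical LUT); not part of the diff.
import numpy as np
from scilpy.image.labels import get_stats_in_label

map_data = np.random.rand(4, 4, 4)                  # e.g. a seeding map
label_data = np.zeros((4, 4, 4), dtype=np.uint16)
label_data[2:, 2:, 2:] = 1                          # one 8-voxel label
lut = {"0": "background", "1": "my_roi"}            # hypothetical LUT

stats = get_stats_in_label(map_data, label_data, lut)
# stats["my_roi"] contains 'ROI-idx', 'ROI-name', 'nb-vx-roi', 'nb-vx-seed',
# 'max', 'mean' and 'std' for that label; label 0 is skipped.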
5 changes: 5 additions & 0 deletions scilpy/image/tests/test_labels.py
@@ -157,3 +157,8 @@ def test_split_labels():
assert len(out_labels) == 3
assert_equal(np.unique(out_labels[0]), [0, 6])
assert_equal(np.unique(out_labels[1]), [0])


def test_stats_in_labels():
# TODO. Will need to create a fake LUT.
pass
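
A hedged sketch of what the missing test could look like, assuming numpy and get_stats_in_label are imported in this module; the fake LUT and the expected values are illustrative only and not part of the PR.

# Sketch only; values below are made up.
def test_stats_in_labels_sketch():
    map_data = np.ones((3, 3, 3))
    label_data = np.zeros((3, 3, 3), dtype=np.uint16)
    label_data[:2, :2, :2] = 1                        # 8 voxels of label 1
    fake_lut = {"0": "background", "1": "region_a"}   # fake LUT

    stats = get_stats_in_label(map_data, label_data, fake_lut)
    assert stats["region_a"]["nb-vx-roi"] == 8
    assert stats["region_a"]["nb-vx-seed"] == 8
    assert stats["region_a"]["mean"] == 1.0
    assert stats["region_a"]["std"] == 0.0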
10 changes: 10 additions & 0 deletions scilpy/image/tests/test_volume_operations.py
@@ -119,6 +119,16 @@ def test_compute_snr():
assert np.allclose(snr[0]['snr'], target_val, atol=0.00005)


def test_remove_outliers_ransac():
# Could be tested, but it mainly wraps sklearn's RANSAC; not re-testing sklearn here.
pass


def test_smooth_to_fwhm():
# TODO
pass


def test_resample_volume():
moving3d = np.pad(np.ones((4, 4, 4)), pad_width=1,
mode='constant', constant_values=0)
96 changes: 96 additions & 0 deletions scilpy/image/volume_b0_synthesis.py
@@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-
import logging
import os
import warnings

# Disable tensorflow warnings
with warnings.catch_warnings():
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.simplefilter("ignore")
from dipy.nn.synb0 import Synb0

import numpy as np
from dipy.align.imaffine import AffineMap
from dipy.segment.tissue import TissueClassifierHMRF
from scipy.ndimage import gaussian_filter

from scilpy.image.volume_operations import register_image


def compute_b0_synthesis(t1_data, t1_bet_data, b0_data, b0_bet_data,
template_data, t1_affine, b0_affine, template_affine,
verbose):
"""
Note: Tensorflow is required here, through dipy's Synb0. If it is not
installed, dipy will raise an error such as:
>> dipy.utils.tripwire.TripWireError: We need package tensorflow_addons for
these functions, but ``import tensorflow_addons`` raised an ImportError.

Parameters
----------
t1_data: np.ndarray
The T1 (whole-brain, with the skull).
t1_bet_data: np.ndarray
The brain-extracted (BET) T1.
b0_data: np.ndarray
The b0 (whole-brain, with the skull).
b0_bet_data: np.ndarray
The brain-extracted (BET) b0.
template_data: np.ndarray
The registration template (typically MNI).
t1_affine: np.ndarray
The T1's affine.
b0_affine: np.ndarray
The b0's affine.
template_affine: np.ndarray
The template's affine.
verbose: bool
Whether to make dipy's Synb0 verbose.

Returns
-------
rev_b0: np.ndarray
The synthesized b0.
"""
logging.info('The usage of synthetic b0 is not fully tested. '
'Be careful when using it.')

# Crude estimation of the WM mean intensity and normalization
logging.info('Estimating WM mean intensity')
hmrf = TissueClassifierHMRF()
t1_bet_data = gaussian_filter(t1_bet_data, 2)
_, final_segmentation, _ = hmrf.classify(t1_bet_data, 3, 0.25,
tolerance=1e-4, max_iter=5)
avg_wm = np.mean(t1_data[final_segmentation == 3])

# Modifying t1
t1_data /= avg_wm
t1_data *= 110

# SyNB0 works only in a standard space, so we need to register the images
logging.info('Registering images')
# Use the BET image for registration
t1_bet_to_b0, t1_bet_to_b0_transform = register_image(
b0_bet_data, b0_affine, t1_bet_data, t1_affine, fine=True)
affine_map = AffineMap(t1_bet_to_b0_transform,
b0_data.shape, b0_affine,
t1_data.shape, t1_affine)
t1_skull_to_b0 = affine_map.transform(t1_data.astype(np.float64))

# Then register to MNI (using the BET again)
_, t1_bet_to_b0_to_mni_transform = register_image(
template_data, template_affine, t1_bet_to_b0, b0_affine, fine=True)
affine_map = AffineMap(t1_bet_to_b0_to_mni_transform,
template_data.shape, template_affine,
b0_data.shape, b0_affine)

# But for prediction, we want the skull
b0_skull_to_mni = affine_map.transform(b0_data.astype(np.float64))
t1_skull_to_mni = affine_map.transform(t1_skull_to_b0.astype(np.float64))

logging.info('Running SyN-B0')
synb0 = Synb0(verbose)
rev_b0 = synb0.predict(b0_skull_to_mni, t1_skull_to_mni)
rev_b0 = affine_map.transform_inverse(rev_b0.astype(np.float64))

return rev_b0
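
A hedged usage sketch of compute_b0_synthesis follows; all file names are placeholders, the template is assumed to be an MNI-like template chosen by the caller, and tensorflow must be installed for dipy's Synb0 to load.

# Usage sketch (placeholder file names); not part of the diff.
import nibabel as nib
from scilpy.image.volume_b0_synthesis import compute_b0_synthesis

t1_img = nib.load('t1.nii.gz')                   # whole-brain T1
t1_bet_img = nib.load('t1_bet.nii.gz')           # brain-extracted T1
b0_img = nib.load('b0.nii.gz')                   # whole-brain b0
b0_bet_img = nib.load('b0_bet.nii.gz')           # brain-extracted b0
template_img = nib.load('mni_template.nii.gz')   # placeholder template

rev_b0 = compute_b0_synthesis(
    t1_img.get_fdata(), t1_bet_img.get_fdata(),
    b0_img.get_fdata(), b0_bet_img.get_fdata(),
    template_img.get_fdata(),
    t1_img.affine, b0_img.affine, template_img.affine,
    verbose=False)

# The prediction is inverse-transformed by the function, so it is saved
# in the b0 space here.
nib.save(nib.Nifti1Image(rev_b0, b0_img.affine), 'b0_synthesized.nii.gz')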
46 changes: 46 additions & 0 deletions scilpy/image/volume_operations.py
@@ -16,6 +16,7 @@
import numpy as np
from numpy import ma
from scipy.ndimage import binary_dilation, gaussian_filter
from sklearn import linear_model

from scilpy.image.reslice import reslice # Don't use Dipy's reslice. Buggy.
from scilpy.io.image import get_data_as_mask
@@ -152,6 +153,10 @@ def apply_transform(transfo, reference, moving,
elif moving_data.ndim == 4:
if isinstance(moving_data[0, 0, 0], np.void):
raise ValueError('Does not support TrackVis RGB')
else:
logging.warning('You are applying a transform to a 4D volume. If '
'it is a DWI volume, make sure to rotate your '
'bvecs with scil_gradients_apply_transform.py')

affine_map = AffineMap(np.linalg.inv(transfo),
dim[0:3], grid2world,
@@ -368,6 +373,47 @@ def compute_snr(dwi, bval, bvec, b0_thr, mask,
return val


def remove_outliers_ransac(in_data, min_fit, fit_thr, max_iter):
"""
Remove outliers from image using the RANSAC algorithm.

Parameters
----------
in_data: np.ndarray
The input volume.
min_fit: int
The minimum number of data values required to fit the model.
fit_thr: float
Threshold value for determining when a data point fits a model.
max_iter: int
The maximum number of iterations allowed in the algorithm.

Returns
-------
out_data: np.ndarray
Data without outliers.
"""
init_shape = in_data.shape
in_data_flat = in_data.flatten()
in_nzr_ind = np.nonzero(in_data_flat)
in_nzr_val = np.array(in_data_flat[in_nzr_ind])

X = in_nzr_ind[0][:, np.newaxis]
model_ransac = linear_model.RANSACRegressor(
base_estimator=linear_model.LinearRegression(), min_samples=min_fit,
residual_threshold=fit_thr, max_trials=max_iter)
model_ransac.fit(X, in_nzr_val)

outlier_mask = np.logical_not(model_ransac.inlier_mask_)
outliers = X[outlier_mask]
logging.info('# outliers: {}'.format(len(outliers)))

in_data_flat[outliers] = 0
out_data = np.reshape(in_data_flat, init_shape)

return out_data
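
A hedged usage sketch of remove_outliers_ransac; the parameter values are made up, and it assumes an sklearn version that still accepts RANSACRegressor's base_estimator keyword (used above).

# Usage sketch (illustrative parameter values); not part of the diff.
import numpy as np
from scilpy.image.volume_operations import remove_outliers_ransac

vol = np.random.rand(16, 16, 16)
vol[8, 8, 8] = 1000.0                  # inject an obvious intensity outlier
cleaned = remove_outliers_ransac(vol, min_fit=50, fit_thr=2.5, max_iter=100)
# Voxels flagged as outliers by RANSAC are set to 0 in the returned volume.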


def smooth_to_fwhm(data, fwhm):
"""
Smooth a volume to a given FWHM.
27 changes: 0 additions & 27 deletions scilpy/utils/metrics_tools.py
@@ -377,30 +377,3 @@ def plot_metrics_stats(means, stds, title=None, xlabel=None,

plt.close(fig)
return fig


def get_roi_metrics_mean_std(density_map, metrics_files):
"""
Returns the mean and standard deviation of each metric, using the
provided density map. This can be a binary mask,
or contain weighted values between 0 and 1.

Parameters
------------
density_map : ndarray
3D numpy array containing a density map.
metrics_files : sequence
list of nibabel objects representing the metrics files.

Returns
---------
stats : list
list of tuples where the first element of the tuple is the mean
of a metric, and the second element is the standard deviation.

"""

return map(lambda metric_file:
weighted_mean_std(density_map,
metric_file.get_fdata(dtype=np.float64)),
metrics_files)
17 changes: 6 additions & 11 deletions scripts/scil_volume_apply_transform.py
@@ -53,14 +53,11 @@ def main():
args = parser.parse_args()
logging.getLogger().setLevel(logging.getLevelName(args.verbose))

# Verifications
assert_inputs_exist(parser, [args.in_file, args.in_target_file,
args.in_transfo])
assert_outputs_exist(parser, args, args.out_name)

transfo = load_matrix_in_any_format(args.in_transfo)
if args.inverse:
transfo = np.linalg.inv(transfo)

_, ref_extension = split_name_with_nii(args.in_target_file)
_, in_extension = split_name_with_nii(args.in_file)
if ref_extension not in ['.nii', '.nii.gz']:
@@ -69,16 +66,14 @@
if in_extension not in ['.nii', '.nii.gz']:
parser.error('{} is an unsupported format.'.format(args.in_file))

# Load images and validate input type.
# Loading
transfo = load_matrix_in_any_format(args.in_transfo)
if args.inverse:
transfo = np.linalg.inv(transfo)
moving = nib.load(args.in_file)

if moving.get_fdata().ndim == 4:
logging.warning('You are applying a transform to a 4D dwi volume, '
'make sure to rotate your bvecs with '
'scil_gradients_apply_transform.py')

reference = nib.load(args.in_target_file)

# Processing, saving
warped_img = apply_transform(
transfo, reference, moving, keep_dtype=args.keep_dtype)
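
For context, a minimal sketch of the flow main() now implements; the file names are placeholders, np.loadtxt stands in for load_matrix_in_any_format for a plain-text matrix, and apply_transform is assumed to return a nibabel image, as the warped_img name suggests.

# Sketch only (placeholder paths); not part of the diff.
import nibabel as nib
import numpy as np
from scilpy.image.volume_operations import apply_transform

transfo = np.loadtxt('transfo.txt')     # 4x4 affine
moving = nib.load('dwi.nii.gz')         # a 4D DWI now triggers the warning
reference = nib.load('t1.nii.gz')       # inside apply_transform

warped_img = apply_transform(transfo, reference, moving, keep_dtype=False)
nib.save(warped_img, 'dwi_warped.nii.gz')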
