Skip to content

Commit

Permalink
Merge branch 'master' of github.com:scilus/scilpy into compare_tractogram_big
Browse files Browse the repository at this point in the history
  • Loading branch information
frheault committed Feb 21, 2024
2 parents 122db3d + 89ac8ce commit 493399d
Show file tree
Hide file tree
Showing 189 changed files with 2,398 additions and 1,103 deletions.
5 changes: 4 additions & 1 deletion .coveragerc
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
branch = True
concurrency = multiprocessing
data_file = .coverage
source_pkgs =
source_pkgs =
scilpy
scripts
relative_files = True
Expand All @@ -17,6 +17,9 @@ omit =
[report]
skip_empty = True
skip_covered = True
exclude_also =
if __name__ == "__main__":
(?<!def )main()

[html]
title = Scilpy Coverage Report
Expand Down
44 changes: 36 additions & 8 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,9 @@ jobs:
test:
runs-on: scilus-runners
if: github.repository == 'scilus/scilpy'

steps:
- name: Checkout repository for merge
- name: Checkout repository at merge
uses: actions/checkout@v4

- name: Fetch python version from repository
Expand Down Expand Up @@ -55,19 +56,46 @@ jobs:
export C_INCLUDE_PATH=$pythonLocation/include/python${{ steps.python-selector.outputs.python-version }}:$C_INCLUDE_PATH
pytest --cov-report term-missing:skip-covered
- name: Save test results and coverage
uses: actions/upload-artifact@v4
id: test-coverage-results
with:
name: test-coverage-${{ github.run_id }}
retention-days: 1
path: |
.coverage
.test_reports/
coverage:
runs-on: scilus-runners
if: github.repository == 'scilus/scilpy'
needs: test

steps:
- name: Checkout repository at merge
uses: actions/checkout@v4

- name: Set up Python for codecov upload
uses: actions/[email protected]
with:
python-version: '3.10'
cache: 'pip'

- name: Install pycoverage
run: pip install coverage

- name: Download test results and coverage
uses: actions/download-artifact@v4
with:
name: test-coverage-${{ github.run_id }}

- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: unittests
name: scilpy-unittests-${{ github.run_id }}
name: scilpy-unittests-${{ github.run_id }}-${{ github.run_attempt }}
verbose: true
fail_ci_if_error: true
plugin: pycoverage

- name: Upload test reports and coverage to artifacts
uses: actions/[email protected]
with:
name: test-reports
path: |
.test_reports/*
34 changes: 16 additions & 18 deletions scilpy/dwi/operations.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ def compute_dwi_attenuation(dwi_weights: np.ndarray, b0: np.ndarray):
return dwi_attenuation


def detect_volume_outliers(data, bvecs, bvals, std_scale, verbose,
def detect_volume_outliers(data, bvecs, bvals, std_scale,
b0_thr=DEFAULT_B0_THRESHOLD):
"""
Parameters
Expand All @@ -153,8 +153,6 @@ def detect_volume_outliers(data, bvecs, bvals, std_scale, verbose,
std_scale: float
How many deviation from the mean are required to be considered an
outlier.
verbose: bool
If True, print even more stuff.
b0_thr: float
Value below which b-values are considered as b0.
"""
Expand Down Expand Up @@ -195,26 +193,26 @@ def detect_volume_outliers(data, bvecs, bvals, std_scale, verbose,
outliers_corr = np.argwhere(
results_dict[key][:, 2] < avg_corr - (std_scale * std_corr))

print('Results for shell {} with {} directions:'
.format(key, len(results_dict[key])))
print('AVG and STD of angles: {} +/- {}'
.format(avg_angle, std_angle))
print('AVG and STD of correlations: {} +/- {}'
.format(avg_corr, std_corr))
logging.info('Results for shell {} with {} directions:'
.format(key, len(results_dict[key])))
logging.info('AVG and STD of angles: {} +/- {}'
.format(avg_angle, std_angle))
logging.info('AVG and STD of correlations: {} +/- {}'
.format(avg_corr, std_corr))

if len(outliers_angle) or len(outliers_corr):
print('Possible outliers ({} STD below or above average):'
logging.info('Possible outliers ({} STD below or above average):'
.format(std_scale))
print('Outliers based on angle [position (4D), value]')
logging.info('Outliers based on angle [position (4D), value]')
for i in outliers_angle:
print(results_dict[key][i, :][0][0:2])
print('Outliers based on correlation [position (4D), value]')
logging.info(results_dict[key][i, :][0][0:2])
logging.info('Outliers based on correlation [position (4D), ' +
'value]')
for i in outliers_corr:
print(results_dict[key][i, :][0][0::2])
logging.info(results_dict[key][i, :][0][0::2])
else:
print('No outliers detected.')
logging.info('No outliers detected.')

if verbose:
print('Shell with b-value {}'.format(key))
pprint.pprint(results_dict[key])
logging.debug('Shell with b-value {}'.format(key))
logging.debug("\n" + pprint.pformat(results_dict[key]))
print()
42 changes: 38 additions & 4 deletions scilpy/gradients/tests/test_gradients_utils.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,44 @@
# -*- coding: utf-8 -*-
import numpy as np

from scilpy.gradients.bvec_bval_tools import is_normalized_bvecs
from scilpy.gradients.utils import random_uniform_on_sphere, \
get_new_gtab_order


def test_random_uniform_on_sphere():
    """Sanity-check the pseudo-random unit-vector generator."""
    # NOTE(review): a leftover `pass` line from the pre-diff stub was fused
    # into this block by the page extraction; it has been removed so the
    # test body actually runs.
    bvecs = random_uniform_on_sphere(10)

    # Confirm that they are unit vectors.
    assert is_normalized_bvecs(bvecs)

    # Can't check much more than that. Supervising that no co-linear vectors.
    # (Each pair of vectors should have at least some angle in-between)
    # We also tried to check if the closest neighbor to each vector is more or
    # less always with the same angle, but it is very variable.
    min_expected_angle = 1.0
    smallests = []
    for i in range(10):
        angles = np.rad2deg(np.arccos(np.dot(bvecs[i, :], bvecs.T)))
        # Smallest, except 0 (with itself). Sometimes this is nan.
        smallests.append(np.nanmin(angles[angles > 1e-5]))
    assert np.all(np.asarray(smallests) > min_expected_angle)


def test_get_new_gtab_order():
    """Verify that get_new_gtab_order returns the sorting order that maps
    shuffled bvals/bvecs back onto the reference gradient table."""
    # NOTE(review): the deleted stub `test_get_new_order_philips` from the
    # old side of the diff was fused into the middle of this function by the
    # page extraction; it has been removed so the block is valid Python.
    # Using N=4 vectors
    philips_table = np.asarray([[1, 1, 1, 1],
                                [2, 2, 2, 2],
                                [3, 3, 3, 3],
                                [4, 4, 4, 4]])
    # dwi is only used by get_new_gtab_order to confirm the number of volumes.
    dwi = np.ones((10, 10, 10, 4))
    bvecs = np.asarray([[3, 3, 3],
                        [4, 4, 4],
                        [2, 2, 2],
                        [1, 1, 1]])
    bvals = np.asarray([3, 4, 2, 1])

    order = get_new_gtab_order(philips_table, dwi, bvals, bvecs)

    # Reordered bvecs/bvals must match the reference table's columns.
    assert np.array_equal(bvecs[order, :], philips_table[:, 0:3])
    assert np.array_equal(bvals[order], philips_table[:, 3])
72 changes: 44 additions & 28 deletions scilpy/gradients/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,11 @@
def random_uniform_on_sphere(nb_vectors):
    """
    Creates a set of K pseudo-random unit vectors, following a uniform
    distribution on the sphere. Reference: Emmanuel Caruyer's code
    (https://github.com/ecaruyer).

    This is not intended to create a perfect result. It's usually the
    initialization step of a repulsion strategy.

    Parameters
    ----------
    nb_vectors: int
        Number of vectors to generate.

    Returns
    -------
    bvecs: nd.array of shape (nb_vectors, 3)
        Pseudo-random unit vectors.
    """
    # Note. Caruyer's docstring says it's a uniform on the half-sphere, but
    # plotted a few results: it is one the whole sphere.
    phi = 2 * np.pi * np.random.rand(nb_vectors)

    # With u ~ U[0, 1]: r = 2*sqrt(u), theta = 2*arcsin(r/2), so
    # cos(theta) = 1 - 2u is uniform on [-1, 1], which is exactly the
    # condition for a uniform distribution on the sphere.
    r = 2 * np.sqrt(np.random.rand(nb_vectors))
    theta = 2 * np.arcsin(r / 2)

    # See here:
    # https://www.bogotobogo.com/Algorithms/uniform_distribution_sphere.php
    # They seem to be using something like this instead:
    # theta = np.arccos(2 * np.random.rand(nb_vectors) - 1.0)

    # Standard spherical-to-Cartesian conversion (unit radius).
    bvecs = np.zeros((nb_vectors, 3))
    bvecs[:, 0] = np.sin(theta) * np.cos(phi)
    bvecs[:, 1] = np.sin(theta) * np.sin(phi)
    bvecs[:, 2] = np.cos(theta)

    return bvecs


def get_new_order_philips(philips_table, dwi, bvals, bvecs):
def get_new_gtab_order(ref_gradient_table, dwi, bvals, bvecs):
"""
Reorder bval and bvec files based on the philips table.
Find the sorting order that could be applied to the bval and bvec files to
obtain the same order as in the reference gradient table.
This is mostly useful to reorder bval and bvec files in the order they were
acquired by the Philips scanner (before version 5.6).
Parameters
----------
philips_table: nd.array
Philips gradient table
dwis: nibabel
dwis
ref_gradient_table: nd.array
Gradient table, of shape (N, 4). It will use as reference for the
ordering of b-vectors.
Ex: Could be the result of scil_gradients_generate_sampling.py
dwi: nibabel image
dwi of shape (x, y, z, N). Only used to confirm the dwi's shape.
bvals : array, (N,)
bvals
bvals that need to be reordered.
bvecs : array, (N, 3)
bvecs
bvecs that need to be reordered.
Returns
-------
new_index: nd.array
New index to reorder bvals/bvec
"""
# Check number of gradients, bvecs, bvals, dwi and oTable
if len(bvecs) != dwi.shape[3] or len(bvals) != len(philips_table):
raise ValueError('bvec/bval/dwi and original table \
does not contain the same number of gradients')

# Check bvals
philips_bval = np.unique(philips_table[:, 3])
if not (len(bvecs) == dwi.shape[3] == len(bvals) ==
len(ref_gradient_table)):
raise ValueError('bvec/bval/dwi and reference table do not contain '
'the same number of gradients.')

philips_dwi_shells = philips_bval[philips_bval > 1]
philips_b0s = philips_bval[philips_bval < 1]
ref_bval = np.unique(ref_gradient_table[:, 3])
ref_dwi_shells = ref_bval[ref_bval > 1]
ref_b0s = ref_bval[ref_bval < 1]

dwi_shells = np.unique(bvals[bvals > 1])
b0s = np.unique(bvals[bvals < 1])

if len(philips_dwi_shells) != len(dwi_shells) or\
len(philips_b0s) != len(b0s):
raise ValueError('bvec/bval/dwi and original table\
does not contain the same shells')
if len(ref_dwi_shells) != len(dwi_shells) or \
len(ref_b0s) != len(b0s):
raise ValueError('bvec/bval/dwi and reference table do not contain '
'the same shells.')

new_index = np.zeros(bvals.shape)

for nbval in philips_bval:
for nbval in ref_bval:
curr_bval = np.where(bvals == nbval)[0]
curr_bval_table = np.where(philips_table[:, 3] == nbval)[0]
curr_bval_table = np.where(ref_gradient_table[:, 3] == nbval)[0]

if len(curr_bval) != len(curr_bval_table):
raise ValueError('bval/bvec and orginal table does not contain \
the same number of gradients for shell {0}'.format(curr_bval))
raise ValueError('bval/bvec and orginal table do not contain '
'the same number of gradients for shell {}.'
.format(curr_bval))

new_index[curr_bval_table] = curr_bval

Expand Down
22 changes: 8 additions & 14 deletions scilpy/image/volume_operations.py
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,7 @@ def register_image(static, static_grid2world, moving, moving_grid2world,
def compute_snr(dwi, bval, bvec, b0_thr, mask,
noise_mask=None, noise_map=None,
split_shells=False,
basename=None, verbose=False):
basename=None):
"""
Compute snr
Expand All @@ -265,16 +265,10 @@ def compute_snr(dwi, bval, bvec, b0_thr, mask,
basename: string
Basename used for naming all output files.
verbose: boolean
Set to use logging
Return
------
Dictionary of values (bvec, bval, mean, std, snr) for all volumes.
"""
if verbose:
logging.getLogger().setLevel(logging.INFO)

data = dwi.get_fdata(dtype=np.float32)
affine = dwi.affine
mask = get_data_as_mask(mask, dtype=bool)
Expand Down Expand Up @@ -417,17 +411,17 @@ def resample_volume(img, ref=None, res=None, iso_min=False, zoom=None,
if interp not in interp_choices:
raise ValueError("interp must be one of 'nn', 'lin', 'quad', 'cubic'.")

logging.debug('Data shape: %s', data.shape)
logging.debug('Data affine: %s', affine)
logging.debug('Data affine setup: %s', nib.aff2axcodes(affine))
logging.debug('Resampling data to %s with mode %s', new_zooms, interp)
logging.info('Data shape: %s', data.shape)
logging.info('Data affine: %s', affine)
logging.info('Data affine setup: %s', nib.aff2axcodes(affine))
logging.info('Resampling data to %s with mode %s', new_zooms, interp)

data2, affine2 = reslice(data, affine, original_zooms, new_zooms,
_interp_code_to_order(interp))

logging.debug('Resampled data shape: %s', data2.shape)
logging.debug('Resampled data affine: %s', affine2)
logging.debug('Resampled data affine setup: %s', nib.aff2axcodes(affine2))
logging.info('Resampled data shape: %s', data2.shape)
logging.info('Resampled data affine: %s', affine2)
logging.info('Resampled data affine setup: %s', nib.aff2axcodes(affine2))

if enforce_dimensions:
if ref is None:
Expand Down
8 changes: 6 additions & 2 deletions scilpy/io/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,13 +225,17 @@ def add_force_b0_arg(parser):


def add_verbose_arg(parser):
    """
    Add the -v verbosity option to an argparse parser.

    With no -v, args.verbose defaults to 'WARNING'; a bare -v means 'INFO'
    (nargs='?' with const); an explicit level among DEBUG/INFO/WARNING may
    also be given.

    Parameters
    ----------
    parser: argparse.ArgumentParser
        Parser to which the -v option is added.
    """
    # NOTE(review): the two deleted lines of the old store_true version were
    # fused into this block by the page extraction; they have been removed.
    parser.add_argument('-v', default="WARNING", const='INFO', nargs='?',
                        choices=['DEBUG', 'INFO', 'WARNING'], dest='verbose',
                        help='Produces verbose output depending on '
                             'the provided level. \nDefault level is warning, '
                             'default when using -v is info.')

    version = importlib.metadata.version('scilpy')

    # Temporarily raise the root logger to INFO so the version line is shown,
    # then restore the WARNING default. NOTE(review): this mutates the root
    # logger as a side effect of *defining* the argument — confirm intended.
    logging.getLogger().setLevel(logging.INFO)
    logging.info("Scilpy version: {}".format(version))
    logging.getLogger().setLevel(logging.WARNING)

def add_bbox_arg(parser):
Expand Down
Loading

0 comments on commit 493399d

Please sign in to comment.