Merge branch 'master' of github.com:karanphil/scilpy into b1_correction
karp2601 authored and committed on Feb 20, 2024
2 parents d0ccd1b + 9381895 commit 2d33da1
Showing 178 changed files with 519 additions and 511 deletions.
34 changes: 16 additions & 18 deletions scilpy/dwi/operations.py
@@ -139,7 +139,7 @@ def compute_dwi_attenuation(dwi_weights: np.ndarray, b0: np.ndarray):
return dwi_attenuation


- def detect_volume_outliers(data, bvecs, bvals, std_scale, verbose,
+ def detect_volume_outliers(data, bvecs, bvals, std_scale,
b0_thr=DEFAULT_B0_THRESHOLD):
"""
Parameters
@@ -153,8 +153,6 @@ def detect_volume_outliers(data, bvecs, bvals, std_scale, verbose,
std_scale: float
How many deviation from the mean are required to be considered an
outlier.
- verbose: bool
- If True, print even more stuff.
b0_thr: float
Value below which b-values are considered as b0.
"""
@@ -195,26 +193,26 @@ def detect_volume_outliers(data, bvecs, bvals, std_scale, verbose,
outliers_corr = np.argwhere(
results_dict[key][:, 2] < avg_corr - (std_scale * std_corr))

- print('Results for shell {} with {} directions:'
- .format(key, len(results_dict[key])))
- print('AVG and STD of angles: {} +/- {}'
- .format(avg_angle, std_angle))
- print('AVG and STD of correlations: {} +/- {}'
- .format(avg_corr, std_corr))
+ logging.info('Results for shell {} with {} directions:'
+ .format(key, len(results_dict[key])))
+ logging.info('AVG and STD of angles: {} +/- {}'
+ .format(avg_angle, std_angle))
+ logging.info('AVG and STD of correlations: {} +/- {}'
+ .format(avg_corr, std_corr))

if len(outliers_angle) or len(outliers_corr):
- print('Possible outliers ({} STD below or above average):'
+ logging.info('Possible outliers ({} STD below or above average):'
.format(std_scale))
- print('Outliers based on angle [position (4D), value]')
+ logging.info('Outliers based on angle [position (4D), value]')
for i in outliers_angle:
- print(results_dict[key][i, :][0][0:2])
- print('Outliers based on correlation [position (4D), value]')
+ logging.info(results_dict[key][i, :][0][0:2])
+ logging.info('Outliers based on correlation [position (4D), ' +
+ 'value]')
for i in outliers_corr:
- print(results_dict[key][i, :][0][0::2])
+ logging.info(results_dict[key][i, :][0][0::2])
else:
- print('No outliers detected.')
+ logging.info('No outliers detected.')

- if verbose:
- print('Shell with b-value {}'.format(key))
- pprint.pprint(results_dict[key])
+ logging.debug('Shell with b-value {}'.format(key))
+ logging.debug("\n" + pprint.pformat(results_dict[key]))
print()
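Net effect of this file's hunks: `detect_volume_outliers` loses its `verbose` flag, reports its summary at INFO, and reserves the raw per-shell dump for DEBUG. A minimal sketch of the new calling pattern, using hypothetical toy inputs rather than anything from this diff:

```python
import logging

import numpy as np

from scilpy.dwi.operations import detect_volume_outliers

# DEBUG stands in for the removed verbose=True: it adds the pprint
# dump of each shell on top of the INFO-level outlier summary.
logging.getLogger().setLevel(logging.DEBUG)

rng = np.random.default_rng(0)
data = rng.random((10, 10, 10, 33), dtype=np.float32)   # toy 4D DWI volume
bvecs = rng.normal(size=(33, 3))
bvecs /= np.linalg.norm(bvecs, axis=1, keepdims=True)   # unit directions
bvals = np.concatenate(([0.], np.full(32, 1000.)))      # one b0, one shell

detect_volume_outliers(data, bvecs, bvals, std_scale=2.0)
```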
22 changes: 8 additions & 14 deletions scilpy/image/volume_operations.py
@@ -241,7 +241,7 @@ def register_image(static, static_grid2world, moving, moving_grid2world,
def compute_snr(dwi, bval, bvec, b0_thr, mask,
noise_mask=None, noise_map=None,
split_shells=False,
- basename=None, verbose=False):
+ basename=None):
"""
Compute snr
@@ -264,16 +264,10 @@ def compute_snr(dwi, bval, bvec, b0_thr, mask,
basename: string
Basename used for naming all output files.
- verbose: boolean
- Set to use logging
Return
------
Dictionary of values (bvec, bval, mean, std, snr) for all volumes.
"""
- if verbose:
- logging.getLogger().setLevel(logging.INFO)

data = dwi.get_fdata(dtype=np.float32)
affine = dwi.affine
mask = get_data_as_mask(mask, dtype=bool)
@@ -416,17 +410,17 @@ def resample_volume(img, ref=None, res=None, iso_min=False, zoom=None,
if interp not in interp_choices:
raise ValueError("interp must be one of 'nn', 'lin', 'quad', 'cubic'.")

- logging.debug('Data shape: %s', data.shape)
- logging.debug('Data affine: %s', affine)
- logging.debug('Data affine setup: %s', nib.aff2axcodes(affine))
- logging.debug('Resampling data to %s with mode %s', new_zooms, interp)
+ logging.info('Data shape: %s', data.shape)
+ logging.info('Data affine: %s', affine)
+ logging.info('Data affine setup: %s', nib.aff2axcodes(affine))
+ logging.info('Resampling data to %s with mode %s', new_zooms, interp)

data2, affine2 = reslice(data, affine, original_zooms, new_zooms,
_interp_code_to_order(interp))

- logging.debug('Resampled data shape: %s', data2.shape)
- logging.debug('Resampled data affine: %s', affine2)
- logging.debug('Resampled data affine setup: %s', nib.aff2axcodes(affine2))
+ logging.info('Resampled data shape: %s', data2.shape)
+ logging.info('Resampled data affine: %s', affine2)
+ logging.info('Resampled data affine setup: %s', nib.aff2axcodes(affine2))

if enforce_dimensions:
if ref is None:
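Both functions in this file follow the same pattern: `compute_snr` drops its `verbose` keyword, and `resample_volume` promotes its shape/affine traces from DEBUG to INFO. A minimal sketch of what a caller now sees, assuming a local `t1.nii.gz` and assuming `res` is the target isotropic resolution (neither is stated in this diff):

```python
import logging

import nibabel as nib

from scilpy.image.volume_operations import resample_volume

# Plain INFO is now enough to see the before/after shapes and affines.
logging.getLogger().setLevel(logging.INFO)

img = nib.load('t1.nii.gz')              # hypothetical input volume
resampled = resample_volume(img, res=1)  # assumed: 1 mm isotropic target
nib.save(resampled, 't1_resampled.nii.gz')
```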
8 changes: 6 additions & 2 deletions scilpy/io/utils.py
@@ -225,13 +225,17 @@ def add_force_b0_arg(parser):


def add_verbose_arg(parser):
- parser.add_argument('-v', action='store_true', dest='verbose',
- help='If set, produces verbose output.')
+ parser.add_argument('-v', default="WARNING", const='INFO', nargs='?',
+ choices=['DEBUG', 'INFO', 'WARNING'], dest='verbose',
+ help='Produces verbose output depending on '
+ 'the provided level. \nDefault level is warning, '
+ 'default when using -v is info.')

version = importlib.metadata.version('scilpy')

logging.getLogger().setLevel(logging.INFO)
logging.info("Scilpy version: {}".format(version))
logging.getLogger().setLevel(logging.WARNING)


def add_bbox_arg(parser):
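This helper is the pivot of the whole commit: `-v` goes from a boolean to an optional-argument flag carrying a log level, 'WARNING' when absent and 'INFO' for a bare `-v`. A minimal sketch of how a script might consume it (the wiring is hypothetical, not part of this diff):

```python
import argparse
import logging

from scilpy.io.utils import add_verbose_arg

parser = argparse.ArgumentParser(description='hypothetical demo script')
add_verbose_arg(parser)
args = parser.parse_args()  # run with nothing, `-v`, or `-v DEBUG`

# args.verbose now holds a level name, so the old `if args.verbose:`
# boolean check becomes a direct setLevel() call.
logging.getLogger().setLevel(args.verbose)

logging.info('shown with -v or -v DEBUG')
logging.debug('shown only with -v DEBUG')
```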
4 changes: 2 additions & 2 deletions scilpy/reconst/fodf.py
@@ -91,13 +91,13 @@ def get_ventricles_max_fodf(data, fa, md, zoom, args):
count += 1
mask[i, j, k] = 1

- logging.debug('Number of voxels detected: {}'.format(count))
+ logging.info('Number of voxels detected: {}'.format(count))
if count == 0:
logging.warning('No voxels found for evaluation! Change your fa '
'and/or md thresholds')
return 0, mask

- logging.debug('Average max fodf value: {}'.format(sum_of_max / count))
+ logging.info('Average max fodf value: {}'.format(sum_of_max / count))
return sum_of_max / count, mask


14 changes: 7 additions & 7 deletions scilpy/reconst/frf.py
@@ -107,7 +107,7 @@ def compute_ssst_frf(data, bvals, bvecs, mask=None, mask_wm=None,
nvox = np.sum(mask)
response, ratio = response_from_mask_ssst(gtab, data, mask)

- logging.debug(
+ logging.info(
"Number of indices is {:d} with threshold of {:.2f}".format(
nvox, fa_thresh))
fa_thresh -= 0.05
@@ -117,14 +117,14 @@ def compute_ssst_frf(data, bvals, bvecs, mask=None, mask_wm=None,
"Could not find at least {:d} voxels with sufficient FA "
"to estimate the FRF!".format(min_nvox))

- logging.debug(
+ logging.info(
"Found {:d} voxels with FA threshold {:.2f} for "
"FRF estimation".format(nvox, fa_thresh + 0.05))
logging.debug("FRF eigenvalues: {}".format(str(response[0])))
logging.debug("Ratio for smallest to largest eigen value "
"is {:.3f}".format(ratio))
logging.debug("Mean of the b=0 signal for voxels used "
"for FRF: {}".format(response[1]))
logging.info("FRF eigenvalues: {}".format(str(response[0])))
logging.info("Ratio for smallest to largest eigen value "
"is {:.3f}".format(ratio))
logging.info("Mean of the b=0 signal for voxels used "
"for FRF: {}".format(response[1]))

full_response = np.array([response[0][0], response[0][1],
response[0][2], response[1]])
18 changes: 9 additions & 9 deletions scilpy/stats/matrix_stats.py
@@ -67,19 +67,19 @@ def ttest_two_matrices(matrices_g1, matrices_g2, paired, tail, fdr,
sum_both_groups = np.sum(matrices_g1, axis=2) + np.sum(matrices_g2, axis=2)
nbr_non_zeros = np.count_nonzero(np.triu(sum_both_groups))

- logging.debug('The provided matrices contain {} non zeros elements.'
- .format(nbr_non_zeros))
+ logging.info('The provided matrices contain {} non zeros elements.'
+ .format(nbr_non_zeros))

matrices_g1 = matrices_g1.reshape((np.prod(matrix_shape), nb_group_g1))
matrices_g2 = matrices_g2.reshape((np.prod(matrix_shape), nb_group_g2))
# Negative epsilon, to differentiate from null p-values
matrix_pval = np.ones(np.prod(matrix_shape)) * -0.000001

text = ' paired' if paired else ''
- logging.debug('Performing{} t-test with "{}" hypothesis.'
- .format(text, tail))
- logging.debug('Data has dimensions {}x{} with {} and {} observations.'
- .format(matrix_shape[0], matrix_shape[1],
+ logging.info('Performing{} t-test with "{}" hypothesis.'
+ .format(text, tail))
+ logging.info('Data has dimensions {}x{} with {} and {} observations.'
+ .format(matrix_shape[0], matrix_shape[1],
nb_group_g1, nb_group_g2))

# For conversion to p-values
@@ -105,7 +105,7 @@ def ttest_two_matrices(matrices_g1, matrices_g2, paired, tail, fdr,

corr_matrix_pval = matrix_pval.reshape(matrix_shape)
if fdr:
- logging.debug('Using FDR, the results will be q-values.')
+ logging.info('Using FDR, the results will be q-values.')
corr_matrix_pval = np.triu(corr_matrix_pval)
corr_matrix_pval[corr_matrix_pval > 0] = multipletests(
corr_matrix_pval[corr_matrix_pval > 0], 0, method='fdr_bh')[1]
@@ -158,8 +158,8 @@ def omega_sigma(matrix):
transitivity_latt_list = []
path_length_rand_list = []
for i in range(10):
- logging.debug('Generating random and lattice matrices, '
- 'iteration #{}.'.format(i))
+ logging.info('Generating random and lattice matrices, '
+ 'iteration #{}.'.format(i))
random = bct.randmio_und(matrix, 10)[0]
lattice = bct.latmio_und(matrix, 10)[1]

26 changes: 13 additions & 13 deletions scilpy/stats/stats.py
@@ -32,10 +32,10 @@ def verify_normality(data, alpha=0.05):
# First, we verify if sample pass Shapiro-Wilk test
W, p_value = scipy.stats.shapiro(data)
if p_value < alpha and len(data) < 30:
- logging.debug('The data sample can not be considered normal')
+ logging.info('The data sample can not be considered normal')
normality = False
else:
- logging.debug('The data sample pass the normality assumption.')
+ logging.info('The data sample pass the normality assumption.')
normality = True
return normality, p_value

@@ -76,12 +76,12 @@ def verify_homoscedasticity(data_by_group, normality=False, alpha=0.05):
else:
test = 'Levene'
W, p_value = scipy.stats.levene(*data_by_group)
- logging.debug('Test name: {}'.format(test))
+ logging.info('Test name: {}'.format(test))
if p_value < alpha and mean_nb < 30:
- logging.debug('The sample didnt pass the equal variance assumption')
+ logging.info('The sample didnt pass the equal variance assumption')
homoscedasticity = False
else:
- logging.debug('The sample pass the equal variance assumption')
+ logging.info('The sample pass the equal variance assumption')
homoscedasticity = True

return test, homoscedasticity, p_value
@@ -145,12 +145,12 @@ def verify_group_difference(data_by_group, normality=False,
test = 'Kruskalwallis'
T, p_value = scipy.stats.kruskal(*data_by_group)

- logging.debug('Test name: {}'.format(test))
+ logging.info('Test name: {}'.format(test))
if p_value < alpha:
- logging.debug('There is a difference between groups')
+ logging.info('There is a difference between groups')
difference = True
else:
- logging.debug('We are not able to detect difference between the groups.')
+ logging.info('We are not able to detect difference between the groups.')
difference = False

return test, difference, p_value
@@ -191,9 +191,9 @@ def verify_post_hoc(data_by_group, groups_list, test,
test : string
Name of the test done to verify group difference
"""
- logging.debug('We need to do a post-hoc analysis since '
- 'there is a difference')
- logging.debug('Post-hoc: {} pairwise'.format(test))
+ logging.info('We need to do a post-hoc analysis since '
+ 'there is a difference')
+ logging.info('Post-hoc: {} pairwise'.format(test))
differences = []
nb_group = len(groups_list)

@@ -214,7 +214,7 @@ def verify_post_hoc(data_by_group, groups_list, test,
data_by_group[x], data_by_group[y])
differences.append((groups_list[x], groups_list[y],
p_value < alpha, p_value))
- logging.debug('Result:')
- logging.debug(differences)
+ logging.info('Result:')
+ logging.info(differences)

return test, differences
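Every verdict in this module moves from DEBUG to INFO, so the statistical narrative prints as soon as a script opts into INFO. A minimal sketch with a toy sample (hypothetical data, not from this repository):

```python
import logging

import numpy as np

from scilpy.stats.stats import verify_normality

logging.getLogger().setLevel(logging.INFO)

rng = np.random.default_rng(0)
sample = rng.normal(loc=0.0, scale=1.0, size=50)  # toy data
normality, p_value = verify_normality(sample)     # logs its verdict at INFO
```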
32 changes: 17 additions & 15 deletions scilpy/stats/utils.py
@@ -51,8 +51,8 @@ def __init__(self, json_file, participants):
self.data_dictionnary[participant['participant_id']]\
[variable] = participant[variable]

- logging.debug('Data_dictionnary')
- logging.debug(self.data_dictionnary[self.get_first_participant()])
+ logging.info('Data_dictionnary')
+ logging.info(self.data_dictionnary[self.get_first_participant()])

with open('data.json', 'w') as fp:
json.dump(self.data_dictionnary, fp, indent=4)
@@ -64,31 +64,33 @@ def validation_participant_id(self, json_info, participants_info):
# Create the list of participants id from the json dictionnary

participants_from_json = list(json_info.keys())
- logging.debug('participant list from json dictionnary:')
- logging.debug(participants_from_json)
+ logging.info('participant list from json dictionnary:')
+ logging.info(participants_from_json)

# Create the list of participants id from the tsv list of dictionnary
participants_from_tsv = []
for participant in participants_info:
participants_from_tsv.append(participant['participant_id'])
- logging.debug('participant list from tsv file:')
- logging.debug(participants_from_tsv)
+ logging.info('participant list from tsv file:')
+ logging.info(participants_from_tsv)

# Compare the two list
participants_from_json.sort()
participants_from_tsv.sort()

if not participants_from_json == participants_from_tsv:
if not len(participants_from_json) == len(participants_from_tsv):
- logging.debug('The number of participants from json file is not the same '
- 'as the one in the tsv file.')
+ logging.info('The number of participants from json file is '
+ 'not the same as the one in the tsv file.')
is_in_tsv = np.in1d(participants_from_json, participants_from_tsv)
is_in_json = np.in1d(participants_from_tsv, participants_from_json)

- logging.debug('participants list from json file missing in tsv file :')
- logging.debug(np.asarray(participants_from_json)[~is_in_tsv])
- logging.debug('participants list from tsv file missing in json file :')
- logging.debug(np.asarray(participants_from_tsv)[~is_in_json])
+ logging.info('participants list from json file missing in tsv '
+ 'file :')
+ logging.info(np.asarray(participants_from_json)[~is_in_tsv])
+ logging.info('participants list from tsv file missing in json '
+ 'file :')
+ logging.info(np.asarray(participants_from_tsv)[~is_in_json])

logging.error('The subjects from the json file does not fit '
'with the subjects of the tsv file. '
@@ -97,7 +97,7 @@ def validation_participant_id(self, json_info, participants_info):
'with the subjects of the tsv file. '
'Impossible to build the data_for_stat object')
else:
- logging.debug('The json and the tsv are compatible')
+ logging.info('The json and the tsv are compatible')

def get_participants_list(self):
# Construct the list of participant_id from the data_dictionnary
@@ -492,6 +494,6 @@ def visualise_distribution(data_by_group, participants_id, bundle, metric,

fig.savefig(os.path.join(oFolder, 'Graph', bundle, metric))

- logging.debug('outliers:[(id, group)]')
- logging.debug(outliers)
+ logging.info('outliers:[(id, group)]')
+ logging.info(outliers)
return outliers
4 changes: 2 additions & 2 deletions scilpy/tracking/tracker.py
@@ -179,8 +179,8 @@ def _set_nbr_processes(self, nbr_processes):

if nbr_processes > self.nbr_seeds:
nbr_processes = self.nbr_seeds
logging.debug("Setting number of processes to {} since there were "
"less seeds than processes.".format(nbr_processes))
logging.info("Setting number of processes to {} since there were "
"less seeds than processes.".format(nbr_processes))
return nbr_processes

def _prepare_multiprocessing_pool(self, tmpdir):
6 changes: 3 additions & 3 deletions scilpy/tractanalysis/features.py
@@ -115,9 +115,9 @@ def remove_loops_and_sharp_turns(streamlines,
if tm.mean_curvature(clusters.centroids[i]) <= mean_curvature:
ids.extend(clusters[i].indices)
else:
logging.debug("Impossible to use the use_qb option because " +
"not more than one streamline left from the\n" +
"input file.")
logging.info("Impossible to use the use_qb option because " +
"not more than one streamline left from the\n" +
"input file.")
return ids


[Diff truncated here: the remaining 168 of the 178 changed files are not shown.]
