Commit
Merge branch 'master' of https://github.com/canlab/CanlabCore
torwager committed Jan 25, 2024
2 parents 8d07787 + 7ea4fa3 commit ff735c8
Showing 20 changed files with 891 additions and 273 deletions.
132 changes: 132 additions & 0 deletions CanlabCore/@atlas/downsample_parcellation.m
@@ -0,0 +1,132 @@
function new_atlas_obj = downsample_parcellation(obj, varargin)
% An atlas_obj has multiple label fields meant to label nested parcellations
% at different levels of granularity. labels is the default field; it must
% contain unique entries and correspond to the unique indices in the
% (voxelwise) parcellation index map, while labels_2 ... labels_5
% may contain non-unique entries corresponding to coarser parcellations.
% This function remaps indices and probability maps to correspond to one
% of these coarser parcellations. It is a lossy operation: all labels
% at a finer level than the selected parcellation are discarded.
%
% Input ::
% obj - an atlas_object. Must have at least one of labels_2 ...
% labels_5 defined
% varargin - either a vector of new labels, a label-field name (string), or empty.
% If you provide a vector, its length should equal the number of
% regions in the original atlas, with redundant labels for regions
% you want to combine in the downsampled atlas.
% If you provide a string, it should be one of
% {'labels_2', 'labels_3', 'labels_4', 'labels_5'}, which will
% then be used as the downsampling vector.
% If you provide nothing, the contents of labels_2 are used
% by default.
%
% Output ::
% atlas_obj - a new atlas object, reindexed according to labelfield
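%
% Example ::
%   % Illustrative usage sketch (assumes `atl` is an atlas object that
%   % already has a coarser parcellation defined in its labels_2 field):
%   coarse_atl = downsample_parcellation(atl, 'labels_2');
%
%   % or pass an explicit downsampling vector with one (possibly redundant)
%   % label per region of the original atlas:
%   % coarse_atl = downsample_parcellation(atl, new_label_vector);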

fprintf('Downsampling %s parcels\n', obj.atlas_name);

if isempty(varargin)
labelfield = 'labels_2';
elseif ischar(varargin{1})
labelfield=varargin{1};
elseif isvector(varargin{1})
if length(varargin{1}) ~= num_regions(obj)
error('New label vector has length %d, which does not match the input atlas parcel count (%d)', length(varargin{1}), num_regions(obj));
end
labelfield = 'labels';
obj.labels = varargin{1};
else
error('Unrecognized input datatype');
end

% input check
valid_lbls = {'labels', 'labels_2', 'labels_3', 'labels_4', 'labels_5'};
if ~ismember(labelfield, valid_lbls)
error('labelfield must be one of {''labels_2'', ''labels_3'', ''labels_4'', ''labels_5''}');
end

% check that labels are ordered with finer labels first
n_unique_lbls = zeros(1,length(valid_lbls));
n_lbls = zeros(1,length(valid_lbls));
for i = 1:length(valid_lbls)
lbl = valid_lbls{i};
n_unique_lbls(i) = length(unique(obj.(lbl)));
n_lbls(i) = length(obj.(lbl));
end

new_ind = find(strcmp(valid_lbls,labelfield));
if n_unique_lbls(new_ind) > n_unique_lbls(1)
error('labelfield %s has %d unique labels, which is finer grained than ''labels'', which has %d unique labels', labelfield, n_unique_lbls(new_ind), n_unique_lbls(1));
end

new_lbl = valid_lbls{new_ind};
% note: we remove any label fields below if they're not of the appropriate
% length to correspond to the original parcellation
kept_lbl_ind = find((n_unique_lbls <= n_unique_lbls(new_ind)) & ...
(n_lbls == length(obj.(labelfield))));

rm_lbl_ind = find(~ismember(1:length(valid_lbls), 1:length(kept_lbl_ind)));

% merge high resolution regions
[new_lbl_parcels, lbl_exp] = unique(obj.(new_lbl),'stable');
new_parcel = cell(1,length(new_lbl_parcels));

% init progress watcher
n_reg = length(new_lbl_parcels);
last_msg = sprintf('Creating new region 1/%d',n_reg);
fprintf('%s',last_msg);

% working with sparse matrices is much faster for atlases with many
% parcels, while atlases with few parcels are likely to run quickly
% regardless
obj.probability_maps = sparse(obj.probability_maps);
for i = 1:length(new_lbl_parcels)
% this can be parallelized but it's very memory intensive for some
% atlases (e.g. canlab2023) and parallelization makes the problem worse
% by several factors.

% update progress watcher
delete_last_chars = length(last_msg);
fprintf('%s',char(8*ones(1,delete_last_chars)))
new_msg = sprintf('Creating new region %d/%d',i,n_reg);
fprintf('%s',new_msg);
last_msg = new_msg;

new_parcel{i} = obj.select_atlas_subset(new_lbl_parcels(i), new_lbl, 'exact', 'flatten');
end
fprintf('\n');

% init progress watcher
n_reg = length(new_parcel);
last_msg = sprintf('Merging new region 1/%d',n_reg);
fprintf('%s',last_msg);

% Combine parcels
new_atlas_obj = new_parcel{1};
for i = 2:length(new_parcel)
% update progress watcher
delete_last_chars = length(last_msg);
fprintf('%s',char(8*ones(1,delete_last_chars)))
new_msg = sprintf('Merging new region %d/%d',i,n_reg);
fprintf('%s',new_msg);
last_msg = new_msg;

new_atlas_obj = new_atlas_obj.merge_atlases(new_parcel{i},'noverbose');
end
fprintf('\n');
new_atlas_obj.probability_maps = sparse(new_atlas_obj.probability_maps);

for i = 1:length(kept_lbl_ind)
% reindex the kept label fields so the selected level becomes labels
% (e.g., the old labels_2 becomes the new labels, labels_3 becomes labels_2, ...)
old_lbls = obj.(valid_lbls{kept_lbl_ind(i)});

new_atlas_obj.(valid_lbls{i}) = old_lbls(lbl_exp);
end

% delete finer parcellations
for ind = rm_lbl_ind(:)'
new_atlas_obj.(valid_lbls{ind}) = {};
end

end
21 changes: 18 additions & 3 deletions CanlabCore/@atlas/merge_atlases.m
@@ -40,6 +40,7 @@

case 'always_replace', always_replace = true; varargin{i} = [];
case 'noreplace', doreplacevoxels = false; varargin{i} = [];
case 'noverbose', doverbose = false; varargin{i} = [];

otherwise , warning(['Unknown input string option:' varargin{i}]);
end
@@ -50,11 +51,12 @@
% Subset
% -------------------------------------------------------------------------

if ~isempty(varargin) && any(cellfun(@isvector, varargin) || cellfun(@iscell, varargin))
if ~isempty(varargin) && any(cellfun(@isvector, varargin) | cellfun(@iscell, varargin))

if doverbose, disp('Getting atlas subset with select_atlas_subset'); end

atlas_obj_to_add = select_atlas_subset(atlas_obj_to_add, varargin{:});
args = varargin(cellfun(@isvector, varargin) | cellfun(@iscell, varargin));
atlas_obj_to_add = select_atlas_subset(atlas_obj_to_add, args{:});

end

@@ -135,7 +137,20 @@
atlas_obj_to_add.probability_maps(wh, :) = 0;
end

atlas_obj.probability_maps = [full(atlas_obj.probability_maps) full(atlas_obj_to_add.probability_maps)];
% concatenating sparse matrices and then converting the result to full is much
% faster than converting each matrix to full and concatenating full matrices.
% So instead of doing a double conversion and then concatenating, concatenate
% as sparse matrices if either one is sparse, then convert the result to full.
%atlas_obj.probability_maps = [full(atlas_obj.probability_maps) full(atlas_obj_to_add.probability_maps)];
if issparse(atlas_obj.probability_maps) || issparse(atlas_obj_to_add.probability_maps)
if ~issparse(atlas_obj.probability_maps)
atlas_obj.probability_maps = sparse(double(atlas_obj.probability_maps));
end
if ~issparse(atlas_obj_to_add.probability_maps)
atlas_obj_to_add.probability_maps = sparse(double(atlas_obj_to_add.probability_maps));
end
end
atlas_obj.probability_maps = full([atlas_obj.probability_maps atlas_obj_to_add.probability_maps]);

atlas_obj = probability_maps_to_region_index(atlas_obj);

7 changes: 5 additions & 2 deletions CanlabCore/@atlas/parcel_data2fmri_data.m
@@ -64,6 +64,9 @@
% parcel_stats2statistic_image
%

% Programmers' notes:
% 12/2023 - Tor Wager - fixed fmri_data constructor call to avoid unnecessary bit rate warning

% Create placeholder statistic image

atlas_obj = replace_empty(atlas_obj);
@@ -77,8 +80,8 @@
obj = fmri_data( ...
struct('dat', single(0 .* placeholder_vec), ...
'volInfo', atlas_obj.volInfo, ...
'removed_voxels', atlas_obj.removed_voxels) ...
);
'removed_voxels', atlas_obj.removed_voxels), ...
[], 'noverbose');

% number of parcels

30 changes: 27 additions & 3 deletions CanlabCore/@atlas/select_atlas_subset.m
@@ -14,8 +14,11 @@
% Options:
% 'flatten' : Flatten integer vector in .dat to a single 1/0 mask. Good for
% creating masks from combinations of regions, or adding a set to another
% atlas as a single atlas region. .probability_maps reduced to single max
% map
% atlas as a single atlas region. .probability_maps is reestimated as the
% posterior probability of the union of the constituent regions under a
% conditional independence assumption. If the probability maps sum to more
% than 1 within any voxel, that assumption is violated and we default to
% using the maximum probability instead.
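% For example, if a voxel has P = 0.5 for region A and P = 0.5 for region B
% and its per-voxel probabilities sum to no more than 1, the flattened map
% assigns it P = 1.0 (the sum) rather than 0.5 (the max).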
%
% 'labels_2' : If you enter any field name in the object, e.g., labels_2,
% the function will search for keyword matches here instead of in obj.labels.
@@ -229,7 +232,28 @@
% -------------------------------------------------------------------
obj_subset.dat = single(obj_subset.dat > 0);

obj_subset.probability_maps = max(obj_subset.probability_maps, [], 2);
% Taking the max is wrong here. Consider a voxel that has a 50%
% probability of belonging to region A and a 50% probability of
% belonging to region B: the max value is 50%, but the probability
% that it belongs to one or the other must be 100%. Let's check whether
% it's safe to assume conditional independence instead.
%
% We don't know if the atlas labels are exhaustive, but if the sum of
% atlas label probabilities exceeds 1 in any voxel we can conclude that
% P(A or B) ~= P(A) + P(B), because the total density must sum to 1 to
% be a valid probability measure. We check whether probabilities exceed
% 1 with some tolerance to deal with floating point errors.
hasdata = any(obj.probability_maps,2);
tol = 1e-7;
if all(sum(obj.probability_maps(hasdata,:), 2) - 1 < tol)
obj_subset.probability_maps = sum(obj_subset.probability_maps, 2);
else
% we have evidence that probabilities are not conditionally
% independent
warning('Probabilities sum to more than 1, so we cannot assume conditional independence. The new map will use the maximum probability instead of the sum of probabilities.')
obj_subset.probability_maps = max(obj_subset.probability_maps, [], 2);
end


% Add labels for combined mask

26 changes: 20 additions & 6 deletions CanlabCore/@fmri_data/model_brain_pathway.m
@@ -255,6 +255,20 @@

%% estimate pattern-based connectivity with kfold or user-specified xval

% Convert to cell arrays, one cell per fold (i.e., for 10-fold, the images are
% divided into 10 cells), each holding a voxels x images matrix.
% note by kodiweera: convert to cell arrays before the cross-validation loop. I moved this loop out of the main loop below to make the program a little faster, since it was previously rerun on every k-fold iteration.


for kk=1:max(indices)
to_align_dat_one{kk}=source_one_obj.dat(:,indices==kk);
to_align_dat_two{kk}=source_two_obj.dat(:,indices==kk);
to_align_dat_three{kk}=target_one_obj.dat(:,indices==kk);
to_align_dat_four{kk}=target_two_obj.dat(:,indices==kk);

end


for k=1:max(indices)

un_inds=unique(indices);
@@ -263,13 +277,13 @@
% Convert to cell array, one cell per subject
% Voxels x Images matrix

for kk=1:max(indices)
to_align_dat_one{kk}=source_one_obj.dat(:,indices==kk);
to_align_dat_two{kk}=source_two_obj.dat(:,indices==kk);
to_align_dat_three{kk}=target_one_obj.dat(:,indices==kk);
to_align_dat_four{kk}=target_two_obj.dat(:,indices==kk);
%for kk=1:max(indices)
% to_align_dat_one{kk}=source_one_obj.dat(:,indices==kk);
% to_align_dat_two{kk}=source_two_obj.dat(:,indices==kk);
% to_align_dat_three{kk}=target_one_obj.dat(:,indices==kk);
% to_align_dat_four{kk}=target_two_obj.dat(:,indices==kk);

end
%end

if do_alignment
% Call Manning hyperalign.m
