diff --git a/use_cases/CaImAnpaper/scripts_paper/Preprocessing_and_Figures_4_5_8_MASTER.py b/use_cases/CaImAnpaper/scripts_paper/Preprocessing_and_Figures_4_5_8_MASTER.py
new file mode 100644
index 000000000..9586744d6
--- /dev/null
+++ b/use_cases/CaImAnpaper/scripts_paper/Preprocessing_and_Figures_4_5_8_MASTER.py
@@ -0,0 +1,1155 @@
+# -*- coding: utf-8 -*-
+
+"""
+Created on Fri Aug 25 14:49:36 2017
+
+@author: agiovann
+"""
+
+from __future__ import division
+from __future__ import print_function
+from builtins import zip
+from builtins import str
+from builtins import map
+from builtins import range
+from past.utils import old_div
+import cv2
+import pickle
+from caiman.components_evaluation import select_components_from_metrics
+from caiman.base.rois import nf_match_neurons_in_binary_masks
+from caiman.utils.utils import apply_magic_wand
+from caiman.base.rois import detect_duplicates_and_subsets
+try:
+    cv2.setNumThreads(1)
+except Exception:
+    print('OpenCV is naturally single threaded')
+
+try:
+    if __IPYTHON__:
+        # used for debugging purposes only; allows one to reload classes when changed
+        get_ipython().magic('load_ext autoreload')
+        get_ipython().magic('autoreload 2')
+except NameError:
+    print('Not launched under iPython')
+
+import caiman as cm
+import numpy as np
+import os
+import time
+import pylab as pl
+import scipy
+from caiman.utils.visualization import plot_contours, view_patches_bar
+from caiman.source_extraction.cnmf import cnmf as cnmf
+from caiman.components_evaluation import estimate_components_quality_auto
+from caiman.cluster import setup_cluster
+#%% ANALYSIS MODE AND PARAMETERS
+preprocessing_from_scratch = True  # whether to run the full pipeline or only create the figures
+
+if preprocessing_from_scratch:
+    reload = False
+    plot_on = False
+    save_on = False  # set to True to recreate the results files
+else:
+    reload = True
+    plot_on = False
+    save_on = False
+
+print_figs = True
+skip_refinement = False
+backend_patch = 'multiprocessing'
+backend_refine = 'multiprocessing'
+n_processes = 24
+#%%
+def precision_snr(snr_gt, snr_gt_fn, snr_cnmf, snr_cnmf_fp, snr_thrs):
+    """Compute precision, recall and F1 score as a function of the SNR threshold."""
+    all_results_fake = []
+    all_results_OR = []
+    all_results_AND = []
+    for snr_thr in snr_thrs:
+        snr_all_gt = np.array(list(snr_gt) + list(snr_gt_fn) + [0]*len(snr_cnmf_fp))
+        snr_all_cnmf = np.array(list(snr_cnmf) + [0]*len(snr_gt_fn) + list(snr_cnmf_fp))
+
+        ind_gt = np.where(snr_all_gt > snr_thr)[0]      # gt components above threshold
+        ind_cnmf = np.where(snr_all_cnmf > snr_thr)[0]  # same for cnmf
+
+        # precision: fraction of detected components above the SNR threshold that are true
+        prec = np.sum(snr_all_gt[ind_cnmf] > 0)/len(ind_cnmf)
+
+        # recall: fraction of gt components above the SNR threshold that are detected
+        rec = np.sum(snr_all_cnmf[ind_gt] > 0)/len(ind_gt)
+
+        f1 = 2*prec*rec/(prec + rec)
+
+        results_fake = [prec, rec, f1]
+
+        # f1 score with OR condition
+        ind_OR = np.union1d(ind_gt, ind_cnmf)
+        # indices of components that are above threshold in either set
+        ind_gt_OR = np.where(snr_all_gt[ind_OR] > 0)[0]      # gt components
+        ind_cnmf_OR = np.where(snr_all_cnmf[ind_OR] > 0)[0]  # cnmf components
+        prec_OR = np.sum(snr_all_gt[ind_OR][ind_cnmf_OR] > 0)/len(ind_cnmf_OR)
+        rec_OR = np.sum(snr_all_cnmf[ind_OR][ind_gt_OR] > 0)/len(ind_gt_OR)
+        f1_OR = 2*prec_OR*rec_OR/(prec_OR + rec_OR)
+
+        results_OR = [prec_OR, rec_OR, f1_OR]
+
+        # f1 score with AND condition
+        ind_AND = np.intersect1d(ind_gt, ind_cnmf)
+        ind_fp = np.intersect1d(ind_cnmf, np.where(snr_all_gt == 0)[0])
+        ind_fn = np.intersect1d(ind_gt, np.where(snr_all_cnmf == 0)[0])
+
+        prec_AND = len(ind_AND)/(len(ind_AND) + len(ind_fp))
+        rec_AND = len(ind_AND)/(len(ind_AND) + len(ind_fn))
+        f1_AND = 2*prec_AND*rec_AND/(prec_AND + rec_AND)
+
+        results_AND = [prec_AND, rec_AND, f1_AND]
+        all_results_fake.append(results_fake)
+        all_results_OR.append(results_OR)
+        all_results_AND.append(results_AND)
+
+    return np.array(all_results_fake), np.array(all_results_OR), np.array(all_results_AND)
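+# Worked example (illustrative only, not used by the pipeline below): with two
+# matched components at SNR 8 and 3, one missed gt component at SNR 4 and one
+# false positive at SNR 6, a threshold of 5 gives ind_gt = {0} and
+# ind_cnmf = {0, 3}, hence precision = 1/2, recall = 1/1 and
+# F1 = 2*(0.5*1)/(0.5+1) = 2/3 for the first ('fake') score:
+# fake, OR, AND = precision_snr([8, 3], [4], [8, 3], [6], snr_thrs=[5])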
+#%%
+global_params = {'min_SNR': 2,         # minimum SNR for accepting a component
+                 'gnb': 2,             # number of background components
+                 'rval_thr': 0.80,     # spatial correlation threshold
+                 'min_cnn_thresh': 0.95,
+                 'p': 1,               # order of the autoregressive system
+                 'min_rval_thr_rejected': 0,    # components with spatial correlation below this are rejected outright
+                 'max_classifier_probability_rejected': 0.1,  # components with CNN probability below this are rejected outright
+                 'max_fitness_delta_accepted': -20,
+                 'Npeaks': 5,
+                 'min_SNR_patch': -10,
+                 'min_r_val_thr_patch': 0.5,
+                 'fitness_delta_min_patch': -5,
+                 'update_background_components': True,  # whether to update the background components in the spatial phase
+                 'low_rank_background': True,  # whether to model the background with a low rank approximation.
+                 # If False, all the nonzero elements of the background components are updated using HALS
+                 # (to be used with one background per patch)
+                 'only_init_patch': True,
+                 'is_dendrites': False,  # if dendritic. In this case you need to set init_method to sparse_nmf
+                 'alpha_snmf': None,
+                 'init_method': 'greedy_roi',
+                 'filter_after_patch': False
+                 }
+#%%
+params_movies = []
+#%% N.03.00.t
+params_movie = {'fname': 'N.03.00.t/Yr_d1_498_d2_467_d3_1_order_C_frames_2250_.mmap',
+                'gtname': 'N.03.00.t/joined_consensus_active_regions.npy',
+                'merge_thresh': 0.8,  # merging threshold, max correlation allowed
+                'rf': 25,             # half-size of the patches in pixels; rf=25 gives 50x50 patches
+                'stride_cnmf': 10,    # amount of overlap between the patches in pixels
+                'K': 4,               # number of components per patch
+                'gSig': [8, 8],       # expected half size of neurons
+                'n_chunks': 10,
+                'swap_dim': False,
+                'crop_pix': 0,
+                'fr': 7,
+                'decay_time': 0.4,
+                }
+params_movies.append(params_movie.copy())
+#%% N.04.00
+params_movie = {'fname': 'N.04.00.t/Yr_d1_512_d2_512_d3_1_order_C_frames_3000_.mmap',
+                'gtname': 'N.04.00.t/joined_consensus_active_regions.npy',
+                'merge_thresh': 0.8,  # merging threshold, max correlation allowed
+                'rf': 20,             # half-size of the patches in pixels
+                'stride_cnmf': 10,    # amount of overlap between the patches in pixels
+                'K': 5,               # number of components per patch
+                'gSig': [5, 5],       # expected half size of neurons
+                'n_chunks': 10,
+                'swap_dim': False,
+                'crop_pix': 0,
+                'fr': 8,
+                'decay_time': 0.5,    # rough length of a transient
+                }
+params_movies.append(params_movie.copy())
+
+#%% YST (Yuste)
+params_movie = {'fname': 'YST/Yr_d1_200_d2_256_d3_1_order_C_frames_3000_.mmap',
+                'gtname': 'YST/joined_consensus_active_regions.npy',
+                'merge_thresh': 0.8,  # merging threshold, max correlation allowed
+                'rf': 15,             # half-size of the patches in pixels
+                'stride_cnmf': 10,    # amount of overlap between the patches in pixels
+                'K': 8,               # number of components per patch
+                'gSig': [5, 5],       # expected half size of neurons
+                'fr': 10,
+                'decay_time': 0.75,
+                'n_chunks': 10,
+                'swap_dim': False,
+                'crop_pix': 0
+                }
+params_movies.append(params_movie.copy())
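+# Patch geometry (illustrative): per the comments above, `rf` is the patch
+# half-size and `stride_cnmf` the overlap, so for YST (rf=15, stride_cnmf=10)
+# patches are roughly 30x30 pixels with centers every 2*rf - stride_cnmf = 20
+# pixels; the 200x256 FOV is therefore tiled by about (200/20)*(256/20), i.e.
+# on the order of 130 overlapping patches.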
+#%% neurofinder 00.00
+params_movie = {'fname': 'N.00.00/Yr_d1_512_d2_512_d3_1_order_C_frames_2936_.mmap',
+                'gtname': 'N.00.00/joined_consensus_active_regions.npy',
+                'merge_thresh': 0.8,  # merging threshold, max correlation allowed
+                'rf': 20,             # half-size of the patches in pixels
+                'stride_cnmf': 10,    # amount of overlap between the patches in pixels
+                'K': 6,               # number of components per patch
+                'gSig': [6, 6],       # expected half size of neurons
+                'decay_time': 0.4,
+                'fr': 8,
+                'n_chunks': 10,
+                'swap_dim': False,
+                'crop_pix': 10
+                }
+params_movies.append(params_movie.copy())
+#%% neurofinder 01.01
+params_movie = {'fname': 'N.01.01/Yr_d1_512_d2_512_d3_1_order_C_frames_1825_.mmap',
+                'gtname': 'N.01.01/joined_consensus_active_regions.npy',
+                'merge_thresh': 0.9,  # merging threshold, max correlation allowed
+                'rf': 20,             # half-size of the patches in pixels
+                'stride_cnmf': 10,    # amount of overlap between the patches in pixels
+                'K': 6,               # number of components per patch
+                'gSig': [6, 6],       # expected half size of neurons
+                'decay_time': 1.4,
+                'fr': 8,
+                'n_chunks': 10,
+                'swap_dim': False,
+                'crop_pix': 2,
+                }
+params_movies.append(params_movie.copy())
+#%% neurofinder 02.00
+params_movie = {  # 'fname': '/opt/local/Data/labeling/neurofinder.02.00/Yr_d1_512_d2_512_d3_1_order_C_frames_8000_.mmap',
+                'fname': 'N.02.00/Yr_d1_512_d2_512_d3_1_order_C_frames_8000_.mmap',
+                'gtname': 'N.02.00/joined_consensus_active_regions.npy',
+                'merge_thresh': 0.8,  # merging threshold, max correlation allowed
+                'rf': 20,             # half-size of the patches in pixels
+                'stride_cnmf': 10,    # amount of overlap between the patches in pixels
+                'K': 6,               # number of components per patch
+                'gSig': [5, 5],       # expected half size of neurons
+                'fr': 30,             # imaging rate in Hz
+                'n_chunks': 10,
+                'swap_dim': False,
+                'crop_pix': 10,
+                'decay_time': 0.3,
+                }
+params_movies.append(params_movie.copy())
+#%% Sue Ann k53
+params_movie = {  # 'fname': '/opt/local/Data/labeling/k53_20160530/Yr_d1_512_d2_512_d3_1_order_C_frames_116043_.mmap',
+                'fname': 'K53/Yr_d1_512_d2_512_d3_1_order_C_frames_116043_.mmap',
+                'gtname': 'K53/joined_consensus_active_regions.npy',
+                'merge_thresh': 0.8,  # merging threshold, max correlation allowed
+                'rf': 20,             # half-size of the patches in pixels
+                'stride_cnmf': 10,    # amount of overlap between the patches in pixels
+                'K': 9,               # number of components per patch
+                'gSig': [6, 6],       # expected half size of neurons
+                'fr': 30,
+                'decay_time': 0.3,
+                'n_chunks': 10,
+                'swap_dim': False,
+                'crop_pix': 2,
+                }
+params_movies.append(params_movie.copy())
+#%% J115
+params_movie = {  # 'fname': '/opt/local/Data/labeling/J115_2015-12-09_L01_ELS/Yr_d1_463_d2_472_d3_1_order_C_frames_90000_.mmap',
+                'fname': 'J115/Yr_d1_463_d2_472_d3_1_order_C_frames_90000_.mmap',
+                'gtname': 'J115/joined_consensus_active_regions.npy',
+                'merge_thresh': 0.8,  # merging threshold, max correlation allowed
+                'rf': 20,             # half-size of the patches in pixels
+                'stride_cnmf': 10,    # amount of overlap between the patches in pixels
+                'K': 7,               # number of components per patch
+                'gSig': [7, 7],       # expected half size of neurons
+                'fr': 30,
+                'decay_time': 0.4,
+                'n_chunks': 10,
+                'swap_dim': False,
+                'crop_pix': 2,
+                }
+params_movies.append(params_movie.copy())
+#%% J123
+params_movie = {  # 'fname': '/opt/local/Data/labeling/J123_2015-11-20_L01_0/Yr_d1_458_d2_477_d3_1_order_C_frames_41000_.mmap',
+                'fname': 'J123/Yr_d1_458_d2_477_d3_1_order_C_frames_41000_.mmap',
+                'gtname': 'J123/joined_consensus_active_regions.npy',
+                'merge_thresh': 0.8,  # merging threshold, max correlation allowed
+                'rf': 40,             # half-size of the patches in pixels
+                'stride_cnmf': 20,    # amount of overlap between the patches in pixels
+                'K': 10,              # number of components per patch
+                'gSig': [8, 8],       # expected half size of neurons
+                'decay_time': 0.5,
+                'fr': 30,
+                'n_chunks': 10,
+                'swap_dim': False,
+                'crop_pix': 2,
+                }
+params_movies.append(params_movie.copy())
+
+#%% helper functions
+def myfun(x):
+    """Deconvolve a single trace; packaged for use with parallel map calls."""
+    from caiman.source_extraction.cnmf.deconvolution import constrained_foopsi
+    dc = constrained_foopsi(*x)
+    return (dc[0], dc[5])
+
+def fun_exc(x):
+    """Convert the event exceptionality of a trace into an SNR-like z-score."""
+    from scipy.stats import norm
+    from caiman.components_evaluation import compute_event_exceptionality
+
+    fluo, param = x
+    N_samples = np.ceil(param['fr'] * param['decay_time']).astype(int)
+    ev = compute_event_exceptionality(np.atleast_2d(fluo), N=N_samples)
+    return -norm.ppf(np.exp(np.array(ev[1]) / N_samples))
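+# Example (illustrative only): `fun_exc` rescales the log-probability of
+# N-sample events under the noise model into an equivalent z-score (an
+# SNR-like quantity). For a trace recorded at fr=30 Hz with decay_time=0.4 s
+# the window is N = ceil(30*0.4) = 12 samples:
+# snr_like = fun_exc((trace, {'fr': 30, 'decay_time': 0.4}))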
+#%%
+if preprocessing_from_scratch:
+    all_perfs = []
+    all_rvalues = []
+    all_comp_SNR_raw = []
+    all_comp_SNR_delta = []
+    all_predictions = []
+    all_labels = []
+    all_results = dict()
+    n_pixels_per_process = 4000
+    block_size = 4000
+    num_blocks_per_run = 10
+    ALL_CCs = []
+
+    for params_movie in np.array(params_movies)[0:1]:
+        # params_movie['gnb'] = 3
+        params_display = {
+            'downsample_ratio': .2,
+            'thr_plot': 0.8
+        }
+
+        # @params fname name of the movie
+        fname_new = params_movie['fname']
+        print(fname_new)
+
+        # %% LOAD MEMMAP FILE
+        # fname_new='Yr_d1_501_d2_398_d3_1_order_F_frames_369_.mmap'
+        Yr, dims, T = cm.load_memmap(fname_new)
+        d1, d2 = dims
+        images = np.reshape(Yr.T, [T] + list(dims), order='F')
+        # TODO: needinfo
+        Y = np.reshape(Yr, dims + (T,), order='F')
+        m_images = cm.movie(images)
+        # TODO: show screenshot 10
+        #%% restart the cluster
+        try:
+            cm.stop_server()
+            dview.terminate()
+        except Exception:
+            print('No clusters to stop')
+
+        c, dview, n_processes = setup_cluster(
+            backend=backend_patch, n_processes=n_processes, single_thread=False)
+        print('Not RELOADING')
+        if not reload:
+            # %% RUN ANALYSIS
+
+            # %% correlation image
+            if (plot_on or save_on):
+                if False and m_images.shape[0] < 10000:
+                    Cn = m_images.local_correlations(swap_dim=params_movie['swap_dim'], frames_per_chunk=1500)
+                    Cn[np.isnan(Cn)] = 0
+                else:
+                    Cn = np.array(cm.load('/'.join(params_movie['gtname'].split('/')[:-2] + ['projections', 'correlation_image.tif']))).squeeze()
+                # pl.imshow(Cn, cmap='gray', vmax=.95)
+
+            check_nan = False
+            # %% some parameter settings
+            # order of the autoregressive fit to the calcium traces: in general one
+            # (slow GCaMPs) or two (fast GCaMPs, fast scanning)
+            p = global_params['p']
+            # merging threshold, max correlation allowed
+            merge_thresh = params_movie['merge_thresh']
+            # half-size of the patches in pixels (rf=25 gives 50x50 patches)
+            rf = params_movie['rf']
+            # amount of overlap between the patches in pixels
+            stride_cnmf = params_movie['stride_cnmf']
+            # number of components per patch
+            K = params_movie['K']
+            # if dendritic: in this case you need to set init_method to sparse_nmf
+            is_dendrites = global_params['is_dendrites']
+            # init method: greedy_roi for round shapes, sparse_nmf for dendritic data
+            init_method = global_params['init_method']
+            # expected half size of neurons
+            gSig = params_movie['gSig']
+            # this controls sparsity
+            alpha_snmf = global_params['alpha_snmf']
+            # frame rate of movie (even considering eventual downsampling)
+            final_frate = params_movie['fr']
+
+            if global_params['is_dendrites'] == True:
+                if global_params['init_method'] != 'sparse_nmf':
+                    raise Exception('dendritic analysis requires init_method = sparse_nmf')
+                if global_params['alpha_snmf'] is None:
+                    raise Exception('need to set a value for alpha_snmf')
+            # %% Extract spatial and temporal components on patches
+            t1 = time.time()
+            # TODO: todocument
+            # TODO: warnings 3
+            print('Starting CNMF')
+            cnm = cnmf.CNMF(n_processes=n_processes, nb_patch=1, k=K, gSig=gSig,
+                            merge_thresh=params_movie['merge_thresh'], p=global_params['p'],
+                            dview=dview, rf=rf, stride=stride_cnmf, memory_fact=1,
+                            method_init=init_method, alpha_snmf=alpha_snmf,
+                            only_init_patch=global_params['only_init_patch'],
+                            gnb=global_params['gnb'], method_deconvolution='oasis',
+                            border_pix=params_movie['crop_pix'],
+                            low_rank_background=global_params['low_rank_background'],
+                            rolling_sum=True, check_nan=check_nan,
+                            block_size=block_size, num_blocks_per_run=num_blocks_per_run)
+            cnm = cnm.fit(images)
+
+            A_tot = cnm.A
+            C_tot = cnm.C
+            YrA_tot = cnm.YrA
+            b_tot = cnm.b
+            f_tot = cnm.f
+            sn_tot = cnm.sn
+            print('Number of components: ' + str(A_tot.shape[-1]))
+            t_patch = time.time() - t1
+            try:
+                dview.terminate()
+            except Exception:
+                pass
+            c, dview, n_processes = cm.cluster.setup_cluster(
+                backend=backend_refine, n_processes=n_processes, single_thread=False)
+            # %%
+            if plot_on:
+                pl.figure()
+                crd = plot_contours(A_tot, Cn, thr=params_display['thr_plot'])
+
+            # %% rerun, updating the components to refine them
+            t1 = time.time()
+            cnm = cnmf.CNMF(n_processes=n_processes, k=A_tot.shape[-1], gSig=gSig,
+                            merge_thresh=merge_thresh, p=p, dview=dview, Ain=A_tot,
+                            Cin=C_tot, b_in=b_tot, f_in=f_tot, rf=None, stride=None,
+                            method_deconvolution='oasis', gnb=global_params['gnb'],
+                            low_rank_background=global_params['low_rank_background'],
+                            update_background_components=global_params['update_background_components'],
+                            check_nan=check_nan, n_pixels_per_process=n_pixels_per_process,
+                            block_size=block_size, num_blocks_per_run=num_blocks_per_run,
+                            skip_refinement=skip_refinement)
+
+            cnm = cnm.fit(images)
+            t_refine = time.time() - t1
+
+            A, C, b, f, YrA, sn = cnm.A, cnm.C, cnm.b, cnm.f, cnm.YrA, cnm.sn
+            # %% recheck the quality of the components with stricter criteria
+            t1 = time.time()
+            idx_components, idx_components_bad, comp_SNR, r_values, predictionsCNN = estimate_components_quality_auto(
+                Y, A, C, b, f, YrA, params_movie['fr'], params_movie['decay_time'], gSig, dims,
+                dview=dview, min_SNR=global_params['min_SNR'],
+                r_values_min=global_params['rval_thr'],
+                r_values_lowest=global_params['min_rval_thr_rejected'],
+                Npeaks=global_params['Npeaks'], use_cnn=True,
+                thresh_cnn_min=global_params['min_cnn_thresh'],
+                thresh_cnn_lowest=global_params['max_classifier_probability_rejected'],
+                thresh_fitness_delta=global_params['max_fitness_delta_accepted'],
+                gSig_range=None)
+            # gSig_range could be e.g. [list(np.add(i, a)) for i, a in zip(range(0, 1), [gSig]*3)]
+
+            t_eva_comps = time.time() - t1
+            print(' ***** ')
+            print(len(C))
+            print(len(idx_components))
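+            # Note (illustrative): the SNR criterion above is based on the same
+            # event exceptionality used in `fun_exc`; e.g. for N.03.00.t
+            # (fr=7, decay_time=0.4) transients are scored over windows of
+            # N = ceil(7*0.4) = 3 frames, and min_SNR=2 keeps components whose
+            # strongest transient is roughly 2 noise standard deviations above
+            # what the noise model would predict.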
+            #%% load consensus (ground truth) annotations
+            # all_matches = False
+            # filter_SNR = False
+            gt_file = os.path.join(os.path.split(fname_new)[0],
+                                   os.path.split(fname_new)[1][:-4] + 'match_masks.npz')
+            with np.load(gt_file, encoding='latin1') as ld:
+                print(ld.keys())
+                # locals().update(ld)
+                C_gt = ld['C_gt']
+                YrA_gt = ld['YrA_gt']
+                b_gt = ld['b_gt']
+                f_gt = ld['f_gt']
+                A_gt = scipy.sparse.coo_matrix(ld['A_gt'][()])
+                dims_gt = (ld['d1'], ld['d2'])
+
+            # t1 = time.time()
+            idx_components_gt, idx_components_bad_gt, comp_SNR_gt, r_values_gt, predictionsCNN_gt = estimate_components_quality_auto(
+                Y, A_gt, C_gt, b_gt, f_gt, YrA_gt, params_movie['fr'], params_movie['decay_time'], gSig, dims_gt,
+                dview=dview, min_SNR=global_params['min_SNR'],
+                r_values_min=global_params['rval_thr'],
+                r_values_lowest=global_params['min_rval_thr_rejected'],
+                Npeaks=global_params['Npeaks'], use_cnn=True,
+                thresh_cnn_min=global_params['min_cnn_thresh'],
+                thresh_cnn_lowest=global_params['max_classifier_probability_rejected'],
+                thresh_fitness_delta=global_params['max_fitness_delta_accepted'],
+                gSig_range=None)
+
+            print(' ***** ')
+            print(len(C))
+            print(len(idx_components_gt))
+
+            #%% threshold the spatial footprints into binary masks
+            min_size_neuro = 3*2*np.pi
+            max_size_neuro = (2*gSig[0])**2*np.pi
+            A_gt_thr = cm.source_extraction.cnmf.spatial.threshold_components(
+                A_gt.tocsc()[:, :], dims_gt, medw=None, thr_method='max', maxthr=0.2,
+                nrgthr=0.99, extract_cc=True, se=None, ss=None, dview=dview)
+            A_gt_thr = A_gt_thr > 0
+            # size_neurons_gt = A_gt_thr.sum(0)
+            # idx_size_neuro_gt = np.where((size_neurons_gt > min_size_neuro) & (size_neurons_gt < max_size_neuro))[0]
+            A_thr = cm.source_extraction.cnmf.spatial.threshold_components(
+                A.tocsc()[:, :], dims, medw=None, thr_method='max', maxthr=0.2,
+                nrgthr=0.99, extract_cc=True, se=None, ss=None, dview=dview)
+            A_thr = A_thr > 0
+            size_neurons = A_thr.sum(0)
+            # idx_size_neuro = np.where((size_neurons > min_size_neuro) & (size_neurons < max_size_neuro))[0]
+            #%% montage of the CNN classifier output
+            # (assumed reconstruction: predictions/final_crops come from scoring
+            # the component crops with the CNN classifier)
+            from caiman.components_evaluation import evaluate_components_CNN
+            from caiman.utils.visualization import matrixMontage
+            predictions, final_crops = evaluate_components_CNN(A, dims, gSig)
+            threshold = 0.95
+            pl.figure()
+            matrixMontage(np.squeeze(final_crops[np.where(predictions[:, 1] >= threshold)[0]]))
+            pl.figure()
+            matrixMontage(np.squeeze(final_crops[np.where(predictions[:, 0] >= threshold)[0]]))
+            #%
+            cm.movie(final_crops).play(gain=3, magnification=6, fr=5)
+            #%
+            cm.movie(np.squeeze(final_crops[np.where(predictions[:, 1] >= 0.95)[0]])).play(
+                gain=2., magnification=8, fr=5)
+            #%
+            cm.movie(np.squeeze(final_crops[np.where(predictions[:, 0] >= 0.95)[0]])).play(
+                gain=4., magnification=8, fr=5)
+
+            #%% select components based on the computed metrics
+            thresh_fitness_raw_reject = 0.5
+            # global_params['max_classifier_probability_rejected'] = .1
+            gSig_range = [list(np.add(i, a)) for i, a in zip(range(0, 1), [gSig]*1)]
+            # global_params['max_classifier_probability_rejected'] = .2
+            idx_components, idx_components_bad, cnn_values = \
+                select_components_from_metrics(
+                    A, dims, gSig, r_values, comp_SNR, global_params['rval_thr'],
+                    global_params['min_rval_thr_rejected'], global_params['min_SNR'],
+                    thresh_fitness_raw_reject, global_params['min_cnn_thresh'],
+                    global_params['max_classifier_probability_rejected'], True, gSig_range)
+
+            print(len(idx_components))
+            #%%
+            if plot_on:
+                pl.figure()
+                pl.subplot(1, 2, 1)
+                crd = plot_contours(A.tocsc()[:, idx_components], Cn, thr=params_display['thr_plot'], vmax=0.85)
+                pl.subplot(1, 2, 2)
+                crd = plot_contours(A.tocsc()[:, idx_components_bad], Cn, thr=params_display['thr_plot'], vmax=0.85)
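+            # Sketch (illustrative only, not the library implementation):
+            # detect_duplicates_and_subsets, used below, flags components whose
+            # binary masks heavily overlap; the core idea is a pairwise overlap
+            # matrix of the thresholded footprints:
+            # M = masks.reshape((n_comps, -1)).astype(float)       # n_comps x n_pixels
+            # inter = M.dot(M.T)                                   # pairwise intersection areas
+            # frac = inter / np.minimum.outer(M.sum(1), M.sum(1))  # overlap vs smaller mask
+            # dups = np.argwhere(np.triu(frac > thresh_subset, k=1))  # candidate pairs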
+            #%% detect duplicates
+            thresh_subset = 0.6
+            duplicates, indeces_keep, indeces_remove, D, overlap = detect_duplicates_and_subsets(
+                A_thr[:, idx_components].reshape([dims[0], dims[1], -1], order='F').transpose([2, 0, 1])*1.,
+                predictionsCNN[idx_components], r_values=None,
+                dist_thr=0.1, min_dist=10, thresh_subset=thresh_subset)
+
+            idx_components_cnmf = idx_components.copy()
+            if len(duplicates) > 0:
+                if plot_on:
+                    pl.figure()
+                    pl.subplot(1, 3, 1)
+                    pl.imshow(A_thr[:, idx_components].reshape([dims[0], dims[1], -1], order='F').transpose([2, 0, 1])[np.unique(duplicates).flatten()].sum(0))
+                    pl.colorbar()
+                    pl.subplot(1, 3, 2)
+                    pl.imshow(A_thr[:, idx_components].reshape([dims[0], dims[1], -1], order='F').transpose([2, 0, 1])[np.array(indeces_keep)[:]].sum(0))
+                    pl.colorbar()
+                    pl.subplot(1, 3, 3)
+                    pl.imshow(A_thr[:, idx_components].reshape([dims[0], dims[1], -1], order='F').transpose([2, 0, 1])[np.array(indeces_remove)[:]].sum(0))
+                    pl.colorbar()
+                    pl.pause(1)
+                idx_components_cnmf = np.delete(idx_components_cnmf, indeces_remove)
+
+            print('Duplicates CNMF:' + str(len(duplicates)))
+
+            duplicates_gt, indeces_keep_gt, indeces_remove_gt, D_gt, overlap_gt = detect_duplicates_and_subsets(
+                A_gt_thr.reshape([dims[0], dims[1], -1], order='F').transpose([2, 0, 1])*1.,
+                predictions=None, r_values=None,
+                dist_thr=0.1, min_dist=10, thresh_subset=thresh_subset)
+
+            idx_components_gt = np.arange(A_gt_thr.shape[-1])
+            if len(duplicates_gt) > 0:
+                if plot_on:
+                    pl.figure()
+                    pl.subplot(1, 3, 1)
+                    pl.imshow(A_gt_thr.reshape([dims[0], dims[1], -1], order='F').transpose([2, 0, 1])[np.array(duplicates_gt).flatten()].sum(0))
+                    pl.colorbar()
+                    pl.subplot(1, 3, 2)
+                    pl.imshow(A_gt_thr.reshape([dims[0], dims[1], -1], order='F').transpose([2, 0, 1])[np.array(indeces_keep_gt)[:]].sum(0))
+                    pl.colorbar()
+                    pl.subplot(1, 3, 3)
+                    pl.imshow(A_gt_thr.reshape([dims[0], dims[1], -1], order='F').transpose([2, 0, 1])[np.array(indeces_remove_gt)[:]].sum(0))
+                    pl.colorbar()
+                    pl.pause(1)
+                idx_components_gt = np.delete(idx_components_gt, indeces_remove_gt)
+            print('Duplicates gt:' + str(len(duplicates_gt)))
+
+            #%% optionally remove neurons that are too small or too large
+            remove_small_neurons = False
+            if remove_small_neurons:
+                min_size_neuro = 3*2*np.pi
+                max_size_neuro = (2*gSig[0])**2*np.pi
+                size_neurons_gt = A_gt_thr.sum(0)
+                idx_size_neuro_gt = np.where((size_neurons_gt > min_size_neuro) & (size_neurons_gt < max_size_neuro))[0]
+                size_neurons = A_thr.sum(0)
+                idx_size_neuro = np.where((size_neurons > min_size_neuro) & (size_neurons < max_size_neuro))[0]
+
+#%% timing summary for the online algorithm
+print('**** Timing online algorithm ****')
+print('Time initialization')
+print([(tm, nm) for tm, nm in zip(time_init, names)])
+print('Time track activity')
+print([(tm, nm) for tm, nm in zip(time_track_activity, names)])
+print('Time update shapes')
+print([(tm, nm) for tm, nm in zip(time_track_neurons, names)])