# -*- coding: utf-8 -*-
"""
The analysis module provides the functions to analyse the data generated by
pyDentate. Data files generated by pyDentate have the .pydd extension.
This extension simply makes it possible to identify files that contain the
raw data, as opposed to files that contain, for example, plots. All data
files are python shelves and shelving is handled by ouropy.

Functions
---------
Signal construction (time_stamps_to_signal), triangular filtering (tri_filter)
and a set of similarity measures between population activity patterns
(normalized dot products, Pearson-based measures, coactivity and overlap).

@author: daniel
"""
import numpy as np
from scipy.signal import convolve
from scipy.stats import pearsonr
from sklearn.preprocessing import normalize
# inhom_poiss is only needed for the example run at the bottom of this file
# and is therefore imported inside the __main__ block.
# from burst_generator_inhomogeneous_poisson import inhom_poiss
import shelve
import matplotlib.pyplot as plt
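
# The .pydd data files mentioned above are plain python shelves. As a hedged
# sketch (the key names are illustrative only; the actual keys are written by
# ouropy when a paradigm is saved), a file could be inspected like this:
#
#     with shelve.open('path/to/paradigm.pydd') as data:
#         print(list(data.keys()))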

def tri_filter(signal, kernel_delta):
    """Convolve each row of signal with a triangular kernel.

    kernel_delta : width of the kernel in data points.
    """
    kernel = np.append(np.arange(kernel_delta // 2),
                       np.arange(kernel_delta // 2, -1, -1))
    # convolve2d has proven PAINFULLY slow for some reason
    # signal_conv = convolve2d(signal, kernel, 'same')
    new_signal = []
    for x in signal:
        new_signal.append(convolve(x, kernel, 'same'))
    signal_conv = np.array(new_signal)
    return signal_conv
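
# For example, kernel_delta=6 yields the triangular kernel
# [0, 1, 2, 3, 2, 1, 0], i.e. kernel_delta + 1 points ramping up and down.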

def correlate_signals(signal1, signal2):
    """Correlate two n x m dimensional signals.

    Correlates signal1[n] with signal2[n] along m and then averages all n
    correlation coefficients. Uses the Pearson correlation coefficient.
    Does not work for us because of NaN values if one signal is silent.
    """
    corrs = []
    for idx in range(signal1.shape[0]):
        # Mean-center both rows; the normalized sum below is then Pearson's r
        sig1 = signal1[idx] - np.mean(signal1[idx])
        sig2 = signal2[idx] - np.mean(signal2[idx])
        # cor = sum(sig1*sig2)/(len(sig1)*np.std(sig1)*np.std(sig2))
        cor = sum(sig1 * sig2) / (np.sqrt(sum(sig1 ** 2)) * np.sqrt(sum(sig2 ** 2)))
        corrs.append(cor)
    corrs = np.array(corrs)
    return np.nanmean(corrs)

def avg_dotprod_signals(signal1, signal2):
    """Average dot product of signal1 and signal2, excluding cells that are
    silent in both signals.
    """
    non_silent_sigs = np.unique(
        np.concatenate((np.argwhere(signal1.any(axis=1)),
                        np.argwhere(signal2.any(axis=1)))))
    non_silent_sigs.sort()
    product = signal1[non_silent_sigs] * signal2[non_silent_sigs]
    prod_sum = product.sum(axis=1)
    avg_dot_product = prod_sum.mean()
    return avg_dot_product

def ndp_signals(signal1, signal2):
    """Normalized dot product (cosine similarity) of the flattened signals."""
    dotproduct = (signal1 * signal2).sum()
    normalization = (np.sqrt((signal1 * signal1).sum()) *
                     np.sqrt((signal2 * signal2).sum()))
    return dotproduct / normalization
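
# ndp_signals is the cosine similarity between the flattened activity
# patterns: 1.0 for identical patterns, 0.0 for non-overlapping ones.
# Hedged toy example (arrays made up for illustration):
#
#     a = np.array([[1, 0, 1], [0, 1, 0]])
#     ndp_signals(a, a)      # -> 1.0
#     ndp_signals(a, 1 - a)  # -> 0.0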

def ndp_signals_tresolved(signal1, signal2, len_bin):
    """Normalized dot product per time bin of len_bin data points."""
    n_bins1 = int(signal1.shape[1] / len_bin)
    signal1 = np.reshape(signal1[:, 0:len_bin * n_bins1],
                         (signal1.shape[0], n_bins1, len_bin))
    signal1 = signal1.sum(axis=2)
    n_bins2 = int(signal2.shape[1] / len_bin)
    signal2 = np.reshape(signal2[:, 0:len_bin * n_bins2],
                         (signal2.shape[0], n_bins2, len_bin))
    signal2 = signal2.sum(axis=2)
    dotproduct = (signal1 * signal2).sum(axis=0)
    normalization = (np.sqrt((signal1 * signal1).sum(axis=0)) *
                     np.sqrt((signal2 * signal2).sum(axis=0)))
    return dotproduct / normalization

def avg_dotprod_signals_tbinned(signal1, signal2, len_bin=1000):
    """Average dot product of signal1 and signal2, binned in time."""
    # Normalize every time bin individually
    n_bins1 = int(signal1.shape[1] / len_bin)
    signal1 = np.reshape(signal1[:, 0:n_bins1 * len_bin],
                         (signal1.shape[0], n_bins1, len_bin))
    signal1 = signal1[:, 0:5, :]  # restrict to the first 5 time bins
    n_bins2 = int(signal2.shape[1] / len_bin)
    signal2 = np.reshape(signal2[:, 0:n_bins2 * len_bin],
                         (signal2.shape[0], n_bins2, len_bin))
    signal2 = signal2[:, 0:5, :]
    sig1 = []
    for x in signal1:
        sig1.append(normalize(x, axis=1))
    signal1 = np.array(sig1)
    sig2 = []
    for x in signal2:
        sig2.append(normalize(x, axis=1))
    signal2 = np.array(sig2)
    product = signal1 * signal2
    prod_sum = product.sum(axis=2)
    # Bins in which both signals are silent have an undefined dot product
    silent_sigs = np.argwhere(np.logical_and(np.invert(signal1.any(axis=2)),
                                             np.invert(signal2.any(axis=2))))
    for x in silent_sigs:
        prod_sum[x[0], x[1]] = np.nan
    avg_dot_product = np.nanmean(prod_sum, axis=0)
    return avg_dot_product

def time_stamps_to_signal(time_stamps, dt_signal, t_start, t_stop):
    """Convert an array of spike time stamps to a binary signal where 0 is
    absence and 1 is presence of spikes.
    """
    # Construct a zero array with size corresponding to desired output signal
    sig = np.zeros((np.shape(time_stamps)[0],
                    int((t_stop - t_start) / dt_signal)))
    # Find the indices where spikes occurred according to time_stamps
    time_idc = []
    for x in time_stamps:
        curr_idc = []
        if np.any(x):
            for y in x:
                curr_idc.append((y - t_start) / dt_signal)
        time_idc.append(curr_idc)
    # Set the spike indices to 1
    try:
        for sig_idx, idc in enumerate(time_idc):
            sig[sig_idx, np.array(idc, dtype=int)] = 1
    except IndexError:
        # Spikes falling exactly on t_stop index one past the last column;
        # shift them back by one sample.
        for sig_idx, idc in enumerate(time_idc):
            sig[sig_idx, np.array(idc, dtype=int) - 1] = 1
    return sig
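
# Hedged usage sketch (spike times made up for illustration): an object array
# of spike-time rows [1.0, 5.0] and [2.5] with dt_signal=0.5, t_start=0 and
# t_stop=10 yields a (2, 20) binary array with ones at columns 2 and 10 in
# row 0 and at column 5 in row 1.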

def population_similarity_measure_ob(signal1, signal2, len_bin):
    """Mean over time bins of the cosine similarity between the binned
    population vectors of signal1 and signal2.
    """
    n_bins1 = int(signal1.shape[1] / len_bin)
    signal1 = np.reshape(signal1[:, 0:n_bins1 * len_bin],
                         (signal1.shape[0], n_bins1, len_bin))
    signal1 = signal1.mean(axis=2)
    n_bins2 = int(signal2.shape[1] / len_bin)
    signal2 = np.reshape(signal2[:, 0:n_bins2 * len_bin],
                         (signal2.shape[0], n_bins2, len_bin))
    signal2 = signal2.mean(axis=2)
    # Normalize each time bin (column) to unit norm
    signal1 = normalize(signal1, axis=0)
    signal2 = normalize(signal2, axis=0)
    product = signal1 * signal2
    prod_sum = product.sum(axis=0)
    return prod_sum.mean()

def similarity_measure_leutgeb_BUGGY(signal1, signal2, len_bin):
    """Oriented on the """
    # NOTE: kept for reference only; the non-integer reshape below breaks
    # under Python 3. Use similarity_measure_leutgeb instead.
    signal1 = np.reshape(signal1[:,0:int((signal1.shape[1]/len_bin)*len_bin)],
                         (signal1.shape[0], signal1.shape[1]/len_bin,len_bin), len_bin)
    signal1 = signal1.sum(axis=2)
    signal2 = np.reshape(signal2[:,0:int((signal2.shape[1]/len_bin)*len_bin)],
                         (signal2.shape[0], signal2.shape[1]/len_bin,len_bin), len_bin)
    signal2 = signal2.sum(axis=2)
    corr_vector = []
    for x in range(signal1.shape[1]):
        corr_vector.append(pearsonr(signal1[:,x], signal2[:,x])[0])
    return np.array(corr_vector)

def similarity_measure_leutgeb(signal1, signal2, len_bin):
    """Pearson correlation between the binned population vectors of signal1
    and signal2, computed separately for each time bin.
    """
    n_bins1 = int(signal1.shape[1] / len_bin)
    signal1 = np.reshape(signal1[:, 0:len_bin * n_bins1],
                         (signal1.shape[0], n_bins1, len_bin))
    signal1 = signal1.sum(axis=2)
    n_bins2 = int(signal2.shape[1] / len_bin)
    signal2 = np.reshape(signal2[:, 0:len_bin * n_bins2],
                         (signal2.shape[0], n_bins2, len_bin))
    signal2 = signal2.sum(axis=2)
    corr_vector = []
    for x in range(signal1.shape[1]):
        corr_vector.append(pearsonr(signal1[:, x], signal2[:, x])[0])
    return np.array(corr_vector)

def sqrt_diff(signal1, signal2, len_bin):
    """Per-time-bin sum of absolute differences between signal1 and signal2."""
    n_bins1 = int(signal1.shape[1] / len_bin)
    signal1 = np.reshape(signal1[:, 0:n_bins1 * len_bin],
                         (signal1.shape[0], n_bins1, len_bin))
    n_bins2 = int(signal2.shape[1] / len_bin)
    signal2 = np.reshape(signal2[:, 0:n_bins2 * len_bin],
                         (signal2.shape[0], n_bins2, len_bin))
    subtr = np.sqrt((signal1 - signal2) ** 2)
    subtr_sum = subtr.sum(axis=2).sum(axis=0)
    print(subtr_sum)
    return subtr_sum

def sqrt_diff_norm_TRESOLVED(signal1, signal2, len_bin):
    """Per-time-bin sum of absolute differences, normalized by the total
    number of spikes in both signals.
    """
    n_bins1 = int(signal1.shape[1] / len_bin)
    signal1 = np.reshape(signal1[:, 0:n_bins1 * len_bin],
                         (signal1.shape[0], n_bins1, len_bin))
    n_bins2 = int(signal2.shape[1] / len_bin)
    signal2 = np.reshape(signal2[:, 0:n_bins2 * len_bin],
                         (signal2.shape[0], n_bins2, len_bin))
    total_spikes = signal1.sum(axis=2).sum(axis=0) + signal2.sum(axis=2).sum(axis=0)
    subtr = np.sqrt((signal1 - signal2) ** 2)
    subtr_sum = subtr.sum(axis=2).sum(axis=0) / total_spikes
    print(subtr_sum)
    return subtr_sum

def coactivity(signal1, signal2):
    """Ratio of elements active in both signals to elements active in either."""
    coactive = (np.array(signal1 > 0) * np.array(signal2 > 0)).sum()
    total_active = np.logical_or(np.array(signal1 > 0), np.array(signal2 > 0)).sum()
    return coactive / float(total_active)
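
# Example with made-up spike counts: for signal1 = np.array([2, 0, 1, 0]) and
# signal2 = np.array([1, 1, 0, 0]), one cell is active in both and three are
# active in either signal, so coactivity returns 1/3.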

def overlap(signal1, signal2):
    """Share of elements with the same active/silent state, relative to len(signal1)."""
    total_overlap = ((signal1 > 0) == (signal2 > 0)).sum()
    return total_overlap / float(len(signal1))

def sqrt_diff_norm(signal1, signal2, len_bin):
    """Total absolute difference normalized by total spike count; len_bin is unused."""
    total_spikes = signal1.sum() + signal2.sum()
    subtr = np.sqrt((signal1 - signal2) ** 2).sum()
    return subtr / total_spikes

def inner_pearsonr_BUGGY(signal1, len_bin):
    # NOTE: kept for reference only; the early return below leaves the
    # correlation code unreachable. Use inner_pearsonr instead.
    signal1 = np.reshape(signal1[:,0:int((signal1.shape[1]/len_bin)*len_bin)],
                         (signal1.shape[0], signal1.shape[1]/len_bin,len_bin), len_bin)
    return signal1
    signal1 = signal1.sum(axis=2)
    corr_vector = []
    for x in range(signal1.shape[1]):
        corr_vector.append(pearsonr(signal1[:,0], signal1[:,x])[0])
    return corr_vector

def inner_pearsonr(signal1, len_bin):
    """Pearson correlation of each time bin of signal1 with its first bin."""
    signal1 = np.reshape(signal1, (signal1.shape[0],
                                   int(signal1.shape[1] / len_bin), len_bin))
    signal1 = signal1.sum(axis=2)
    corr_vector = []
    for x in range(signal1.shape[1]):
        corr_vector.append(pearsonr(signal1[:, 0], signal1[:, x])[0])
    return corr_vector

if __name__ == '__main__':
    # The burst generator is only needed for this example run
    from burst_generator_inhomogeneous_poisson import inhom_poiss

    temporal_patterns = inhom_poiss()
    time_sig = time_stamps_to_signal(temporal_patterns,
                                     dt_signal=0.1,
                                     t_start=0,
                                     t_stop=1000)
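
    # Quick sanity check of the conversion above: the normalized dot product
    # of a binary spike pattern with itself is 1 by construction.
    print(ndp_signals(time_sig, time_sig))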