diff --git a/doc/authors.rst b/doc/authors.rst
index af85e9e79..b0f444e76 100644
--- a/doc/authors.rst
+++ b/doc/authors.rst
@@ -37,7 +37,7 @@ contribution, and may not be the current affiliation of a contributor.
 * Alessandra Stella [1]
 * Peter Bouss [1]
 * Alexander van Meegen [1]
-* Aitor Morales-Gregorio [1]
+* Aitor Morales-Gregorio [1, 14]
 * Cristiano Köhler [1]
 * Paulina Dąbrowska [1]
 * Jan Lewen [1]
@@ -51,6 +51,8 @@ contribution, and may not be the current affiliation of a contributor.
 * Florian Porrmann [13]
 * Sarah Pilz [13]
 * Oliver Kloß [1]
+* Jonas Oberste-Frielinghaus [1, 14]
+* Sven Krausse [1]
 * Felician Richter [12]
 
 1. Institute of Neuroscience and Medicine (INM-6) and Institute for Advanced Simulation (IAS-6) and JARA-Institute Brain Structure-Function Relationships (INM-10), Jülich Research Centre, Jülich, Germany
diff --git a/elephant/statistics.py b/elephant/statistics.py
index 0ab389572..d15699ba1 100644
--- a/elephant/statistics.py
+++ b/elephant/statistics.py
@@ -1357,6 +1357,11 @@ class Complexity(object):
     bin edge into the following bin. This can be adjusted using the
     tolerance parameter and turned off by setting `tolerance=None`.
 
+    Due to the rounding error correction, an indexing error would occur if
+    spikes fell into the last bin. To avoid this, the t_stop of the original
+    spike trains is extended by one bin whenever a spike is found in the
+    last time bin.
+
     See also
     --------
     elephant.conversion.BinnedSpikeTrain
@@ -1445,6 +1450,15 @@ def __init__(self, spiketrains,
         if bin_size is None and sampling_rate is not None:
             self.bin_size = 1 / self.sampling_rate
 
+        # Check if spikes happen in the last bin
+        for st in self.input_spiketrains:
+            # Extend t_stop to avoid indexing problems
+            if np.isclose(self.t_stop.magnitude, st.times[-1].magnitude):
+                self.t_stop += self.bin_size
+                for st in self.input_spiketrains:
+                    st.t_stop = self.t_stop
+                break
+
         if spread == 0:
             self.time_histogram, self.complexity_histogram = \
                 self._histogram_no_spread()
diff --git a/elephant/test/test_spike_train_synchrony.py b/elephant/test/test_spike_train_synchrony.py
index 58be525eb..2f564ff7a 100644
--- a/elephant/test/test_spike_train_synchrony.py
+++ b/elephant/test/test_spike_train_synchrony.py
@@ -193,6 +193,8 @@ def _test_template(self, spiketrains, correct_complexities, sampling_rate,
                        spread, deletion_threshold=2, mode='delete',
                        in_place=False, binary=True):
 
+        initial_t_stop = spiketrains[0].t_stop.magnitude
+
         synchrofact_obj = Synchrotool(
             spiketrains,
             sampling_rate=sampling_rate,
@@ -233,6 +235,9 @@ def _test_template(self, spiketrains, correct_complexities, sampling_rate,
                                           cleaned_spike_times):
             assert_array_almost_equal(cleaned_st, correct_st)
 
+        assert_array_almost_equal(spiketrains[0].t_stop.magnitude,
+                                  initial_t_stop)
+
     def test_no_synchrofacts(self):
 
         # nothing to find here
@@ -390,16 +395,16 @@ def test_binning_for_input_with_rounding_errors(self):
 
         sampling_rate = 30000 / pq.s
 
-        spiketrains = [neo.SpikeTrain(np.arange(1000) * pq.s / 30000,
+        spiketrains = [neo.SpikeTrain(np.arange(10) * pq.s / 30000,
                                       t_stop=.1 * pq.s),
-                       neo.SpikeTrain(np.arange(2000, step=2) * pq.s / 30000,
+                       neo.SpikeTrain(np.arange(20, step=2) * pq.s / 30000,
                                       t_stop=.1 * pq.s)]
 
-        first_annotations = np.ones(1000)
+        first_annotations = np.ones(10)
         first_annotations[::2] = 2
 
-        second_annotations = np.ones(1000)
-        second_annotations[:500] = 2
+        second_annotations = np.ones(10)
+        second_annotations[:5] = 2
 
         correct_annotations = np.array([first_annotations,
                                         second_annotations])
@@ -408,6 +413,28 @@ def test_binning_for_input_with_rounding_errors(self):
                             spread=0, mode='delete', in_place=True,
                             deletion_threshold=2)
 
+    def test_binning_indexing_last_bin_synchrofact(self):
+
+        # a test with inputs divided by 30000, which leads to rounding
+        # errors; these errors have to be accounted for by proper binning.
+        # If there is a synchrofact in the last bin, the rounding error
+        # correction used to cause an indexing error; check that we still
+        # get the correct result
+
+        sampling_rate = 30000 / pq.s
+
+        st = neo.SpikeTrain(np.arange(10) * pq.s / 30000, t_stop=.1 * pq.s)
+
+        spiketrains = [st, st]
+
+        annotations = 2 * np.ones(10)
+
+        correct_annotations = np.array([annotations, annotations])
+
+        self._test_template(spiketrains, correct_annotations, sampling_rate,
+                            spread=0, mode='delete', in_place=True,
+                            deletion_threshold=2)
+
     def test_correct_transfer_of_spiketrain_attributes(self):
 
         # for delete=True the spiketrains in the block are changed,
diff --git a/requirements/environment.yml b/requirements/environment.yml
index fa8fb6e1d..bb56c3ad0 100644
--- a/requirements/environment.yml
+++ b/requirements/environment.yml
@@ -13,6 +13,6 @@ dependencies:
   - statsmodels
   - jinja2
   - pip:
-    - neo>=0.10.0
+    - neo>=0.13.0
     - viziphant
     # neo, viziphant can be removed once it is integrated into requirements-tutorials.txt
diff --git a/requirements/requirements.txt b/requirements/requirements.txt
index b3b9d6f98..f82d0db2a 100644
--- a/requirements/requirements.txt
+++ b/requirements/requirements.txt
@@ -1,4 +1,4 @@
-neo>=0.10.0
+neo>=0.13.0
 numpy>=1.19.5, <2
 quantities>=0.14.1
 scipy>=1.10.0
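For reviewers who want to exercise the scenario targeted by this patch outside the test suite, the short script below mirrors the new regression test test_binning_indexing_last_bin_synchrofact and drives Synchrotool the same way the test template does. This is only a sketch, not part of the change: it assumes a checkout with this patch applied plus neo >= 0.13.0, numpy, and quantities on the path; the variable names (synchrotool, cleaned) are illustrative.

import numpy as np
import neo
import quantities as pq

from elephant.spike_train_synchrony import Synchrotool

# 30 kHz sampling rate; spike times expressed as multiples of the
# sampling period pick up floating point rounding errors
sampling_rate = 30000 / pq.s
st = neo.SpikeTrain(np.arange(10) * pq.s / 30000, t_stop=.1 * pq.s)

# two identical trains, so every spike is a synchrofact of complexity 2
spiketrains = [st, st]

synchrotool = Synchrotool(spiketrains, sampling_rate=sampling_rate, spread=0)
synchrotool.annotate_synchrofacts()

# on unpatched code this kind of input could hit the indexing error
# described in the Complexity docstring; with the patch, all spikes are
# removed at threshold=2 because every spike has complexity 2
cleaned = synchrotool.delete_synchrofacts(threshold=2, in_place=False,
                                          mode='delete')
print(synchrotool.complexity_histogram)
print([len(cleaned_st) for cleaned_st in cleaned])

The authoritative check remains the new unit test; the added assertion in _test_template additionally verifies that the t_stop of the input spike trains is not permanently altered by the cleaning step.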