diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 52b7823..779c7d1 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -19,7 +19,7 @@ jobs:
strategy:
max-parallel: 5
matrix:
- python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
+ python-version: ['3.9', '3.10', '3.11', '3.12']
defaults:
run:
shell: bash -el {0}
diff --git a/README.md b/README.md
index 191df08..fef6756 100644
--- a/README.md
+++ b/README.md
@@ -22,7 +22,7 @@ Developed and maintained by the [Boston University Neurophotonics Center](https:
## Installation
`pip install snirf`
-pysnirf2 requires Python > 3.6.
+pysnirf2 requires Python >= 3.9.
# Features
diff --git a/docs/README.md b/docs/README.md
index b67bc01..b441baa 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -16,13 +16,14 @@
- [`pysnirf2.IndexedGroup`](./pysnirf2.md#class-indexedgroup)
- [`pysnirf2.MeasurementList`](./pysnirf2.md#class-measurementlist): Interface for indexed group `MeasurementList`.
- [`pysnirf2.MeasurementListElement`](./pysnirf2.md#class-measurementlistelement): Wrapper for an element of indexed group `MeasurementList`.
+- [`pysnirf2.MeasurementLists`](./pysnirf2.md#class-measurementlists)
- [`pysnirf2.MetaDataTags`](./pysnirf2.md#class-metadatatags)
- [`pysnirf2.Nirs`](./pysnirf2.md#class-nirs): Interface for indexed group `Nirs`.
- [`pysnirf2.NirsElement`](./pysnirf2.md#class-nirselement): Wrapper for an element of indexed group `Nirs`.
- [`pysnirf2.Probe`](./pysnirf2.md#class-probe)
- [`pysnirf2.Snirf`](./pysnirf2.md#class-snirf)
- [`pysnirf2.SnirfConfig`](./pysnirf2.md#class-snirfconfig): Structure containing Snirf-wide data and settings.
-- [`pysnirf2.SnirfFormatError`](./pysnirf2.md#class-snirfformaterror): Raised when SNIRF-specific error prevents file from loading properly.
+- [`pysnirf2.SnirfFormatError`](./pysnirf2.md#class-snirfformaterror): Raised when SNIRF-specific error prevents file from loading or saving properly.
- [`pysnirf2.Stim`](./pysnirf2.md#class-stim)
- [`pysnirf2.StimElement`](./pysnirf2.md#class-stimelement)
- [`pysnirf2.ValidationIssue`](./pysnirf2.md#class-validationissue): Information about the validity of a given SNIRF file location.
diff --git a/docs/pysnirf2.md b/docs/pysnirf2.md
index 3b00316..2042caa 100644
--- a/docs/pysnirf2.md
+++ b/docs/pysnirf2.md
@@ -24,7 +24,7 @@ Maintained by the Boston University Neurophotonics Center
---
-
+
## function `loadSnirf`
@@ -63,7 +63,7 @@ Returns a `Snirf` object loaded from path if a SNIRF file exists there. Takes th
---
-
+
## function `saveSnirf`
@@ -83,7 +83,7 @@ Saves a SNIRF file to disk.
---
-
+
## function `validateSnirf`
@@ -101,7 +101,7 @@ Returns truthy ValidationResult instance which holds detailed results of validat
## class `SnirfFormatError`
-Raised when SNIRF-specific error prevents file from loading properly.
+Raised when SNIRF-specific error prevents file from loading or saving properly.
@@ -109,14 +109,14 @@ Raised when SNIRF-specific error prevents file from loading properly.
---
-
+
## class `ValidationIssue`
Information about the validity of a given SNIRF file location.
Properties: location: A relative HDF5 name corresponding to the location of the issue name: A string describing the issue. Must be predefined in `_CODES` id: An integer corresponding to the predefined error type severity: An integer ranking the serverity level of the issue. 0 OK, Nothing remarkable 1 Potentially useful `INFO` 2 `WARNING`, the file is valid but exhibits undefined behavior or features marked deprecation 3 `FATAL`, The file is invalid. message: A string containing a more verbose description of the issue
-
+
### method `__init__`
@@ -133,7 +133,7 @@ __init__(name: str, location: str)
---
-
+
### method `dictize`
@@ -146,7 +146,7 @@ Return dictionary representation of Issue.
---
-
+
## class `ValidationResult`
The result of Snirf file validation routines.
@@ -155,10 +155,10 @@ Validation results in a list of issues. Each issue records information about the
```
= .validate()
- = validateSnirf()
+ = validateSnirf()
```
-
+
### method `__init__`
@@ -209,7 +209,7 @@ A list of the `WARNING` issues catalogued during validation.
---
-
+
### method `display`
@@ -227,7 +227,7 @@ Reads the contents of an `h5py.Dataset` to an array of `dtype=str`.
---
-
+
### method `is_valid`
@@ -239,7 +239,7 @@ Returns True if no `FATAL` issues were catalogued during validation.
---
-
+
### method `serialize`
@@ -252,14 +252,14 @@ Render serialized JSON ValidationResult.
---
-
+
## class `SnirfConfig`
Structure containing Snirf-wide data and settings.
Properties: logger (logging.Logger): The logger that the Snirf instance writes to dynamic_loading (bool): If True, data is loaded from the HDF5 file only on access via property
-
+
### method `__init__`
@@ -277,14 +277,14 @@ __init__()
---
-
+
## class `Group`
-
+
### method `__init__`
@@ -318,13 +318,11 @@ None if not associated with a Group on disk.
The HDF5 relative location indentifier.
-None if not associataed with a Group on disk.
-
---
-
+
### method `is_empty`
@@ -342,7 +340,7 @@ If the Group has no member Groups or Datasets.
---
-
+
### method `save`
@@ -370,14 +368,14 @@ Group level save to a SNIRF file on disk.
---
-
+
## class `IndexedGroup`
-
+
### method `__init__`
@@ -411,7 +409,7 @@ The filename the Snirf object was loaded from and will save to.
---
-
+
### method `append`
@@ -429,7 +427,7 @@ Append a new Group to the IndexedGroup.
---
-
+
### method `appendGroup`
@@ -443,7 +441,7 @@ Creates an empty Group with the appropriate name at the end of the list of Group
---
-
+
### method `insert`
@@ -462,7 +460,7 @@ Insert a new Group into the IndexedGroup.
---
-
+
### method `insertGroup`
@@ -482,7 +480,7 @@ Creates an empty Group with a placeholder name within the list of Groups managed
---
-
+
### method `is_empty`
@@ -500,7 +498,7 @@ Returns True if the Indexed Group has no member Groups with contents.
---
-
+
### method `save`
@@ -530,14 +528,14 @@ When saving, the naming convention defined by the SNIRF spec is enforced: groups
---
-
+
## class `MetaDataTags`
-
+
### method `__init__`
@@ -644,13 +642,11 @@ None if not associated with a Group on disk.
The HDF5 relative location indentifier.
-None if not associataed with a Group on disk.
-
---
-
+
### method `add`
@@ -669,7 +665,7 @@ Add a new tag to the list.
---
-
+
### method `is_empty`
@@ -687,7 +683,7 @@ If the Group has no member Groups or Datasets.
---
-
+
### method `remove`
@@ -705,7 +701,7 @@ Remove a tag from the list. You cannot remove a required tag.
---
-
+
### method `save`
@@ -733,14 +729,212 @@ Group level save to a SNIRF file on disk.
---
-
+
+
+## class `MeasurementLists`
+
+
+
+
+
+
+### method `__init__`
+
+```python
+__init__(var, cfg: SnirfConfig)
+```
+
+
+
+
+
+
+---
+
+#### property dataType
+
+SNIRF field `dataType`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. See Appendix for list of possible values.
+
+---
+
+#### property dataTypeIndex
+
+SNIRF field `dataTypeIndex`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+Data-type specific parameter indices. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. Note that the Time Domain and Diffuse Correlation Spectroscopy data types have two additional parameters and so `dataTypeIndex` must be a 2-D array with 2 columns that index the additional parameters.
+
+---
+
+#### property dataTypeLabel
+
+SNIRF field `dataTypeLabel`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+Data-type label. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+---
+
+#### property dataUnit
+
+SNIRF field `dataUnit`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+International System of Units (SI units) identifier for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+---
+
+#### property detectorGain
+
+SNIRF field `detectorGain`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. Units are optionally defined in `metaDataTags`.
+
+---
+
+#### property detectorIndex
+
+SNIRF field `detectorIndex`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+Detector indices for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+---
+
+#### property filename
+
+The filename the Snirf object was loaded from and will save to.
+
+None if not associated with a Group on disk.
+
+---
+
+#### property location
+
+The HDF5 relative location identifier.
+
+---
+
+#### property sourceIndex
+
+SNIRF field `sourceIndex`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+Source indices for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+
+
+---
+
+#### property sourcePower
+
+SNIRF field `sourcePower`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. Units are optionally defined in `metaDataTags`.
+
+---
+
+#### property wavelengthActual
+
+SNIRF field `wavelengthActual`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+Actual (measured) wavelength in nm, if available, for the source in each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+---
+
+#### property wavelengthEmissionActual
+
+SNIRF field `wavelengthEmissionActual`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+Actual (measured) emission wavelength in nm, if available, for the source in each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+
+
+---
+
+#### property wavelengthIndex
+
+SNIRF field `wavelengthIndex`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+Index of the "nominal" wavelength (in `probe.wavelengths`) for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+
+
+---
+
+
+
+### method `is_empty`
+
+```python
+is_empty()
+```
+
+If the Group has no member Groups or Datasets.
+
+
+
+**Returns:**
+
+ - `bool`: True if empty, False if not
+
+---
+
+
+
+### method `save`
+
+```python
+save(*args)
+```
+
+Group level save to a SNIRF file on disk.
+
+
+
+**Args:**
+
+ - `args` (str or h5py.File): A path to a closed SNIRF file on disk or an open `h5py.File` instance
+
+
+
+**Examples:**
+ save can be called on a Group already on disk to overwrite the current contents:
+ >>> mysnirf.nirs[0].probe.save()
+
+ or using a new filename to write the Group there:
+ >>> mysnirf.nirs[0].probe.save(<new destination>)
+
+
+
+---
+
+
## class `Probe`
-
+
### method `__init__`
@@ -877,8 +1071,6 @@ This is a 2-D array storing the neurological landmark positions measurement fro
The HDF5 relative location indentifier.
-None if not associataed with a Group on disk.
-
---
#### property momentOrders
@@ -943,16 +1135,6 @@ This field describes the time delays (in `TimeUnit` units) used for gated time d
---
-#### property useLocalIndex
-
-SNIRF field `useLocalIndex`.
-
-If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
-
-For modular NIRS systems, setting this flag to a non-zero integer indicates that `measurementList(k).sourceIndex` and `measurementList(k).detectorIndex` are module-specific local-indices. One must also include `measurementList(k).moduleIndex`, or when cross-module channels present, both `measurementList(k).sourceModuleIndex` and `measurementList(k).detectorModuleIndex` in the `measurementList` structure in order to restore the global indices of the sources/detectors.
-
----
-
#### property wavelengths
SNIRF field `wavelengths`.
@@ -983,7 +1165,7 @@ Please note that this field stores the "nominal" emission wavelengths. If the pr
---
-
+
### method `is_empty`
@@ -1001,7 +1183,7 @@ If the Group has no member Groups or Datasets.
---
-
+
### method `save`
@@ -1029,12 +1211,12 @@ Group level save to a SNIRF file on disk.
---
-
+
## class `NirsElement`
Wrapper for an element of indexed group `Nirs`.
-
+
### method `__init__`
@@ -1085,8 +1267,6 @@ None if not associated with a Group on disk.
The HDF5 relative location indentifier.
-None if not associataed with a Group on disk.
-
---
#### property metaDataTags
@@ -1123,7 +1303,7 @@ This is an array describing any stimulus conditions. Each element of the array
---
-
+
### method `is_empty`
@@ -1141,7 +1321,7 @@ If the Group has no member Groups or Datasets.
---
-
+
### method `save`
@@ -1169,7 +1349,7 @@ Group level save to a SNIRF file on disk.
---
-
+
## class `Nirs`
Interface for indexed group `Nirs`.
@@ -1180,7 +1360,7 @@ To add or remove an element from the list, use the `appendGroup` method and the
This group stores one set of NIRS data. This can be extended by adding the count number (e.g. `/nirs1`, `/nirs2`,...) to the group name. This is intended to allow the storage of 1 or more complete NIRS datasets inside a single SNIRF document. For example, a two-subject hyperscanning can be stored using the notation * `/nirs1` = first subject's data * `/nirs2` = second subject's data The use of a non-indexed (e.g. `/nirs`) entry is allowed when only one entry is present and is assumed to be entry 1.
-
+
### method `__init__`
@@ -1203,7 +1383,7 @@ The filename the Snirf object was loaded from and will save to.
---
-
+
### method `append`
@@ -1221,7 +1401,7 @@ Append a new Group to the IndexedGroup.
---
-
+
### method `appendGroup`
@@ -1235,7 +1415,7 @@ Creates an empty Group with the appropriate name at the end of the list of Group
---
-
+
### method `insert`
@@ -1254,7 +1434,7 @@ Insert a new Group into the IndexedGroup.
---
-
+
### method `insertGroup`
@@ -1274,7 +1454,7 @@ Creates an empty Group with a placeholder name within the list of Groups managed
---
-
+
### method `is_empty`
@@ -1292,7 +1472,7 @@ Returns True if the Indexed Group has no member Groups with contents.
---
-
+
### method `save`
@@ -1322,14 +1502,14 @@ When saving, the naming convention defined by the SNIRF spec is enforced: groups
---
-
+
## class `DataElement`
-
+
### method `__init__`
@@ -1342,6 +1522,18 @@ __init__(gid: GroupID, cfg: SnirfConfig)
+---
+
+#### property dataOffset
+
+SNIRF field `dataOffset`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+This stores an optional offset value per channel, which, when added to `/nirs(i)/data(j)/dataTimeSeries`, results in absolute data values.
+
+The length of this array is equal to the number of channels, as represented by the second dimension of the `dataTimeSeries`.
+
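+A minimal sketch (assuming a loaded `Snirf` object `s` whose data element defines this optional field): the per-channel offsets broadcast over the columns of the time series to recover absolute values.
+
+```python
+import numpy as np
+
+d = s.nirs[0].data[0]
+if d.dataOffset is not None:
+    # One offset per channel (column) of dataTimeSeries; broadcasts along the last axis
+    absolute = d.dataTimeSeries + np.asarray(d.dataOffset)
+```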
---
#### property dataTimeSeries
@@ -1370,8 +1562,6 @@ None if not associated with a Group on disk.
The HDF5 relative location indentifier.
-None if not associataed with a Group on disk.
-
---
#### property measurementList
@@ -1386,6 +1576,20 @@ Each element of the array is a structure which describes the measurement condit
---
+#### property measurementLists
+
+SNIRF field `measurementLists`.
+
+If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+
+The group for measurement list variables which map the data array onto the probe geometry (sources and detectors), data type, and wavelength. This group's datasets are arrays with size `<number of channels>`, with each position describing the corresponding column in the data matrix. (i.e. the values at `measurementLists/sourceIndex(3)` and `measurementLists/detectorIndex(3)` correspond to `dataTimeSeries(:,3)`).
+
+This group is required only if the indexed-group format `/nirs(i)/data(j)/measurementList(k)` is not used to encode the measurement list. `measurementLists` is an alternative that may offer better performance for larger probes.
+
+The arrays of `measurementLists` are the channel-wise datasets described above: `sourceIndex`, `detectorIndex`, `wavelengthIndex`, `wavelengthActual`, `wavelengthEmissionActual`, `dataType`, `dataUnit`, `dataTypeLabel`, `dataTypeIndex`, `sourcePower`, and `detectorGain`.
+
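+A minimal sketch of the column mapping (assuming a loaded `Snirf` object `s` whose file encodes `measurementLists`):
+
+```python
+data = s.nirs[0].data[0]
+ml = data.measurementLists
+k = 2  # third channel; note the spec's examples index from 1
+column = data.dataTimeSeries[:, k]
+# The k-th entries of these arrays describe the k-th data column
+print(ml.sourceIndex[k], ml.detectorIndex[k], ml.wavelengthIndex[k])
+```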
+---
+
#### property time
SNIRF field `time`.
@@ -1402,7 +1606,7 @@ Chunked data is allowed to support real-time streaming of data in this array.
---
-
+
### method `is_empty`
@@ -1420,7 +1624,39 @@ If the Group has no member Groups or Datasets.
---
-
+
+
+### method `measurementList_to_measurementLists`
+
+```python
+measurementList_to_measurementLists()
+```
+
+Converts `measurementList` to a `measurementLists` structure if it is present.
+
+This method will populate the `measurementLists` Group structure with the contents of the `measurementList` indexed Group.
+
+The `measurementList` indexed Group is not removed.
+
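+A usage sketch (assuming `data` is a `DataElement` with a populated `measurementList`):
+
+```python
+data.measurementList_to_measurementLists()
+# Both encodings now coexist; the original measurementList entries are retained
+print(data.measurementLists.sourceIndex)
+```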
+---
+
+
+
+### method `measurementLists_to_measurementList`
+
+```python
+measurementLists_to_measurementList()
+```
+
+Converts `measurementLists` to a `measurementList` indexed Group structure if it is present.
+
+This method will create new `measurementList` indexed Group entries populated with the contents of the `measurementLists` Group.
+
+The `measurementLists` Group is not removed.
+
+---
+
+
### method `save`
@@ -1448,14 +1684,14 @@ Group level save to a SNIRF file on disk.
---
-
+
## class `Data`
-
+
### method `__init__`
@@ -1478,7 +1714,7 @@ The filename the Snirf object was loaded from and will save to.
---
-
+
### method `append`
@@ -1496,7 +1732,7 @@ Append a new Group to the IndexedGroup.
---
-
+
### method `appendGroup`
@@ -1510,7 +1746,7 @@ Creates an empty Group with the appropriate name at the end of the list of Group
---
-
+
### method `insert`
@@ -1529,7 +1765,7 @@ Insert a new Group into the IndexedGroup.
---
-
+
### method `insertGroup`
@@ -1549,7 +1785,7 @@ Creates an empty Group with a placeholder name within the list of Groups managed
---
-
+
### method `is_empty`
@@ -1567,7 +1803,7 @@ Returns True if the Indexed Group has no member Groups with contents.
---
-
+
### method `save`
@@ -1597,12 +1833,12 @@ When saving, the naming convention defined by the SNIRF spec is enforced: groups
---
-
+
## class `MeasurementListElement`
Wrapper for an element of indexed group `MeasurementList`.
-
+
### method `__init__`
@@ -1633,7 +1869,7 @@ SNIRF field `dataTypeIndex`.
If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
-Data-type specific parameter indices. The data type index specifies additional data type specific parameters that are further elaborated by other fields in the probe structure, as detailed below. Note that the Time Domain and Diffuse Correlation Spectroscopy data types have two additional parameters and so the data type index must be a vector with 2 elements that index the additional parameters. One use of this parameter is as a stimulus condition index when `measurementList(k).dataType = 99999` (i.e, `processed` and `measurementList(k).dataTypeLabel = 'HRF ...'` .
+Data-type specific parameter index. The data type index specifies additional data type specific parameters that are further elaborated by other fields in the probe structure, as detailed below. Note that where multiple parameters are required, the same index must be used into each (examples include data types such as Time Domain and Diffuse Correlation Spectroscopy). One use of this parameter is as a stimulus condition index when `measurementList(k).dataType = 99999` (i.e., `processed`) and `measurementList(k).dataTypeLabel = 'HRF ...'`.
---
@@ -1665,33 +1901,21 @@ If dynamic_loading=True, the data is loaded from the SNIRF file only when access
Detector gain
----
-
-#### property detectorIndex
-
-SNIRF field `detectorIndex`.
+For example, if `measurementList5` is a structure with `sourceIndex=2`, `detectorIndex=3`, `wavelengthIndex=1`, `dataType=1`, `dataTypeIndex=1` would imply that the data in the 5th column of the `dataTimeSeries` variable was measured with source #2 and detector #3 at wavelength #1. Wavelengths (in nanometers) are described in the `probe.wavelengths` variable (described later). The data type in this case is 1, implying that it was a continuous wave measurement. The complete list of currently supported data types is found in the Appendix. The data type index specifies additional data type specific parameters that are further elaborated by other fields in the `probe` structure, as detailed below. Note that the Time Domain and Diffuse Correlation Spectroscopy data types have two additional parameters and so the data type index must be a vector with 2 elements that index the additional parameters.
-If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
+`sourcePower` provides the option for information about the source power for that channel to be saved along with the data. The units are not defined, unless the user takes the option of using a `metaDataTag` described below to define, for instance, `sourcePowerUnit`. `detectorGain` provides the option for information about the detector gain for that channel to be saved along with the data.
-Index of the detector.
+Note: The source indices generally refer to the optode naming (probe positions) and not necessarily the physical laser numbers on the instrument. The same is true for the detector indices. Each source optode would generally, but not necessarily, have 2 or more wavelengths (hence lasers) plugged into it in order to calculate deoxy- and oxy-hemoglobin concentrations. The data from these two wavelengths will be indexed by the same source, detector, and data type values, but have different wavelength indices. Using the same source index for lasers at the same location but with different wavelengths simplifies the bookkeeping for converting intensity measurements into concentration changes. As described below, optional variables `probe.sourceLabels` and `probe.detectorLabels` are provided for indicating the instrument specific label for sources and detectors.
---
-#### property detectorModuleIndex
+#### property detectorIndex
-SNIRF field `detectorModuleIndex`.
+SNIRF field `detectorIndex`.
If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
-Index of the module that contains the detector of the channel. This index must be used together with `sourceModuleIndex`, and can not be used when `moduleIndex` presents.
-
-
-
-For example, if `measurementList5` is a structure with `sourceIndex=2`, `detectorIndex=3`, `wavelengthIndex=1`, `dataType=1`, `dataTypeIndex=1` would imply that the data in the 5th column of the `dataTimeSeries` variable was measured with source #2 and detector #3 at wavelength #1. Wavelengths (in nanometers) are described in the `probe.wavelengths` variable (described later). The data type in this case is 1, implying that it was a continuous wave measurement. The complete list of currently supported data types is found in the Appendix. The data type index specifies additional data type specific parameters that are further elaborated by other fields in the `probe` structure, as detailed below. Note that the Time Domain and Diffuse Correlation Spectroscopy data types have two additional parameters and so the data type index must be a vector with 2 elements that index the additional parameters.
-
-`sourcePower` provides the option for information about the source power for that channel to be saved along with the data. The units are not defined, unless the user takes the option of using a `metaDataTag` described below to define, for instance, `sourcePowerUnit`. `detectorGain` provides the option for information about the detector gain for that channel to be saved along with the data.
-
-Note: The source indices generally refer to the optode naming (probe positions) and not necessarily the physical laser numbers on the instrument. The same is true for the detector indices. Each source optode would generally, but not necessarily, have 2 or more wavelengths (hence lasers) plugged into it in order to calculate deoxy- and oxy-hemoglobin concentrations. The data from these two wavelengths will be indexed by the same source, detector, and data type values, but have different wavelength indices. Using the same source index for lasers at the same location but with different wavelengths simplifies the bookkeeping for converting intensity measurements into concentration changes. As described below, optional variables `probe.sourceLabels` and `probe.detectorLabels` are provided for indicating the instrument specific label for sources and detectors.
+Index of the detector.
---
@@ -1707,18 +1931,6 @@ None if not associated with a Group on disk.
The HDF5 relative location indentifier.
-None if not associataed with a Group on disk.
-
----
-
-#### property moduleIndex
-
-SNIRF field `moduleIndex`.
-
-If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
-
-Index of a repeating module. If `moduleIndex` is provided while `useLocalIndex` is set to `true`, then, both `measurementList(k).sourceIndex` and `measurementList(k).detectorIndex` are assumed to be the local indices of the same module specified by `moduleIndex`. If the source and detector are located on different modules, one must use `sourceModuleIndex` and `detectorModuleIndex` instead to specify separate parent module indices. See below.
-
---
#### property sourceIndex
@@ -1731,16 +1943,6 @@ Index of the source.
----
-
-#### property sourceModuleIndex
-
-SNIRF field `sourceModuleIndex`.
-
-If dynamic_loading=True, the data is loaded from the SNIRF file only when accessed through the getter
-
-Index of the module that contains the source of the channel. This index must be used together with `detectorModuleIndex`, and can not be used when `moduleIndex` presents.
-
---
#### property sourcePower
@@ -1787,7 +1989,7 @@ Index of the "nominal" wavelength (in `probe.wavelengths`).
---
-
+
### method `is_empty`
@@ -1805,7 +2007,7 @@ If the Group has no member Groups or Datasets.
---
-
+
### method `save`
@@ -1833,7 +2035,7 @@ Group level save to a SNIRF file on disk.
---
-
+
## class `MeasurementList`
Interface for indexed group `MeasurementList`.
@@ -1846,7 +2048,7 @@ The measurement list. This variable serves to map the data array onto the probe
Each element of the array is a structure which describes the measurement conditions for this data with the following fields:
-
+
### method `__init__`
@@ -1869,7 +2071,7 @@ The filename the Snirf object was loaded from and will save to.
---
-
+
### method `append`
@@ -1887,7 +2089,7 @@ Append a new Group to the IndexedGroup.
---
-
+
### method `appendGroup`
@@ -1901,7 +2103,7 @@ Creates an empty Group with the appropriate name at the end of the list of Group
---
-
+
### method `insert`
@@ -1920,7 +2122,7 @@ Insert a new Group into the IndexedGroup.
---
-
+
### method `insertGroup`
@@ -1940,7 +2142,7 @@ Creates an empty Group with a placeholder name within the list of Groups managed
---
-
+
### method `is_empty`
@@ -1958,7 +2160,7 @@ Returns True if the Indexed Group has no member Groups with contents.
---
-
+
### method `save`
@@ -1988,14 +2190,14 @@ When saving, the naming convention defined by the SNIRF spec is enforced: groups
---
-
+
## class `StimElement`
-
+
### method `__init__`
@@ -2046,8 +2248,6 @@ None if not associated with a Group on disk.
The HDF5 relative location indentifier.
-None if not associataed with a Group on disk.
-
---
#### property name
@@ -2062,7 +2262,7 @@ This is a string describing the jth stimulus condition.
---
-
+
### method `is_empty`
@@ -2080,7 +2280,7 @@ If the Group has no member Groups or Datasets.
---
-
+
### method `save`
@@ -2108,14 +2308,14 @@ Group level save to a SNIRF file on disk.
---
-
+
## class `Stim`
-
+
### method `__init__`
@@ -2138,7 +2338,7 @@ The filename the Snirf object was loaded from and will save to.
---
-
+
### method `append`
@@ -2156,7 +2356,7 @@ Append a new Group to the IndexedGroup.
---
-
+
### method `appendGroup`
@@ -2170,7 +2370,7 @@ Creates an empty Group with the appropriate name at the end of the list of Group
---
-
+
### method `insert`
@@ -2189,7 +2389,7 @@ Insert a new Group into the IndexedGroup.
---
-
+
### method `insertGroup`
@@ -2209,7 +2409,7 @@ Creates an empty Group with a placeholder name within the list of Groups managed
---
-
+
### method `is_empty`
@@ -2227,7 +2427,7 @@ Returns True if the Indexed Group has no member Groups with contents.
---
-
+
### method `save`
@@ -2257,14 +2457,14 @@ When saving, the naming convention defined by the SNIRF spec is enforced: groups
---
-
+
## class `AuxElement`
-
+
### method `__init__`
@@ -2311,8 +2511,6 @@ None if not associated with a Group on disk.
The HDF5 relative location indentifier.
-None if not associataed with a Group on disk.
-
---
#### property name
@@ -2349,7 +2547,7 @@ This variable specifies the offset of the file time origin relative to absolute
---
-
+
### method `is_empty`
@@ -2367,7 +2565,7 @@ If the Group has no member Groups or Datasets.
---
-
+
### method `save`
@@ -2395,14 +2593,14 @@ Group level save to a SNIRF file on disk.
---
-
+
## class `Aux`
-
+
### method `__init__`
@@ -2425,7 +2623,7 @@ The filename the Snirf object was loaded from and will save to.
---
-
+
### method `append`
@@ -2443,7 +2641,7 @@ Append a new Group to the IndexedGroup.
---
-
+
### method `appendGroup`
@@ -2457,7 +2655,7 @@ Creates an empty Group with the appropriate name at the end of the list of Group
---
-
+
### method `insert`
@@ -2476,7 +2674,7 @@ Insert a new Group into the IndexedGroup.
---
-
+
### method `insertGroup`
@@ -2496,7 +2694,7 @@ Creates an empty Group with a placeholder name within the list of Groups managed
---
-
+
### method `is_empty`
@@ -2514,7 +2712,7 @@ Returns True if the Indexed Group has no member Groups with contents.
---
-
+
### method `save`
@@ -2544,14 +2742,14 @@ When saving, the naming convention defined by the SNIRF spec is enforced: groups
---
-
+
## class `Snirf`
-
+
### method `__init__`
@@ -2588,8 +2786,6 @@ This is a string that specifies the version of the file format. This document
The HDF5 relative location indentifier.
-None if not associataed with a Group on disk.
-
---
#### property nirs
@@ -2604,7 +2800,7 @@ This group stores one set of NIRS data. This can be extended by adding the coun
---
-
+
### method `close`
@@ -2620,7 +2816,7 @@ After closing, the underlying SNIRF file cannot be accessed from this interface
---
-
+
### method `copy`
@@ -2634,7 +2830,7 @@ A copy of a Snirf instance is a brand new HDF5 file in memory. This can be expe
---
-
+
### method `is_empty`
@@ -2652,7 +2848,37 @@ If the Group has no member Groups or Datasets.
---
-
+
+
+### method `measurementList_to_measurementLists`
+
+```python
+measurementList_to_measurementLists()
+```
+
+Converts the `measurementList` field of all `Data` elements to `measurementLists`.
+
+Does not delete the `measurementList` indexed Group.
+
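+A file-level sketch (hypothetical filename):
+
+```python
+from snirf import Snirf
+
+s = Snirf('example.snirf', 'r+')  # hypothetical file
+s.measurementList_to_measurementLists()  # populates measurementLists in every data element
+s.save()
+s.close()
+```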
+---
+
+
+
+### method `measurementLists_to_measurementList`
+
+```python
+measurementLists_to_measurementList()
+```
+
+Converts `measurementLists` to a `measurementList` indexed Group structure if it is present.
+
+This method will create new `measurementList` indexed Group entries populated with the contents of the `measurementLists` Group.
+
+The `measurementLists` Group is not removed.
+
+---
+
+
### method `save`
@@ -2682,7 +2908,7 @@ Save a SNIRF file to disk.
---
-
+
### method `validate`
diff --git a/gen/README.md b/gen/README.md
index 46ed8bc..a35acdb 100644
--- a/gen/README.md
+++ b/gen/README.md
@@ -14,5 +14,5 @@ This ensures easy maintenance of the project as the specification develops.
1. Ensure that [data.py](https://github.com/BUNPC/pysnirf2/blob/main/gen/data.py) contains correct data to parse the latest spec. Make sure `SPEC_SRC` and `VERSION` are up to date.
2. IMPORTANT! Back up or commit local changes to the code via git. The generation process may delete your changes.
-3. Using a Python > 3.6 environment equipped with [gen/requirements.txt](https://github.com/BUNPC/pysnirf2/blob/main/gen/requirements.txt), run [gen.py](https://github.com/BUNPC/pysnirf2/blob/main/gen/gen.py) from the project root
+3. Using a Python >= 3.9 environment equipped with [gen/requirements.txt](https://github.com/BUNPC/pysnirf2/blob/main/gen/requirements.txt), run [gen.py](https://github.com/BUNPC/pysnirf2/blob/main/gen/gen.py) from the project root
4. Test the resulting library
diff --git a/gen/data.py b/gen/data.py
index 5e97fd3..17e6f8f 100644
--- a/gen/data.py
+++ b/gen/data.py
@@ -1,5 +1,5 @@
-SPEC_SRC = 'https://raw.githubusercontent.com/fNIRS/snirf/v1.1/snirf_specification.md'
-SPEC_VERSION = 'v1.1' # Version of the spec linked above
+SPEC_SRC = 'https://raw.githubusercontent.com/sstucker/snirf/refs/heads/master/snirf_specification.md'
+SPEC_VERSION = '1.2-development' # Version of the spec linked above
"""
These types are fragments of the string codes used to describe the types of
@@ -38,6 +38,15 @@
DEFINITIONS_DELIM_START = '### SNIRF data container definitions'
DEFINITIONS_DELIM_END = '## Appendix'
+DATA_TYPE_DELIM_START = '### Supported `measurementList(k).dataType` values in `dataTimeSeries`'
+DATA_TYPE_DELIM_END = '### Supported `measurementList(k).dataTypeLabel` values in `dataTimeSeries`'
+
+DATA_TYPE_LABEL_TABLE_START = '### Supported `measurementList(k).dataTypeLabel` values in `dataTimeSeries`'
+DATA_TYPE_LABEL_TABLE_END = '### Supported `/nirs(i)/aux(j)/name` values'
+
+AUX_NAME_TABLE_START = '### Supported `/nirs(i)/aux(j)/name` values'
+AUX_NAME_TABLE_END = '### Examples of stimulus waveforms'
+
# -- BIDS Probe name identifiers ---------------------------------------------
BIDS_PROBE_NAMES = ['ICBM452AirSpace',
diff --git a/gen/gen.py b/gen/gen.py
index 19b8c85..b1c7e70 100644
--- a/gen/gen.py
+++ b/gen/gen.py
@@ -7,7 +7,7 @@
import getpass
import os
import sys
-import warnings
+import re
from pylint import lint
"""
@@ -15,7 +15,7 @@
hosted at SPEC_SRC.
"""
-LIB_VERSION = '0.8.2' # Version for this script
+LIB_VERSION = '0.9.2' # Version for this script
if __name__ == '__main__':
@@ -38,7 +38,7 @@
local_spec = SPEC_SRC.split('/')[-1].split('.')[0] + '_retrieved_' + datetime.now().strftime('%d_%m_%y') + '.txt'
- if os.path.exists(local_spec):
+ if os.path.exists(local_spec) and input('Use local specification document ' + local_spec + '? y/n\n') == 'y':
print('Loading specification from local document', local_spec, '...')
with open(local_spec, 'r') as f:
text = f.read()
@@ -87,7 +87,7 @@
# Get name: format pairs for each name
if len(name) > 1: # Skip the empty row
type_code = delim[-2].replace(' ', '').replace('`', '')
- type_codes.append(type_code)
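+            # Keep the field name alongside its type code so locations can be cross-checked later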
+ type_codes.append((type_code, name))
print('Found', len(type_codes), 'types in the table...')
@@ -128,8 +128,16 @@
f.write(location.replace('(i)', '').replace('(j)', '').replace('(k)', '') + '\n')
print('Wrote to locations.txt')
+ errf = False
+ for (type_code, location) in zip(type_codes, locations):
+ if type_code[1] not in location:
+ errf = True
+            print('Specification format issue: location {} does not match name/type {} from the schema table'.format(location, type_code))
+ if errf:
+ sys.exit('pysnirf2 generation aborted.')
+
if len(locations) != len(type_codes) or len(locations) != len(descriptions):
- sys.exit('Parsed ' + str(len(type_codes)) + ' type codes from the summary table but '
+        sys.exit('Parsed ' + str(len(type_codes)) + ' type codes from the summary table but '
+ str(len(locations)) + ' names from the definitions and ' + str(len(descriptions))
+ ' descriptions: the specification hosted at ' + SPEC_SRC +' was parsed incorrectly. Try adjusting the delimiters and then debug the parsing code (gen.py).')
@@ -144,7 +152,7 @@
})
for i, (location, description) in enumerate(zip(locations, descriptions)):
- type_code = type_codes[i]
+ type_code = type_codes[i][0]
name = location.split('/')[-1].split('(')[0] # Remove (i), (j)
parent = location.split('/')[-2].split('(')[0] # Remove (i), (j)
print('Found', location, 'with type', type_code)
@@ -163,18 +171,38 @@
'required': required
})
- ans = input('Proceed? y/n\n')
- if ans not in ['y', 'Y']:
+ if input('Proceed? y/n\n') not in ['y', 'Y']:
sys.exit('pysnirf2 generation aborted.')
print('Loading BIDS-specified Probe names from gen/data.py...')
for name in BIDS_PROBE_NAMES:
print('Found', name)
-
- ans = input('Proceed? y/n\n')
- if ans not in ['y', 'Y']:
+
+ print('\nParsing specification for supported data type integer values...')
+ data_type_table = unidecode(text).split(DATA_TYPE_DELIM_START)[1].split(DATA_TYPE_DELIM_END)[0]
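+    # Extract the integer codes from spec table rows of the form "- <code> - <description>"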
+ data_types = re.findall(r'(?<=\s)-\s(\d+)\s-', data_type_table)
+ data_types = [int(i) for i in data_types]
+ for i in data_types:
+ print('Found', i)
+ if input('Proceed? y/n\n') not in ['y', 'Y']:
sys.exit('pysnirf2 generation aborted.')
-
+
+ print('\nParsing specification for supported aux names...')
+ aux_name_table = unidecode(text).split(AUX_NAME_TABLE_START)[1].split(AUX_NAME_TABLE_END)[0]
+ aux_names = re.findall(r'"(.*?)"', aux_name_table)
+ for name in aux_names:
+ print('Found', name)
+ if input('Proceed? y/n\n') not in ['y', 'Y']:
+ sys.exit('pysnirf2 generation aborted.')
+
+ print('\nParsing specification for supported data type labels...')
+ data_type_label_table = unidecode(text).split(DATA_TYPE_LABEL_TABLE_START)[1].split(DATA_TYPE_LABEL_TABLE_END)[0]
+ data_type_labels = re.findall(r'"(.*?)"', data_type_label_table)
+ for name in data_type_labels:
+ print('Found', name)
+ if input('Proceed? y/n\n') not in ['y', 'Y']:
+ sys.exit('pysnirf2 generation aborted.')
+
# Generate data for template
SNIRF = {
'VERSION': SPEC_VERSION,
@@ -188,7 +216,10 @@
'INDEXED_GROUPS': [],
'GROUPS': [],
'UNSPECIFIED_DATASETS_OK': UNSPECIFIED_DATASETS_OK,
- 'BIDS_COORDINATE_SYSTEM_NAMES': BIDS_PROBE_NAMES
+ 'BIDS_COORDINATE_SYSTEM_NAMES': BIDS_PROBE_NAMES,
+ 'AUX_NAMES': aux_names,
+ 'DATA_TYPES': data_types,
+ 'DATA_TYPE_LABELS': data_type_labels
}
# Build list of groups and indexed groups
@@ -223,8 +254,7 @@
SNIRF['FOOTER'] = TEMPLATE_INSERT_END_STR + b.split(TEMPLATE_INSERT_END_STR, 1)[1]
print('Loaded footer code, {} lines'.format(len(SNIRF['FOOTER'].split('\n'))))
- ans = input('Proceed? LOCAL CHANGES MAY BE OVERWRITTEN OR LOST! y/n\n')
- if ans not in ['y', 'Y']:
+ if input('Proceed? LOCAL CHANGES MAY BE OVERWRITTEN OR LOST! y/n\n') not in ['y', 'Y']:
sys.exit('pysnirf2 generation aborted.')
try:
os.remove(library_path)
@@ -248,12 +278,10 @@
if errors == 0:
print('pysnirf2.py generated with', errors, 'errors.')
- ans = input('Format the generated code? y/n\n')
- if ans in ['y', 'Y']:
+ if input('Format the generated code? y/n\n') in ['y', 'Y']:
FormatFile(library_path, in_place=True)[:2]
-
- ans = input('Lint the generated code? y/n\n')
- if ans in ['y', 'Y']:
+
+ if input('Lint the generated code? y/n\n') in ['y', 'Y']:
lint.Run(['--errors-only', library_path])
print('\npysnirf2 generation complete.')
diff --git a/gen/pysnirf2.jinja b/gen/pysnirf2.jinja
index 04344c8..4579f59 100644
--- a/gen/pysnirf2.jinja
+++ b/gen/pysnirf2.jinja
@@ -76,11 +76,11 @@
{% if TYPES.INDEXED_GROUP in CHILD.type %}
return self._{{ CHILD.name }}
{% elif TYPES.GROUP in CHILD.type %}
- if type(self._{{ CHILD.name }}) is type(_AbsentGroup):
+ if self._{{ CHILD.name }} is _AbsentGroup:
return None
return self._{{ CHILD.name }}
{% else %}
- if type(self._{{ CHILD.name }}) is type(_AbsentDataset):
+ if self._{{ CHILD.name }} is _AbsentDataset:
return None
if type(self._{{ CHILD.name }}) is type(_PresentDataset):
{% if (TYPES.ARRAY_1D in CHILD.type) or (TYPES.ARRAY_2D in CHILD.type) %}
@@ -117,6 +117,9 @@
self._{{ CHILD.name }} = _recursive_hdf5_copy(self._{{ CHILD.name }}, value)
else:
raise ValueError("Only a Group of type {{ sentencecase(CHILD.name) }} can be assigned to {{ CHILD.name }}.")
+ {% elif TYPES.ARRAY_1D in CHILD.type or TYPES.ARRAY_2D in CHILD.type %}
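+        # Skip assignment when value is None or contains only None, rather than storing an empty array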
+ if value is not None and any([v is not None for v in value]):
+ self._{{ CHILD.name }} = np.array(value)
{% else %}
self._{{ CHILD.name }} = value
{% endif %}
@@ -152,18 +155,18 @@
else:
raise ValueError('Cannot save an anonymous ' + self.__class__.__name__ + ' instance without a filename')
{% for CHILD in NODE.children %}
+ name = self.location + '/{{ CHILD.name }}'
{% if TYPES.INDEXED_GROUP in CHILD.type %}
self.{{ CHILD.name }}._save(*args)
{% elif TYPES.GROUP in CHILD.type %}
- if type(self._{{ CHILD.name }}) is type(_AbsentGroup) or self._{{ CHILD.name }}.is_empty():
- if '{{ CHILD.name }}' in file:
- del file['{{ CHILD.name }}']
+ if self._{{ CHILD.name }} is _AbsentGroup or self._{{ CHILD.name }}.is_empty():
+ if name in file:
+ del file[name]
self._cfg.logger.info('Deleted Group %s/{{ CHILD.name }} from %s', self.location, file)
else:
self.{{ CHILD.name }}._save(*args)
{% else %}
- name = self.location + '/{{ CHILD.name }}'
- if type(self._{{ CHILD.name }}) not in [type(_AbsentDataset), type(None)]:
+        if self._{{ CHILD.name }} is not _AbsentDataset:
data = self.{{ CHILD.name }} # Use loader function via getter
if name in file:
del file[name]
@@ -222,7 +225,7 @@
{% macro gen_validator(NODE) %}
def _validate(self, result: ValidationResult):
# Validate unwritten datasets after writing them to this tempfile
- with h5py.File(TemporaryFile(), 'w') as tmp:
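+        # 'core' driver with backing_store=False keeps the file in memory; the UUID is only a unique name, never written to disk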
+ with h5py.File(str(uuid.uuid4()), 'w', driver='core', backing_store=False) as tmp:
{% for CHILD in NODE.children %}
name = self.location + '/{{ CHILD.name }}'
{% if TYPES.INDEXED_GROUP in CHILD.type %}
@@ -237,7 +240,7 @@
self.{{ CHILD.name }}._validate(result)
{% elif TYPES.GROUP in CHILD.type %}
# If Group is not present in file and empty in the wrapper, it is missing
- if type(self._{{ CHILD.name }}) in [type(_AbsentGroup), type(None)] or ('{{ CHILD.name }}' not in self._h and self._{{ CHILD.name }}.is_empty()):
+ if self._{{ CHILD.name }} is _AbsentGroup or ('{{ CHILD.name }}' not in self._h and self._{{ CHILD.name }}.is_empty()):
{% if TYPES.REQUIRED in CHILD.type %}
result._add(name, 'REQUIRED_GROUP_MISSING')
{% else %}
@@ -246,7 +249,7 @@
else:
self._{{ CHILD.name }}._validate(result)
{% else %}
- if type(self._{{ CHILD.name }}) in [type(_AbsentDataset), type(None)]:
+ if self._{{ CHILD.name }} is _AbsentDataset:
{% if TYPES.REQUIRED in CHILD.type %}
result._add(name, 'REQUIRED_DATASET_MISSING')
{% else %}
@@ -379,6 +382,7 @@ class Snirf(Group):
self._cfg = SnirfConfig()
self._cfg.dynamic_loading = dynamic_loading
self._cfg.fmode = ''
+ self._f = None # handle for filelikes and temporary files
if len(args) > 0:
path = args[0]
if enable_logging:
@@ -416,7 +420,8 @@ class Snirf(Group):
self._cfg.logger.info('Loading from filelike object')
if self._cfg.fmode == '':
self._cfg.fmode = 'r'
- self._h = h5py.File(path, 'r')
+ self._f = args[0]
+ self._h = h5py.File(self._f, 'r', backing_store=False)
else:
raise TypeError(str(path) + ' is not a valid filename')
else:
@@ -427,7 +432,7 @@ class Snirf(Group):
else:
self._cfg.logger = _create_logger('', None) # Do not log to file
self._cfg.fmode = 'w'
- self._h = h5py.File(TemporaryFile(), 'w')
+ self._h = h5py.File(str(uuid.uuid4()), 'w', driver='core', backing_store=False)
{{ declare_members(ROOT) | indent }}
{{ init_members(ROOT) | indent }}
{{ gen_properties(ROOT) }}
@@ -436,4 +441,10 @@ class Snirf(Group):
_RECOGNIZED_COORDINATE_SYSTEM_NAMES = [{% for NAME in BIDS_COORDINATE_SYSTEM_NAMES %}'{{ NAME }}', {% endfor -%}]
+_RECOGNIZED_AUX_NAMES = [{% for NAME in AUX_NAMES %}'{{ NAME }}', {% endfor -%}]
+
+_RECOGNIZED_DATA_TYPES = [{% for VALUE in DATA_TYPES %}{{ VALUE }}, {% endfor -%}]
+
+_RECOGNIZED_DATA_TYPE_LABELS = [{% for NAME in DATA_TYPE_LABELS %}'{{ NAME }}', {% endfor -%}]
+
{{ FOOTER }}
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 2351790..d43b6be 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
h5py
-numpy
+numpy>=2.0.0
setuptools
pip
termcolor
diff --git a/setup.py b/setup.py
index bbe648b..8320143 100644
--- a/setup.py
+++ b/setup.py
@@ -75,10 +75,10 @@ def run(self):
long_description=long_description,
long_description_content_type='text/markdown',
author_email='sstucker@bu.edu',
- python_requires='>=3.6.0',
+ python_requires='>=3.9.0',
install_requires=[
'h5py>=3.1.0',
- 'numpy',
+        'numpy>=2.0.0',
'setuptools',
'pip',
'termcolor',
diff --git a/snirf/pysnirf2.py b/snirf/pysnirf2.py
index 044c81c..18023b0 100644
--- a/snirf/pysnirf2.py
+++ b/snirf/pysnirf2.py
@@ -24,7 +24,7 @@
import numpy as np
from warnings import warn
from collections.abc import MutableSequence
-from tempfile import TemporaryFile
+import uuid
import logging
from typing import Tuple
import time
@@ -43,7 +43,7 @@
class SnirfFormatError(Warning):
- """Raised when SNIRF-specific error prevents file from loading properly."""
+ """Raised when SNIRF-specific error prevents file from loading or saving properly."""
pass
@@ -154,16 +154,23 @@ def _get_padded_shape(name: str, data: np.ndarray,
"""Utility function which pads data shape to ndim."""
if desired_ndim is None:
return data.shape
+ if data.ndim == desired_ndim:
+ return np.shape(data)
elif desired_ndim > data.ndim:
return np.concatenate(
[data.shape,
np.ones(int(desired_ndim) - int(data.ndim))])
- elif data.ndim == desired_ndim:
- return np.shape(data)
- else:
- raise ValueError(
- "Could not create dataset {}: ndim={} is incompatible with data which has shape {}."
- .format(name, desired_ndim, data.shape))
+ elif desired_ndim < data.ndim:
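+        # More dimensions than required: try dropping singleton dimensions to reach the desired rank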
+ flattened = [x for x in data.shape if x > 1]
+ if len(flattened) == desired_ndim:
+ warn(
+ "Dataset '{}' must have ndim {} but had erroneous shape {}. Singular dimensions were removed."
+ .format(name, desired_ndim, data.shape))
+ return flattened
+ else:
+ raise SnirfFormatError(
+ "Cannot coerce Dataset '{}' with shape {} to the required {} dimension(s)"
+ .format(name, data.shape, desired_ndim))
def _create_dataset(file: h5py.File, name: str, data):
@@ -185,6 +192,8 @@ def _create_dataset(file: h5py.File, name: str, data):
Raises:
TypeError: The data could not be mapped to a SNIRF compliant h5py format.
"""
+ if data is None: # Don't create dataset from None
+ return
data = np.array(data) # Cast to numpy type to identify
if data.size > 1:
dtype = data[0].dtype
@@ -220,6 +229,8 @@ def _create_dataset_string(file: h5py.File, name: str, data: str):
Returns:
An h5py.Dataset instance created
"""
+ if data is None:
+ return None
return file.create_dataset(name, dtype=_varlen_str_type, data=str(data))
@@ -234,6 +245,8 @@ def _create_dataset_int(file: h5py.File, name: str, data: int):
Returns:
An h5py.Dataset instance created
"""
+ if data is None:
+ return None
return file.create_dataset(name, dtype=_DTYPE_INT32, data=int(data))
@@ -248,6 +261,8 @@ def _create_dataset_float(file: h5py.File, name: str, data: float):
Returns:
An h5py.Dataset instance created
"""
+ if data is None:
+ return None
return file.create_dataset(name, dtype=_DTYPE_FLOAT64, data=float(data))
@@ -265,7 +280,11 @@ def _create_dataset_string_array(file: h5py.File,
Returns:
An h5py.Dataset instance created
"""
- array = np.array(data).astype('O')
+ try:
+ array = np.array(data).astype('O')
+ except TypeError as e:
+ warn('Could not cast {} array to numpy "O": {}'.format(name, e))
+ return
shape = _get_padded_shape(name, array, ndim)
return file.create_dataset(name, dtype=_varlen_str_type, data=array)
@@ -284,7 +303,11 @@ def _create_dataset_int_array(file: h5py.File,
Returns:
An h5py.Dataset instance created
"""
- array = np.array(data).astype(int)
+ try:
+ array = np.array(data).astype(int)
+ except TypeError as e:
+ warn('Could not cast {} array to int: {}'.format(name, e))
+ return
shape = _get_padded_shape(name, array, ndim)
return file.create_dataset(name, dtype=_DTYPE_INT32, data=array)
@@ -303,7 +326,11 @@ def _create_dataset_float_array(file: h5py.File,
Returns:
An h5py.Dataset instance created
"""
- array = np.array(data).astype(float)
+ try:
+ array = np.array(data).astype(float)
+ except TypeError as e:
+ warn('Could not cast {} array to float: {}'.format(name, e))
+ return
shape = _get_padded_shape(name, array, ndim)
return file.create_dataset(name,
dtype=_DTYPE_FLOAT64,
@@ -486,63 +513,69 @@ def _read_float_array(dataset: h5py.Dataset) -> np.ndarray:
(8, 3,
'The number of measurementList elements does not match the second dimension of dataTimeSeries'
),
- 'INVALID_TIME':
+ 'INVALID_MEASUREMENTLISTS':
(9, 3,
+ 'The length of at least one measurementLists element does not match the second dimension of dataTimeSeries'
+ ),
+ 'INVALID_TIME':
+ (10, 3,
'The length of the data/time vector does not match the first dimension of data/dataTimeSeries'
),
'INVALID_STIM_DATALABELS':
- (10, 3,
+ (11, 3,
'The length of stim/dataLabels exceeds the second dimension of stim/data'
),
'INVALID_SOURCE_INDEX':
- (11, 3,
- 'measurementList/sourceIndex exceeds length of probe/sourceLabels'),
- 'INVALID_DETECTOR_INDEX':
(12, 3,
- 'measurementList/detectorIndex exceeds length of probe/detectorLabels'),
- 'INVALID_WAVELENGTH_INDEX':
+ 'measurementList(s)/sourceIndex exceeds length of probe/sourceLabels or the first axis of source position data'
+ ),
+ 'INVALID_DETECTOR_INDEX':
(13, 3,
- 'measurementList/waveLengthIndex exceeds length of probe/wavelengths'),
- 'NEGATIVE_INDEX': (14, 3, 'An index is negative'),
+ 'measurementList(s)/detectorIndex exceeds length of probe/detectorLabels or the first axis of source position data'
+ ),
+ 'INVALID_WAVELENGTH_INDEX':
+ (14, 3,
+ 'measurementList(s)/waveLengthIndex exceeds length of probe/wavelengths'),
+ 'NEGATIVE_INDEX': (15, 3, 'An index is negative'),
# Warnings (Severity 2)
- 'INDEX_OF_ZERO': (15, 2, 'An index of zero is usually undefined'),
- 'UNRECOGNIZED_GROUP': (16, 2,
+ 'INDEX_OF_ZERO': (16, 2, 'An index of zero is usually undefined'),
+ 'UNRECOGNIZED_GROUP': (17, 2,
'An unspecified Group is a part of the file'),
'UNRECOGNIZED_DATASET':
- (17, 2,
- 'An unspecified Dataset is a part of the file in an unexpected place'),
- 'UNRECOGNIZED_DATATYPELABEL':
(18, 2,
- 'measurementList/dataTypeLabel is not one of the recognized values listed in the Appendix'
+ 'An unspecified Dataset is a part of the file in an unexpected place'),
+ 'UNRECOGNIZED_DATA_TYPE_LABEL':
+ (19, 3,
+ 'measurementList(s)/dataTypeLabel is not one of the recognized values listed in the Appendix'
),
- 'UNRECOGNIZED_DATATYPE':
- (19, 2,
- 'measurementList/dataType is not one of the recognized values listed in the Appendix'
+ 'UNRECOGNIZED_DATA_TYPE':
+ (20, 3,
+ 'measurementList(s)/dataType is not one of the recognized values listed in the Appendix'
),
'INT_64':
- (25, 2,
+ (21, 2,
'The SNIRF specification limits users to the use of 32 bit native integer types'
),
'UNRECOGNIZED_COORDINATE_SYSTEM':
- (26, 2,
+ (22, 2,
'The identifying string of the coordinate system was not recognized.'),
'NO_COORDINATE_SYSTEM_DESCRIPTION':
- (27, 2,
+ (23, 2,
"The coordinate system was unrecognized or 'Other' but lacks a probe/coordinateSystemDescription"
),
'FIXED_LENGTH_STRING':
- (20, 2,
+ (24, 2,
'The use of fixed-length strings is discouraged and may be banned by a future spec version. Rewrite this file with pysnirf2 to use variable length strings'
),
# Info (Severity 1)
- 'OPTIONAL_GROUP_MISSING': (21, 1,
+ 'OPTIONAL_GROUP_MISSING': (25, 1,
'Missing an optional Group in this location'),
- 'OPTIONAL_DATASET_MISSING': (22, 1,
+ 'OPTIONAL_DATASET_MISSING': (26, 1,
'Missing optional Dataset in this location'),
'OPTIONAL_INDEXED_GROUP_EMPTY':
- (23, 1, 'The optional indexed group has no elements'),
+ (27, 1, 'The optional indexed group has no elements'),
# OK (Severity 0)
- 'OK': (24, 0, 'No issues detected'),
+ 'OK': (28, 0, 'No issues detected'),
}
@@ -560,6 +593,7 @@ class ValidationIssue:
3 `FATAL`, The file is invalid.
message: A string containing a more verbose description of the issue
"""
+
def __init__(self, name: str, location: str):
self.location = location # A location in the Snirf file matching an HDF5 name
self.name = name # The name of the issue, a key in _CODES above
@@ -596,9 +630,10 @@ class ValidationResult:
```
= .validate()
- = validateSnirf()
+ = validateSnirf()
```
"""
+
def __init__(self):
"""`ValidationResult` should only be created by a `Snirf` instance's `validate` method."""
self._issues = []
@@ -679,7 +714,7 @@ def display(self, severity=2):
longest_code = max([len(code) for code in self.codes])
except ValueError:
print('Empty ValidationResult: nothing to display')
- s = object.__repr__(self) + '\n'
+ s = repr(self) + '\n'
printed = [0, 0, 0, 0]
for issue in self._issues:
sev = issue.severity
@@ -737,6 +772,8 @@ def _validate_string(dataset: h5py.Dataset) -> str:
Returns:
An issue code describing the validity of the dataset based on its format and shape
"""
+ if dataset is None:
+ return 'REQUIRED_DATASET_MISSING'
if type(dataset) is not h5py.Dataset:
raise TypeError("'dataset' must be type h5py.Dataset")
if dataset.size > 1 or dataset.ndim > 0:
@@ -758,6 +795,8 @@ def _validate_int(dataset: h5py.Dataset) -> str:
Returns:
An issue code describing the validity of the dataset based on its format and shape
"""
+ if dataset is None:
+ return 'REQUIRED_DATASET_MISSING'
if type(dataset) is not h5py.Dataset:
raise TypeError("'dataset' must be type h5py.Dataset")
if dataset.size > 1 or dataset.ndim > 0:
@@ -783,6 +822,8 @@ def _validate_float(dataset: h5py.Dataset) -> str:
Returns:
An issue code describing the validity of the dataset based on its format and shape
"""
+ if dataset is None:
+ return 'REQUIRED_DATASET_MISSING'
if type(dataset) is not h5py.Dataset:
raise TypeError("'dataset' must be type h5py.Dataset")
if dataset.size > 1 or dataset.ndim > 0:
@@ -802,6 +843,8 @@ def _validate_string_array(dataset: h5py.Dataset, ndims=[1]) -> str:
Returns:
An issue code describing the validity of the dataset based on its format and shape
"""
+ if dataset is None:
+ return 'REQUIRED_DATASET_MISSING'
if type(dataset) is not h5py.Dataset:
raise TypeError("'dataset' must be type h5py.Dataset")
if dataset.ndim not in ndims:
@@ -823,6 +866,8 @@ def _validate_int_array(dataset: h5py.Dataset, ndims=[1]) -> str:
Returns:
An issue code describing the validity of the dataset based on its format and shape
"""
+ if dataset is None:
+ return 'REQUIRED_DATASET_MISSING'
if type(dataset) is not h5py.Dataset:
raise TypeError("'dataset' must be type h5py.Dataset")
if dataset.ndim not in ndims:
@@ -844,6 +889,8 @@ def _validate_float_array(dataset: h5py.Dataset, ndims=[1]) -> str:
Returns:
An issue code describing the validity of the dataset based on its format and shape
"""
+ if dataset is None:
+ return 'REQUIRED_DATASET_MISSING'
if type(dataset) is not h5py.Dataset:
raise TypeError("'dataset' must be type h5py.Dataset")
if dataset.ndim != ndims[0]:
@@ -864,6 +911,7 @@ class SnirfConfig:
logger (logging.Logger): The logger that the Snirf instance writes to
dynamic_loading (bool): If True, data is loaded from the HDF5 file only on access via property
"""
+
def __init__(self):
self.logger: logging.Logger = _logger # The logger that the interface will write to
self.dynamic_loading: bool = False # If False, data is loaded in the constructor, if True, data is loaded on access
@@ -892,6 +940,7 @@ class _PresentDatasetType():
class Group(ABC):
+
def __init__(self, varg, cfg: SnirfConfig):
"""Wrapper for an HDF5 Group element defined by SNIRF.
@@ -985,10 +1034,7 @@ def filename(self):
@property
def location(self):
- """The HDF5 relative location indentifier.
-
- None if not associataed with a Group on disk.
- """
+ """The HDF5 relative location indentifier."""
if self._h != {}:
return self._h.name
else:
@@ -1331,9 +1377,12 @@ def _get_matching_keys(self, h=None):
for key in h:
numsplit = key.split(self._name)
if len(numsplit) > 1 and len(numsplit[1]) > 0:
- if len(numsplit[1]) == len(str(int(numsplit[1]))):
- unordered.append(key)
- indices.append(int(numsplit[1]))
+ try:
+ index = int(numsplit[1])
+ except ValueError: # if name is not really an indexed group member (e.g. `measurementLists`)
+ continue
+ unordered.append(key)
+ indices.append(index)
elif key.endswith(
self._name): # Case of single Group with no index
unordered.append(key)
@@ -1395,8 +1444,8 @@ def _recursive_hdf5_copy(g_dst: Group, g_src: Group):
# ================================================================================
# <<< BEGIN TEMPLATE INSERT >>>
-# generated by sstucker on 2023-09-18
-# version v1.1 SNIRF specification parsed from https://raw.githubusercontent.com/fNIRS/snirf/v1.1/snirf_specification.md
+# generated by sstucker on 2024-12-31
+# version 1.2-development SNIRF specification parsed from https://raw.githubusercontent.com/sstucker/snirf/refs/heads/master/snirf_specification.md
class MetaDataTags(Group):
@@ -1411,6 +1460,7 @@ class MetaDataTags(Group):
The below five metadata records are minimally required in a SNIRF file
"""
+
def __init__(self, var, cfg: SnirfConfig):
super().__init__(var, cfg)
self._SubjectID = _AbsentDataset # "s"*
@@ -1494,7 +1544,7 @@ def SubjectID(self):
This record stores the string-valued ID of the study subject or experiment.
"""
- if type(self._SubjectID) is type(_AbsentDataset):
+ if self._SubjectID is _AbsentDataset:
return None
if type(self._SubjectID) is type(_PresentDataset):
return _read_string(self._h['SubjectID'])
@@ -1527,7 +1577,7 @@ def MeasurementDate(self):
- `DD` is the 2-digit date (padding zero if a single digit)
"""
- if type(self._MeasurementDate) is type(_AbsentDataset):
+ if self._MeasurementDate is _AbsentDataset:
return None
if type(self._MeasurementDate) is type(_PresentDataset):
return _read_string(self._h['MeasurementDate'])
@@ -1563,7 +1613,7 @@ def MeasurementTime(self):
- `TZD` is the time zone designator (`Z` or `+hh:mm` or `-hh:mm`)
"""
- if type(self._MeasurementTime) is type(_AbsentDataset):
+ if self._MeasurementTime is _AbsentDataset:
return None
if type(self._MeasurementTime) is type(_PresentDataset):
return _read_string(self._h['MeasurementTime'])
@@ -1595,7 +1645,7 @@ def LengthUnit(self):
"um" is the same as "mm", i.e. micrometer.
"""
- if type(self._LengthUnit) is type(_AbsentDataset):
+ if self._LengthUnit is _AbsentDataset:
return None
if type(self._LengthUnit) is type(_PresentDataset):
return _read_string(self._h['LengthUnit'])
@@ -1626,7 +1676,7 @@ def TimeUnit(self):
is the same as "ms", i.e. microsecond.
"""
- if type(self._TimeUnit) is type(_AbsentDataset):
+ if self._TimeUnit is _AbsentDataset:
return None
if type(self._TimeUnit) is type(_PresentDataset):
return _read_string(self._h['TimeUnit'])
@@ -1692,7 +1742,7 @@ def FrequencyUnit(self):
time in seconds since 1970-01-01T00:00:00Z (UTC) minus the leap seconds.
"""
- if type(self._FrequencyUnit) is type(_AbsentDataset):
+ if self._FrequencyUnit is _AbsentDataset:
return None
if type(self._FrequencyUnit) is type(_PresentDataset):
return _read_string(self._h['FrequencyUnit'])
@@ -1730,7 +1780,7 @@ def _save(self, *args):
self.__class__.__name__ +
' instance without a filename')
name = self.location + '/SubjectID'
- if type(self._SubjectID) not in [type(_AbsentDataset), type(None)]:
+ if self._SubjectID is not _AbsentDataset:
data = self.SubjectID # Use loader function via getter
if name in file:
del file[name]
@@ -1741,9 +1791,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/MeasurementDate'
- if type(self._MeasurementDate) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._MeasurementDate is not _AbsentDataset:
data = self.MeasurementDate # Use loader function via getter
if name in file:
del file[name]
@@ -1754,9 +1802,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/MeasurementTime'
- if type(self._MeasurementTime) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._MeasurementTime is not _AbsentDataset:
data = self.MeasurementTime # Use loader function via getter
if name in file:
del file[name]
@@ -1767,7 +1813,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/LengthUnit'
- if type(self._LengthUnit) not in [type(_AbsentDataset), type(None)]:
+ if self._LengthUnit is not _AbsentDataset:
data = self.LengthUnit # Use loader function via getter
if name in file:
del file[name]
@@ -1778,7 +1824,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/TimeUnit'
- if type(self._TimeUnit) not in [type(_AbsentDataset), type(None)]:
+ if self._TimeUnit is not _AbsentDataset:
data = self.TimeUnit # Use loader function via getter
if name in file:
del file[name]
@@ -1789,7 +1835,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/FrequencyUnit'
- if type(self._FrequencyUnit) not in [type(_AbsentDataset), type(None)]:
+ if self._FrequencyUnit is not _AbsentDataset:
data = self.FrequencyUnit # Use loader function via getter
if name in file:
del file[name]
@@ -1812,9 +1858,12 @@ def _save(self, *args):
def _validate(self, result: ValidationResult):
# Validate unwritten datasets after writing them to this tempfile
- with h5py.File(TemporaryFile(), 'w') as tmp:
+ with h5py.File(str(uuid.uuid4()),
+ 'w',
+ driver='core',
+ backing_store=False) as tmp:
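+ # Editorial note: with driver='core' and backing_store=False the file
+ # exists only in memory, so the uuid4 string is a throwaway name that
+ # never touches disk (unlike the TemporaryFile used previously).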
name = self.location + '/SubjectID'
- if type(self._SubjectID) in [type(_AbsentDataset), type(None)]:
+ if self._SubjectID is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -1828,9 +1877,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/MeasurementDate'
- if type(self._MeasurementDate) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._MeasurementDate is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -1844,9 +1891,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/MeasurementTime'
- if type(self._MeasurementTime) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._MeasurementTime is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -1860,7 +1905,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/LengthUnit'
- if type(self._LengthUnit) in [type(_AbsentDataset), type(None)]:
+ if self._LengthUnit is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -1874,7 +1919,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/TimeUnit'
- if type(self._TimeUnit) in [type(_AbsentDataset), type(None)]:
+ if self._TimeUnit is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -1888,7 +1933,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/FrequencyUnit'
- if type(self._FrequencyUnit) in [type(_AbsentDataset), type(None)]:
+ if self._FrequencyUnit is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -1903,230 +1948,997 @@ def _validate(self, result: ValidationResult):
result._add(name, 'INVALID_DATASET_TYPE')
-class Probe(Group):
- """Wrapper for Group of type `probe`.
+class MeasurementLists(Group):
+ """Wrapper for Group of type `measurementLists`.
- This is a structured variable that describes the probe (source-detector)
- geometry. This variable has a number of required fields.
+ The group for measurement list variables which map the data array onto the probe geometry (sources and detectors), data type, and wavelength. This group's datasets are arrays whose length equals the number of channels, with each position describing the corresponding column in the data matrix (i.e. the values at `measurementLists/sourceIndex(3)` and `measurementLists/detectorIndex(3)` correspond to `dataTimeSeries(:,3)`).
+
+ This group is required only if the indexed-group format `/nirs(i)/data(j)/measurementList(k)` is not used to encode the measurement list. `measurementLists` is an alternative that may offer better performance for larger probes.
+
+ The arrays of `measurementLists` are: `sourceIndex`, `detectorIndex`, `wavelengthIndex`, `wavelengthActual`, `wavelengthEmissionActual`, `dataType`, `dataUnit`, `dataTypeLabel`, `dataTypeIndex`, `sourcePower`, and `detectorGain`.
"""
+
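+ # Usage sketch (editorial; channel values are hypothetical): position k of
+ # each array describes column k of `dataTimeSeries`, e.g. two CW channels:
+ #     ml = nirs_element.data[0].measurementLists
+ #     ml.sourceIndex = [1, 1]
+ #     ml.detectorIndex = [1, 2]
+ #     ml.wavelengthIndex = [1, 2]
+ #     ml.dataType = [1, 1]  # 1 is raw CW amplitude per the SNIRF appendix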
def __init__(self, var, cfg: SnirfConfig):
super().__init__(var, cfg)
- self._wavelengths = _AbsentDataset # [,...]*
- self._wavelengthsEmission = _AbsentDataset # [,...]
- self._sourcePos2D = _AbsentDataset # [[,...]]*1
- self._sourcePos3D = _AbsentDataset # [[,...]]*1
- self._detectorPos2D = _AbsentDataset # [[,...]]*2
- self._detectorPos3D = _AbsentDataset # [[,...]]*2
- self._frequencies = _AbsentDataset # [,...]
- self._timeDelays = _AbsentDataset # [,...]
- self._timeDelayWidths = _AbsentDataset # [,...]
- self._momentOrders = _AbsentDataset # [,...]
- self._correlationTimeDelays = _AbsentDataset # [,...]
- self._correlationTimeDelayWidths = _AbsentDataset # [,...]
- self._sourceLabels = _AbsentDataset # [["s",...]]
- self._detectorLabels = _AbsentDataset # ["s",...]
- self._landmarkPos2D = _AbsentDataset # [[,...]]
- self._landmarkPos3D = _AbsentDataset # [[,...]]
- self._landmarkLabels = _AbsentDataset # ["s",...]
- self._coordinateSystem = _AbsentDataset # "s"
- self._coordinateSystemDescription = _AbsentDataset # "s"
- self._useLocalIndex = _AbsentDataset #
+ self._sourceIndex = _AbsentDataset # [,...]*
+ self._detectorIndex = _AbsentDataset # [,...]*
+ self._wavelengthIndex = _AbsentDataset # [,...]*
+ self._wavelengthActual = _AbsentDataset # [,...]
+ self._wavelengthEmissionActual = _AbsentDataset # [,...]
+ self._dataType = _AbsentDataset # [,...]*
+ self._dataUnit = _AbsentDataset # ["s",...]
+ self._dataTypeLabel = _AbsentDataset # ["s",...]
+ self._dataTypeIndex = _AbsentDataset # [,...]*
+ self._sourcePower = _AbsentDataset # [,...]
+ self._detectorGain = _AbsentDataset # [,...]
self._snirf_names = [
- 'wavelengths',
- 'wavelengthsEmission',
- 'sourcePos2D',
- 'sourcePos3D',
- 'detectorPos2D',
- 'detectorPos3D',
- 'frequencies',
- 'timeDelays',
- 'timeDelayWidths',
- 'momentOrders',
- 'correlationTimeDelays',
- 'correlationTimeDelayWidths',
- 'sourceLabels',
- 'detectorLabels',
- 'landmarkPos2D',
- 'landmarkPos3D',
- 'landmarkLabels',
- 'coordinateSystem',
- 'coordinateSystemDescription',
- 'useLocalIndex',
+ 'sourceIndex',
+ 'detectorIndex',
+ 'wavelengthIndex',
+ 'wavelengthActual',
+ 'wavelengthEmissionActual',
+ 'dataType',
+ 'dataUnit',
+ 'dataTypeLabel',
+ 'dataTypeIndex',
+ 'sourcePower',
+ 'detectorGain',
]
self._indexed_groups = []
- if 'wavelengths' in self._h:
- if not self._cfg.dynamic_loading:
- self._wavelengths = _read_float_array(self._h['wavelengths'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._wavelengths = _PresentDataset
- else: # if the dataset is not found on disk
- self._wavelengths = _AbsentDataset
- if 'wavelengthsEmission' in self._h:
- if not self._cfg.dynamic_loading:
- self._wavelengthsEmission = _read_float_array(
- self._h['wavelengthsEmission'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._wavelengthsEmission = _PresentDataset
- else: # if the dataset is not found on disk
- self._wavelengthsEmission = _AbsentDataset
- if 'sourcePos2D' in self._h:
- if not self._cfg.dynamic_loading:
- self._sourcePos2D = _read_float_array(self._h['sourcePos2D'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._sourcePos2D = _PresentDataset
- else: # if the dataset is not found on disk
- self._sourcePos2D = _AbsentDataset
- if 'sourcePos3D' in self._h:
- if not self._cfg.dynamic_loading:
- self._sourcePos3D = _read_float_array(self._h['sourcePos3D'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._sourcePos3D = _PresentDataset
- else: # if the dataset is not found on disk
- self._sourcePos3D = _AbsentDataset
- if 'detectorPos2D' in self._h:
- if not self._cfg.dynamic_loading:
- self._detectorPos2D = _read_float_array(
- self._h['detectorPos2D'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._detectorPos2D = _PresentDataset
- else: # if the dataset is not found on disk
- self._detectorPos2D = _AbsentDataset
- if 'detectorPos3D' in self._h:
- if not self._cfg.dynamic_loading:
- self._detectorPos3D = _read_float_array(
- self._h['detectorPos3D'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._detectorPos3D = _PresentDataset
- else: # if the dataset is not found on disk
- self._detectorPos3D = _AbsentDataset
- if 'frequencies' in self._h:
- if not self._cfg.dynamic_loading:
- self._frequencies = _read_float_array(self._h['frequencies'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._frequencies = _PresentDataset
- else: # if the dataset is not found on disk
- self._frequencies = _AbsentDataset
- if 'timeDelays' in self._h:
- if not self._cfg.dynamic_loading:
- self._timeDelays = _read_float_array(self._h['timeDelays'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._timeDelays = _PresentDataset
- else: # if the dataset is not found on disk
- self._timeDelays = _AbsentDataset
- if 'timeDelayWidths' in self._h:
- if not self._cfg.dynamic_loading:
- self._timeDelayWidths = _read_float_array(
- self._h['timeDelayWidths'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._timeDelayWidths = _PresentDataset
- else: # if the dataset is not found on disk
- self._timeDelayWidths = _AbsentDataset
- if 'momentOrders' in self._h:
+ if 'sourceIndex' in self._h:
if not self._cfg.dynamic_loading:
- self._momentOrders = _read_float_array(self._h['momentOrders'])
+ self._sourceIndex = _read_int_array(self._h['sourceIndex'])
else: # if the dataset is found on disk but dynamic_loading=True
- self._momentOrders = _PresentDataset
+ self._sourceIndex = _PresentDataset
else: # if the dataset is not found on disk
- self._momentOrders = _AbsentDataset
- if 'correlationTimeDelays' in self._h:
+ self._sourceIndex = _AbsentDataset
+ if 'detectorIndex' in self._h:
if not self._cfg.dynamic_loading:
- self._correlationTimeDelays = _read_float_array(
- self._h['correlationTimeDelays'])
+ self._detectorIndex = _read_int_array(self._h['detectorIndex'])
else: # if the dataset is found on disk but dynamic_loading=True
- self._correlationTimeDelays = _PresentDataset
+ self._detectorIndex = _PresentDataset
else: # if the dataset is not found on disk
- self._correlationTimeDelays = _AbsentDataset
- if 'correlationTimeDelayWidths' in self._h:
+ self._detectorIndex = _AbsentDataset
+ if 'wavelengthIndex' in self._h:
if not self._cfg.dynamic_loading:
- self._correlationTimeDelayWidths = _read_float_array(
- self._h['correlationTimeDelayWidths'])
+ self._wavelengthIndex = _read_int_array(
+ self._h['wavelengthIndex'])
else: # if the dataset is found on disk but dynamic_loading=True
- self._correlationTimeDelayWidths = _PresentDataset
+ self._wavelengthIndex = _PresentDataset
else: # if the dataset is not found on disk
- self._correlationTimeDelayWidths = _AbsentDataset
- if 'sourceLabels' in self._h:
+ self._wavelengthIndex = _AbsentDataset
+ if 'wavelengthActual' in self._h:
if not self._cfg.dynamic_loading:
- self._sourceLabels = _read_string_array(
- self._h['sourceLabels'])
+ self._wavelengthActual = _read_float_array(
+ self._h['wavelengthActual'])
else: # if the dataset is found on disk but dynamic_loading=True
- self._sourceLabels = _PresentDataset
+ self._wavelengthActual = _PresentDataset
else: # if the dataset is not found on disk
- self._sourceLabels = _AbsentDataset
- if 'detectorLabels' in self._h:
+ self._wavelengthActual = _AbsentDataset
+ if 'wavelengthEmissionActual' in self._h:
if not self._cfg.dynamic_loading:
- self._detectorLabels = _read_string_array(
- self._h['detectorLabels'])
+ self._wavelengthEmissionActual = _read_float_array(
+ self._h['wavelengthEmissionActual'])
else: # if the dataset is found on disk but dynamic_loading=True
- self._detectorLabels = _PresentDataset
+ self._wavelengthEmissionActual = _PresentDataset
else: # if the dataset is not found on disk
- self._detectorLabels = _AbsentDataset
- if 'landmarkPos2D' in self._h:
+ self._wavelengthEmissionActual = _AbsentDataset
+ if 'dataType' in self._h:
if not self._cfg.dynamic_loading:
- self._landmarkPos2D = _read_float_array(
- self._h['landmarkPos2D'])
+ self._dataType = _read_int_array(self._h['dataType'])
else: # if the dataset is found on disk but dynamic_loading=True
- self._landmarkPos2D = _PresentDataset
+ self._dataType = _PresentDataset
else: # if the dataset is not found on disk
- self._landmarkPos2D = _AbsentDataset
- if 'landmarkPos3D' in self._h:
+ self._dataType = _AbsentDataset
+ if 'dataUnit' in self._h:
if not self._cfg.dynamic_loading:
- self._landmarkPos3D = _read_float_array(
- self._h['landmarkPos3D'])
+ self._dataUnit = _read_string_array(self._h['dataUnit'])
else: # if the dataset is found on disk but dynamic_loading=True
- self._landmarkPos3D = _PresentDataset
+ self._dataUnit = _PresentDataset
else: # if the dataset is not found on disk
- self._landmarkPos3D = _AbsentDataset
- if 'landmarkLabels' in self._h:
+ self._dataUnit = _AbsentDataset
+ if 'dataTypeLabel' in self._h:
if not self._cfg.dynamic_loading:
- self._landmarkLabels = _read_string_array(
- self._h['landmarkLabels'])
+ self._dataTypeLabel = _read_string_array(
+ self._h['dataTypeLabel'])
else: # if the dataset is found on disk but dynamic_loading=True
- self._landmarkLabels = _PresentDataset
+ self._dataTypeLabel = _PresentDataset
else: # if the dataset is not found on disk
- self._landmarkLabels = _AbsentDataset
- if 'coordinateSystem' in self._h:
+ self._dataTypeLabel = _AbsentDataset
+ if 'dataTypeIndex' in self._h:
if not self._cfg.dynamic_loading:
- self._coordinateSystem = _read_string(
- self._h['coordinateSystem'])
+ self._dataTypeIndex = _read_int_array(self._h['dataTypeIndex'])
else: # if the dataset is found on disk but dynamic_loading=True
- self._coordinateSystem = _PresentDataset
+ self._dataTypeIndex = _PresentDataset
else: # if the dataset is not found on disk
- self._coordinateSystem = _AbsentDataset
- if 'coordinateSystemDescription' in self._h:
+ self._dataTypeIndex = _AbsentDataset
+ if 'sourcePower' in self._h:
if not self._cfg.dynamic_loading:
- self._coordinateSystemDescription = _read_string(
- self._h['coordinateSystemDescription'])
+ self._sourcePower = _read_float_array(self._h['sourcePower'])
else: # if the dataset is found on disk but dynamic_loading=True
- self._coordinateSystemDescription = _PresentDataset
+ self._sourcePower = _PresentDataset
else: # if the dataset is not found on disk
- self._coordinateSystemDescription = _AbsentDataset
- if 'useLocalIndex' in self._h:
+ self._sourcePower = _AbsentDataset
+ if 'detectorGain' in self._h:
if not self._cfg.dynamic_loading:
- self._useLocalIndex = _read_int(self._h['useLocalIndex'])
+ self._detectorGain = _read_float_array(self._h['detectorGain'])
else: # if the dataset is found on disk but dynamic_loading=True
- self._useLocalIndex = _PresentDataset
+ self._detectorGain = _PresentDataset
else: # if the dataset is not found on disk
- self._useLocalIndex = _AbsentDataset
+ self._detectorGain = _AbsentDataset
@property
- def wavelengths(self):
- """SNIRF field `wavelengths`.
+ def sourceIndex(self):
+ """SNIRF field `sourceIndex`.
If dynamic_loading=True, the data is loaded from the SNIRF file only
when accessed through the getter
- This field describes the "nominal" wavelengths used (in `nm` unit). This is indexed by the
- `wavelengthIndex` of the measurementList variable. For example, `probe.wavelengths` = [690,
- 780, 830]; implies that the measurements were taken at three wavelengths (690 nm,
- 780 nm, and 830 nm). The wavelength index of
- `measurementList(k).wavelengthIndex` variable refers to this field.
- `measurementList(k).wavelengthIndex` = 2 means the kth measurement
- was at 780 nm.
+ Source indices for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+ """
+ if self._sourceIndex is _AbsentDataset:
+ return None
+ if type(self._sourceIndex) is type(_PresentDataset):
+ return _read_int_array(self._h['sourceIndex'])
+ self._cfg.logger.info('Dynamically loaded %s/sourceIndex from %s',
+ self.location, self.filename)
+ return self._sourceIndex
- Please note that this field stores the "nominal" wavelengths. If the precise
- (measured) wavelengths differ from the nominal wavelengths, one can store those
+ @sourceIndex.setter
+ def sourceIndex(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._sourceIndex = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/sourceIndex in %s', self.location, self.filename)
+
+ @sourceIndex.deleter
+ def sourceIndex(self):
+ self._sourceIndex = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/sourceIndex from %s', self.location,
+ self.filename)
+
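+ # Editorial sketch of the accessor contract repeated for every field below:
+ #     ml.sourceIndex = [1, 2, 3]  # setter coerces to np.array([1, 2, 3])
+ #     ml.sourceIndex = None       # ignored by the setter; value unchanged
+ #     del ml.sourceIndex          # marks the dataset absent until next save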
+ @property
+ def detectorIndex(self):
+ """SNIRF field `detectorIndex`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ Detector indices for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+ """
+ if self._detectorIndex is _AbsentDataset:
+ return None
+ if type(self._detectorIndex) is type(_PresentDataset):
+ return _read_int_array(self._h['detectorIndex'])
+ self._cfg.logger.info(
+ 'Dynamically loaded %s/detectorIndex from %s', self.location,
+ self.filename)
+ return self._detectorIndex
+
+ @detectorIndex.setter
+ def detectorIndex(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._detectorIndex = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/detectorIndex in %s', self.location, self.filename)
+
+ @detectorIndex.deleter
+ def detectorIndex(self):
+ self._detectorIndex = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/detectorIndex from %s',
+ self.location, self.filename)
+
+ @property
+ def wavelengthIndex(self):
+ """SNIRF field `wavelengthIndex`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ Index of the "nominal" wavelength (in `probe.wavelengths`) for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+ """
+ if self._wavelengthIndex is _AbsentDataset:
+ return None
+ if type(self._wavelengthIndex) is type(_PresentDataset):
+ return _read_int_array(self._h['wavelengthIndex'])
+ self._cfg.logger.info(
+ 'Dynamically loaded %s/wavelengthIndex from %s', self.location,
+ self.filename)
+ return self._wavelengthIndex
+
+ @wavelengthIndex.setter
+ def wavelengthIndex(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._wavelengthIndex = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/wavelengthIndex in %s', self.location, self.filename)
+
+ @wavelengthIndex.deleter
+ def wavelengthIndex(self):
+ self._wavelengthIndex = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/wavelengthIndex from %s',
+ self.location, self.filename)
+
+ @property
+ def wavelengthActual(self):
+ """SNIRF field `wavelengthActual`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ Actual (measured) wavelength in nm, if available, for the source in each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+ """
+ if self._wavelengthActual is _AbsentDataset:
+ return None
+ if type(self._wavelengthActual) is type(_PresentDataset):
+ return _read_float_array(self._h['wavelengthActual'])
+ self._cfg.logger.info(
+ 'Dynamically loaded %s/wavelengthActual from %s',
+ self.location, self.filename)
+ return self._wavelengthActual
+
+ @wavelengthActual.setter
+ def wavelengthActual(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._wavelengthActual = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/wavelengthActual in %s', self.location, self.filename)
+
+ @wavelengthActual.deleter
+ def wavelengthActual(self):
+ self._wavelengthActual = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/wavelengthActual from %s',
+ self.location, self.filename)
+
+ @property
+ def wavelengthEmissionActual(self):
+ """SNIRF field `wavelengthEmissionActual`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ Actual (measured) emission wavelength in nm, if available, for the source in each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+ """
+ if self._wavelengthEmissionActual is _AbsentDataset:
+ return None
+ if type(self._wavelengthEmissionActual) is type(_PresentDataset):
+ return _read_float_array(self._h['wavelengthEmissionActual'])
+ self._cfg.logger.info(
+ 'Dynamically loaded %s/wavelengthEmissionActual from %s',
+ self.location, self.filename)
+ return self._wavelengthEmissionActual
+
+ @wavelengthEmissionActual.setter
+ def wavelengthEmissionActual(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._wavelengthEmissionActual = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/wavelengthEmissionActual in %s', self.location, self.filename)
+
+ @wavelengthEmissionActual.deleter
+ def wavelengthEmissionActual(self):
+ self._wavelengthEmissionActual = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/wavelengthEmissionActual from %s',
+ self.location, self.filename)
+
+ @property
+ def dataType(self):
+ """SNIRF field `dataType`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. See Appendix for list of possible values.
+
+ """
+ if self._dataType is _AbsentDataset:
+ return None
+ if type(self._dataType) is type(_PresentDataset):
+ return _read_int_array(self._h['dataType'])
+ self._cfg.logger.info('Dynamically loaded %s/dataType from %s',
+ self.location, self.filename)
+ return self._dataType
+
+ @dataType.setter
+ def dataType(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._dataType = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/dataType in %s', self.location, self.filename)
+
+ @dataType.deleter
+ def dataType(self):
+ self._dataType = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/dataType from %s', self.location,
+ self.filename)
+
+ @property
+ def dataUnit(self):
+ """SNIRF field `dataUnit`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ International System of Units (SI units) identifier for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+ """
+ if self._dataUnit is _AbsentDataset:
+ return None
+ if type(self._dataUnit) is type(_PresentDataset):
+ return _read_string_array(self._h['dataUnit'])
+ self._cfg.logger.info('Dynamically loaded %s/dataUnit from %s',
+ self.location, self.filename)
+ return self._dataUnit
+
+ @dataUnit.setter
+ def dataUnit(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._dataUnit = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/dataUnit in %s', self.location, self.filename)
+
+ @dataUnit.deleter
+ def dataUnit(self):
+ self._dataUnit = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/dataUnit from %s', self.location,
+ self.filename)
+
+ @property
+ def dataTypeLabel(self):
+ """SNIRF field `dataTypeLabel`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ Data-type label. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+ """
+ if self._dataTypeLabel is _AbsentDataset:
+ return None
+ if type(self._dataTypeLabel) is type(_PresentDataset):
+ return _read_string_array(self._h['dataTypeLabel'])
+ self._cfg.logger.info(
+ 'Dynamically loaded %s/dataTypeLabel from %s', self.location,
+ self.filename)
+ return self._dataTypeLabel
+
+ @dataTypeLabel.setter
+ def dataTypeLabel(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._dataTypeLabel = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/dataTypeLabel in %s', self.location, self.filename)
+
+ @dataTypeLabel.deleter
+ def dataTypeLabel(self):
+ self._dataTypeLabel = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/dataTypeLabel from %s',
+ self.location, self.filename)
+
+ @property
+ def dataTypeIndex(self):
+ """SNIRF field `dataTypeIndex`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ Data-type specific parameter indices. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. Note that the Time Domain and Diffuse Correlation Spectroscopy data types have two additional parameters and so `dataTypeIndex` must be a 2-D array with 2 columns that index the additional parameters.
+
+ """
+ if self._dataTypeIndex is _AbsentDataset:
+ return None
+ if type(self._dataTypeIndex) is type(_PresentDataset):
+ return _read_int_array(self._h['dataTypeIndex'])
+ self._cfg.logger.info(
+ 'Dynamically loaded %s/dataTypeIndex from %s', self.location,
+ self.filename)
+ return self._dataTypeIndex
+
+ @dataTypeIndex.setter
+ def dataTypeIndex(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._dataTypeIndex = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/dataTypeIndex in %s', self.location, self.filename)
+
+ @dataTypeIndex.deleter
+ def dataTypeIndex(self):
+ self._dataTypeIndex = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/dataTypeIndex from %s',
+ self.location, self.filename)
+
+ @property
+ def sourcePower(self):
+ """SNIRF field `sourcePower`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. Units are optionally defined in `metaDataTags`.
+
+ """
+ if self._sourcePower is _AbsentDataset:
+ return None
+ if type(self._sourcePower) is type(_PresentDataset):
+ return _read_float_array(self._h['sourcePower'])
+ self._cfg.logger.info('Dynamically loaded %s/sourcePower from %s',
+ self.location, self.filename)
+ return self._sourcePower
+
+ @sourcePower.setter
+ def sourcePower(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._sourcePower = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/sourcePower in %s', self.location, self.filename)
+
+ @sourcePower.deleter
+ def sourcePower(self):
+ self._sourcePower = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/sourcePower from %s', self.location,
+ self.filename)
+
+ @property
+ def detectorGain(self):
+ """SNIRF field `detectorGain`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. Units are optionally defined in `metaDataTags`.
+
+ """
+ if self._detectorGain is _AbsentDataset:
+ return None
+ if type(self._detectorGain) is type(_PresentDataset):
+ return _read_float_array(self._h['detectorGain'])
+ self._cfg.logger.info('Dynamically loaded %s/detectorGain from %s',
+ self.location, self.filename)
+ return self._detectorGain
+
+ @detectorGain.setter
+ def detectorGain(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._detectorGain = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/detectorGain in %s', self.location, self.filename)
+
+ @detectorGain.deleter
+ def detectorGain(self):
+ self._detectorGain = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/detectorGain from %s', self.location,
+ self.filename)
+
+ def _save(self, *args):
+ if len(args) > 0 and type(args[0]) is h5py.File:
+ file = args[0]
+ if self.location not in file:
+ file.create_group(self.location)
+ # self._cfg.logger.info('Created Group at %s in %s', self.location, file)
+ else:
+ if self.location not in file:
+ # Assign the wrapper to the new HDF5 Group on disk
+ self._h = file.create_group(self.location)
+ # self._cfg.logger.info('Created Group at %s in %s', self.location, file)
+ if self._h != {}:
+ file = self._h.file
+ else:
+ raise ValueError('Cannot save an anonymous ' +
+ self.__class__.__name__ +
+ ' instance without a filename')
+ name = self.location + '/sourceIndex'
+ if self._sourceIndex is not _AbsentDataset:
+ data = self.sourceIndex # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_int_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/detectorIndex'
+ if self._detectorIndex is not _AbsentDataset:
+ data = self.detectorIndex # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_int_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/wavelengthIndex'
+ if self._wavelengthIndex is not _AbsentDataset:
+ data = self.wavelengthIndex # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_int_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/wavelengthActual'
+ if self._wavelengthActual is not _AbsentDataset:
+ data = self.wavelengthActual # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_float_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/wavelengthEmissionActual'
+ if self._wavelengthEmissionActual is not _AbsentDataset:
+ data = self.wavelengthEmissionActual # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_float_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/dataType'
+ if self._dataType is not _AbsentDataset:
+ data = self.dataType # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_int_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/dataUnit'
+ if self._dataUnit is not _AbsentDataset:
+ data = self.dataUnit # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_string_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/dataTypeLabel'
+ if self._dataTypeLabel is not _AbsentDataset:
+ data = self.dataTypeLabel # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_string_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/dataTypeIndex'
+ if self._dataTypeIndex is not _AbsentDataset:
+ data = self.dataTypeIndex # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_int_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/sourcePower'
+ if self._sourcePower is not _AbsentDataset:
+ data = self.sourcePower # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_float_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/detectorGain'
+ if self._detectorGain is not _AbsentDataset:
+ data = self.detectorGain # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_float_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+
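+ # Editorial note on the save contract above: a field still marked
+ # _AbsentDataset has any on-disk dataset deleted, so `del ml.sourcePower`
+ # followed by a save removes measurementLists/sourcePower from the file.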
+ def _validate(self, result: ValidationResult):
+ # Validate unwritten datasets after writing them to this tempfile
+ with h5py.File(str(uuid.uuid4()),
+ 'w',
+ driver='core',
+ backing_store=False) as tmp:
+ name = self.location + '/sourceIndex'
+ if self._sourceIndex is _AbsentDataset:
+ result._add(name, 'REQUIRED_DATASET_MISSING')
+ else:
+ try:
+ if type(self._sourceIndex) is type(
+ _PresentDataset) or 'sourceIndex' in self._h:
+ dataset = self._h['sourceIndex']
+ else:
+ dataset = _create_dataset_int_array(
+ tmp, 'sourceIndex', self._sourceIndex)
+ result._add(name, _validate_int_array(dataset, ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
+ name = self.location + '/detectorIndex'
+ if self._detectorIndex is _AbsentDataset:
+ result._add(name, 'REQUIRED_DATASET_MISSING')
+ else:
+ try:
+ if type(self._detectorIndex) is type(
+ _PresentDataset) or 'detectorIndex' in self._h:
+ dataset = self._h['detectorIndex']
+ else:
+ dataset = _create_dataset_int_array(
+ tmp, 'detectorIndex', self._detectorIndex)
+ result._add(name, _validate_int_array(dataset, ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
+ name = self.location + '/wavelengthIndex'
+ if self._wavelengthIndex is _AbsentDataset:
+ result._add(name, 'REQUIRED_DATASET_MISSING')
+ else:
+ try:
+ if type(self._wavelengthIndex) is type(
+ _PresentDataset) or 'wavelengthIndex' in self._h:
+ dataset = self._h['wavelengthIndex']
+ else:
+ dataset = _create_dataset_int_array(
+ tmp, 'wavelengthIndex', self._wavelengthIndex)
+ result._add(name, _validate_int_array(dataset, ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
+ name = self.location + '/wavelengthActual'
+ if self._wavelengthActual is _AbsentDataset:
+ result._add(name, 'OPTIONAL_DATASET_MISSING')
+ else:
+ try:
+ if type(self._wavelengthActual) is type(
+ _PresentDataset) or 'wavelengthActual' in self._h:
+ dataset = self._h['wavelengthActual']
+ else:
+ dataset = _create_dataset_float_array(
+ tmp, 'wavelengthActual', self._wavelengthActual)
+ result._add(name, _validate_float_array(dataset,
+ ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
+ name = self.location + '/wavelengthEmissionActual'
+ if self._wavelengthEmissionActual is _AbsentDataset:
+ result._add(name, 'OPTIONAL_DATASET_MISSING')
+ else:
+ try:
+ if type(self._wavelengthEmissionActual) is type(
+ _PresentDataset
+ ) or 'wavelengthEmissionActual' in self._h:
+ dataset = self._h['wavelengthEmissionActual']
+ else:
+ dataset = _create_dataset_float_array(
+ tmp, 'wavelengthEmissionActual',
+ self._wavelengthEmissionActual)
+ result._add(name, _validate_float_array(dataset,
+ ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
+ name = self.location + '/dataType'
+ if self._dataType is _AbsentDataset:
+ result._add(name, 'REQUIRED_DATASET_MISSING')
+ else:
+ try:
+ if type(self._dataType) is type(
+ _PresentDataset) or 'dataType' in self._h:
+ dataset = self._h['dataType']
+ else:
+ dataset = _create_dataset_int_array(
+ tmp, 'dataType', self._dataType)
+ result._add(name, _validate_int_array(dataset, ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
+ name = self.location + '/dataUnit'
+ if self._dataUnit is _AbsentDataset:
+ result._add(name, 'OPTIONAL_DATASET_MISSING')
+ else:
+ try:
+ if type(self._dataUnit) is type(
+ _PresentDataset) or 'dataUnit' in self._h:
+ dataset = self._h['dataUnit']
+ else:
+ dataset = _create_dataset_string_array(
+ tmp, 'dataUnit', self._dataUnit)
+ result._add(name, _validate_string_array(dataset,
+ ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
+ name = self.location + '/dataTypeLabel'
+ if self._dataTypeLabel is _AbsentDataset:
+ result._add(name, 'OPTIONAL_DATASET_MISSING')
+ else:
+ try:
+ if type(self._dataTypeLabel) is type(
+ _PresentDataset) or 'dataTypeLabel' in self._h:
+ dataset = self._h['dataTypeLabel']
+ else:
+ dataset = _create_dataset_string_array(
+ tmp, 'dataTypeLabel', self._dataTypeLabel)
+ result._add(name, _validate_string_array(dataset,
+ ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
+ name = self.location + '/dataTypeIndex'
+ if self._dataTypeIndex is _AbsentDataset:
+ result._add(name, 'REQUIRED_DATASET_MISSING')
+ else:
+ try:
+ if type(self._dataTypeIndex) is type(
+ _PresentDataset) or 'dataTypeIndex' in self._h:
+ dataset = self._h['dataTypeIndex']
+ else:
+ dataset = _create_dataset_int_array(
+ tmp, 'dataTypeIndex', self._dataTypeIndex)
+ result._add(name, _validate_int_array(dataset, ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
+ name = self.location + '/sourcePower'
+ if self._sourcePower is _AbsentDataset:
+ result._add(name, 'OPTIONAL_DATASET_MISSING')
+ else:
+ try:
+ if type(self._sourcePower) is type(
+ _PresentDataset) or 'sourcePower' in self._h:
+ dataset = self._h['sourcePower']
+ else:
+ dataset = _create_dataset_float_array(
+ tmp, 'sourcePower', self._sourcePower)
+ result._add(name, _validate_float_array(dataset,
+ ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
+ name = self.location + '/detectorGain'
+ if self._detectorGain is _AbsentDataset:
+ result._add(name, 'OPTIONAL_DATASET_MISSING')
+ else:
+ try:
+ if type(self._detectorGain) is type(
+ _PresentDataset) or 'detectorGain' in self._h:
+ dataset = self._h['detectorGain']
+ else:
+ dataset = _create_dataset_float_array(
+ tmp, 'detectorGain', self._detectorGain)
+ result._add(name, _validate_float_array(dataset,
+ ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
+ for key in self._h.keys():
+ if not any(
+ [key.startswith(name) for name in self._snirf_names]):
+ if type(self._h[key]) is h5py.Group:
+ result._add(self.location + '/' + key,
+ 'UNRECOGNIZED_GROUP')
+ elif type(self._h[key]) is h5py.Dataset:
+ result._add(self.location + '/' + key,
+ 'UNRECOGNIZED_DATASET')
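+ # Editorial example: a stray entry such as measurementLists/comment starts
+ # with none of the names above and would be reported here as
+ # UNRECOGNIZED_DATASET (or UNRECOGNIZED_GROUP for an HDF5 group).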
+
+
+class Probe(Group):
+ """Wrapper for Group of type `probe`.
+
+ This is a structured variable that describes the probe (source-detector)
+ geometry. This variable has a number of required fields.
+
+ """
+
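+ # Usage sketch (editorial; coordinates are hypothetical): a minimal CW
+ # probe defines wavelengths and either 2D or 3D optode positions:
+ #     probe.wavelengths = [760.0, 850.0]
+ #     probe.sourcePos2D = [[0.0, 0.0]]
+ #     probe.detectorPos2D = [[30.0, 0.0]]  # units per metaDataTags.LengthUnit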
+ def __init__(self, var, cfg: SnirfConfig):
+ super().__init__(var, cfg)
+ self._wavelengths = _AbsentDataset # [,...]*
+ self._wavelengthsEmission = _AbsentDataset # [,...]
+ self._sourcePos2D = _AbsentDataset # [[,...]]*2
+ self._sourcePos3D = _AbsentDataset # [[,...]]*2
+ self._detectorPos2D = _AbsentDataset # [[,...]]*3
+ self._detectorPos3D = _AbsentDataset # [[,...]]*3
+ self._frequencies = _AbsentDataset # [,...]
+ self._timeDelays = _AbsentDataset # [,...]
+ self._timeDelayWidths = _AbsentDataset # [,...]
+ self._momentOrders = _AbsentDataset # [,...]
+ self._correlationTimeDelays = _AbsentDataset # [,...]
+ self._correlationTimeDelayWidths = _AbsentDataset # [,...]
+ self._sourceLabels = _AbsentDataset # [["s",...]]
+ self._detectorLabels = _AbsentDataset # ["s",...]
+ self._landmarkPos2D = _AbsentDataset # [[,...]]
+ self._landmarkPos3D = _AbsentDataset # [[,...]]
+ self._landmarkLabels = _AbsentDataset # ["s",...]
+ self._coordinateSystem = _AbsentDataset # "s"
+ self._coordinateSystemDescription = _AbsentDataset # "s"
+ self._snirf_names = [
+ 'wavelengths',
+ 'wavelengthsEmission',
+ 'sourcePos2D',
+ 'sourcePos3D',
+ 'detectorPos2D',
+ 'detectorPos3D',
+ 'frequencies',
+ 'timeDelays',
+ 'timeDelayWidths',
+ 'momentOrders',
+ 'correlationTimeDelays',
+ 'correlationTimeDelayWidths',
+ 'sourceLabels',
+ 'detectorLabels',
+ 'landmarkPos2D',
+ 'landmarkPos3D',
+ 'landmarkLabels',
+ 'coordinateSystem',
+ 'coordinateSystemDescription',
+ ]
+
+ self._indexed_groups = []
+ if 'wavelengths' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._wavelengths = _read_float_array(self._h['wavelengths'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._wavelengths = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._wavelengths = _AbsentDataset
+ if 'wavelengthsEmission' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._wavelengthsEmission = _read_float_array(
+ self._h['wavelengthsEmission'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._wavelengthsEmission = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._wavelengthsEmission = _AbsentDataset
+ if 'sourcePos2D' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._sourcePos2D = _read_float_array(self._h['sourcePos2D'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._sourcePos2D = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._sourcePos2D = _AbsentDataset
+ if 'sourcePos3D' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._sourcePos3D = _read_float_array(self._h['sourcePos3D'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._sourcePos3D = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._sourcePos3D = _AbsentDataset
+ if 'detectorPos2D' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._detectorPos2D = _read_float_array(
+ self._h['detectorPos2D'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._detectorPos2D = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._detectorPos2D = _AbsentDataset
+ if 'detectorPos3D' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._detectorPos3D = _read_float_array(
+ self._h['detectorPos3D'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._detectorPos3D = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._detectorPos3D = _AbsentDataset
+ if 'frequencies' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._frequencies = _read_float_array(self._h['frequencies'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._frequencies = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._frequencies = _AbsentDataset
+ if 'timeDelays' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._timeDelays = _read_float_array(self._h['timeDelays'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._timeDelays = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._timeDelays = _AbsentDataset
+ if 'timeDelayWidths' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._timeDelayWidths = _read_float_array(
+ self._h['timeDelayWidths'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._timeDelayWidths = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._timeDelayWidths = _AbsentDataset
+ if 'momentOrders' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._momentOrders = _read_float_array(self._h['momentOrders'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._momentOrders = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._momentOrders = _AbsentDataset
+ if 'correlationTimeDelays' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._correlationTimeDelays = _read_float_array(
+ self._h['correlationTimeDelays'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._correlationTimeDelays = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._correlationTimeDelays = _AbsentDataset
+ if 'correlationTimeDelayWidths' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._correlationTimeDelayWidths = _read_float_array(
+ self._h['correlationTimeDelayWidths'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._correlationTimeDelayWidths = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._correlationTimeDelayWidths = _AbsentDataset
+ if 'sourceLabels' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._sourceLabels = _read_string_array(
+ self._h['sourceLabels'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._sourceLabels = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._sourceLabels = _AbsentDataset
+ if 'detectorLabels' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._detectorLabels = _read_string_array(
+ self._h['detectorLabels'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._detectorLabels = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._detectorLabels = _AbsentDataset
+ if 'landmarkPos2D' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._landmarkPos2D = _read_float_array(
+ self._h['landmarkPos2D'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._landmarkPos2D = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._landmarkPos2D = _AbsentDataset
+ if 'landmarkPos3D' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._landmarkPos3D = _read_float_array(
+ self._h['landmarkPos3D'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._landmarkPos3D = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._landmarkPos3D = _AbsentDataset
+ if 'landmarkLabels' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._landmarkLabels = _read_string_array(
+ self._h['landmarkLabels'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._landmarkLabels = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._landmarkLabels = _AbsentDataset
+ if 'coordinateSystem' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._coordinateSystem = _read_string(
+ self._h['coordinateSystem'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._coordinateSystem = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._coordinateSystem = _AbsentDataset
+ if 'coordinateSystemDescription' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._coordinateSystemDescription = _read_string(
+ self._h['coordinateSystemDescription'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._coordinateSystemDescription = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._coordinateSystemDescription = _AbsentDataset
+
+ @property
+ def wavelengths(self):
+ """SNIRF field `wavelengths`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ This field describes the "nominal" wavelengths used (in `nm` unit). This is indexed by the
+ `wavelengthIndex` of the measurementList variable. For example, `probe.wavelengths` = [690,
+ 780, 830]; implies that the measurements were taken at three wavelengths (690 nm,
+ 780 nm, and 830 nm). The wavelength index of
+ `measurementList(k).wavelengthIndex` variable refers to this field.
+ `measurementList(k).wavelengthIndex` = 2 means the kth measurement
+ was at 780 nm.
+
+ Please note that this field stores the "nominal" wavelengths. If the precise
+ (measured) wavelengths differ from the nominal wavelengths, one can store those
in the `measurementList.wavelengthActual` field in a per-channel fashion.
The number of wavelengths is not limited (except that at least two are needed
@@ -2138,7 +2950,7 @@ def wavelengths(self):
"""
- if type(self._wavelengths) is type(_AbsentDataset):
+ if self._wavelengths is _AbsentDataset:
return None
if type(self._wavelengths) is type(_PresentDataset):
return _read_float_array(self._h['wavelengths'])
@@ -2148,7 +2960,8 @@ def wavelengths(self):
@wavelengths.setter
def wavelengths(self, value):
- self._wavelengths = value
+ if value is not None and any([v is not None for v in value]):
+ self._wavelengths = np.array(value)
# self._cfg.logger.info('Assignment to %s/wavelengths in %s', self.location, self.filename)
@wavelengths.deleter
@@ -2175,7 +2988,7 @@ def wavelengthsEmission(self):
"""
- if type(self._wavelengthsEmission) is type(_AbsentDataset):
+ if self._wavelengthsEmission is _AbsentDataset:
return None
if type(self._wavelengthsEmission) is type(_PresentDataset):
return _read_float_array(self._h['wavelengthsEmission'])
@@ -2186,7 +2999,8 @@ def wavelengthsEmission(self):
@wavelengthsEmission.setter
def wavelengthsEmission(self, value):
- self._wavelengthsEmission = value
+ if value is not None and any([v is not None for v in value]):
+ self._wavelengthsEmission = np.array(value)
# self._cfg.logger.info('Assignment to %s/wavelengthsEmission in %s', self.location, self.filename)
@wavelengthsEmission.deleter
@@ -2210,7 +3024,7 @@ def sourcePos2D(self):
"""
- if type(self._sourcePos2D) is type(_AbsentDataset):
+ if self._sourcePos2D is _AbsentDataset:
return None
if type(self._sourcePos2D) is type(_PresentDataset):
return _read_float_array(self._h['sourcePos2D'])
@@ -2220,7 +3034,8 @@ def sourcePos2D(self):
@sourcePos2D.setter
def sourcePos2D(self, value):
- self._sourcePos2D = value
+ if value is not None and any([v is not None for v in value]):
+ self._sourcePos2D = np.array(value)
# self._cfg.logger.info('Assignment to %s/sourcePos2D in %s', self.location, self.filename)
@sourcePos2D.deleter
@@ -2241,7 +3056,7 @@ def sourcePos3D(self):
"""
- if type(self._sourcePos3D) is type(_AbsentDataset):
+ if self._sourcePos3D is _AbsentDataset:
return None
if type(self._sourcePos3D) is type(_PresentDataset):
return _read_float_array(self._h['sourcePos3D'])
@@ -2251,7 +3066,8 @@ def sourcePos3D(self):
@sourcePos3D.setter
def sourcePos3D(self, value):
- self._sourcePos3D = value
+ if value is not None and any([v is not None for v in value]):
+ self._sourcePos3D = np.array(value)
# self._cfg.logger.info('Assignment to %s/sourcePos3D in %s', self.location, self.filename)
@sourcePos3D.deleter
@@ -2272,7 +3088,7 @@ def detectorPos2D(self):
"""
- if type(self._detectorPos2D) is type(_AbsentDataset):
+ if self._detectorPos2D is _AbsentDataset:
return None
if type(self._detectorPos2D) is type(_PresentDataset):
return _read_float_array(self._h['detectorPos2D'])
@@ -2283,7 +3099,8 @@ def detectorPos2D(self):
@detectorPos2D.setter
def detectorPos2D(self, value):
- self._detectorPos2D = value
+ if value is not None and any([v is not None for v in value]):
+ self._detectorPos2D = np.array(value)
# self._cfg.logger.info('Assignment to %s/detectorPos2D in %s', self.location, self.filename)
@detectorPos2D.deleter
@@ -2304,7 +3121,7 @@ def detectorPos3D(self):
"""
- if type(self._detectorPos3D) is type(_AbsentDataset):
+ if self._detectorPos3D is _AbsentDataset:
return None
if type(self._detectorPos3D) is type(_PresentDataset):
return _read_float_array(self._h['detectorPos3D'])
@@ -2315,7 +3132,8 @@ def detectorPos3D(self):
@detectorPos3D.setter
def detectorPos3D(self, value):
- self._detectorPos3D = value
+ if value is not None and any([v is not None for v in value]):
+ self._detectorPos3D = np.array(value)
# self._cfg.logger.info('Assignment to %s/detectorPos3D in %s', self.location, self.filename)
@detectorPos3D.deleter
@@ -2337,7 +3155,7 @@ def frequencies(self):
"""
- if type(self._frequencies) is type(_AbsentDataset):
+ if self._frequencies is _AbsentDataset:
return None
if type(self._frequencies) is type(_PresentDataset):
return _read_float_array(self._h['frequencies'])
@@ -2347,7 +3165,8 @@ def frequencies(self):
@frequencies.setter
def frequencies(self, value):
- self._frequencies = value
+ if value is not None and any([v is not None for v in value]):
+ self._frequencies = np.array(value)
# self._cfg.logger.info('Assignment to %s/frequencies in %s', self.location, self.filename)
@frequencies.deleter
@@ -2370,7 +3189,7 @@ def timeDelays(self):
"""
- if type(self._timeDelays) is type(_AbsentDataset):
+ if self._timeDelays is _AbsentDataset:
return None
if type(self._timeDelays) is type(_PresentDataset):
return _read_float_array(self._h['timeDelays'])
@@ -2380,7 +3199,8 @@ def timeDelays(self):
@timeDelays.setter
def timeDelays(self, value):
- self._timeDelays = value
+ if value is not None and any([v is not None for v in value]):
+ self._timeDelays = np.array(value)
# self._cfg.logger.info('Assignment to %s/timeDelays in %s', self.location, self.filename)
@timeDelays.deleter
@@ -2403,7 +3223,7 @@ def timeDelayWidths(self):
"""
- if type(self._timeDelayWidths) is type(_AbsentDataset):
+ if self._timeDelayWidths is _AbsentDataset:
return None
if type(self._timeDelayWidths) is type(_PresentDataset):
return _read_float_array(self._h['timeDelayWidths'])
@@ -2414,7 +3234,8 @@ def timeDelayWidths(self):
@timeDelayWidths.setter
def timeDelayWidths(self, value):
- self._timeDelayWidths = value
+ if value is not None and any([v is not None for v in value]):
+ self._timeDelayWidths = np.array(value)
# self._cfg.logger.info('Assignment to %s/timeDelayWidths in %s', self.location, self.filename)
@timeDelayWidths.deleter
@@ -2442,7 +3263,7 @@ def momentOrders(self):
"""
- if type(self._momentOrders) is type(_AbsentDataset):
+ if self._momentOrders is _AbsentDataset:
return None
if type(self._momentOrders) is type(_PresentDataset):
return _read_float_array(self._h['momentOrders'])
@@ -2452,7 +3273,8 @@ def momentOrders(self):
@momentOrders.setter
def momentOrders(self, value):
- self._momentOrders = value
+ if value is not None and any([v is not None for v in value]):
+ self._momentOrders = np.array(value)
# self._cfg.logger.info('Assignment to %s/momentOrders in %s', self.location, self.filename)
@momentOrders.deleter
@@ -2475,7 +3297,7 @@ def correlationTimeDelays(self):
"""
- if type(self._correlationTimeDelays) is type(_AbsentDataset):
+ if self._correlationTimeDelays is _AbsentDataset:
return None
if type(self._correlationTimeDelays) is type(_PresentDataset):
return _read_float_array(self._h['correlationTimeDelays'])
@@ -2486,7 +3308,8 @@ def correlationTimeDelays(self):
@correlationTimeDelays.setter
def correlationTimeDelays(self, value):
- self._correlationTimeDelays = value
+ if value is not None and any([v is not None for v in value]):
+ self._correlationTimeDelays = np.array(value)
# self._cfg.logger.info('Assignment to %s/correlationTimeDelays in %s', self.location, self.filename)
@correlationTimeDelays.deleter
@@ -2509,7 +3332,7 @@ def correlationTimeDelayWidths(self):
"""
- if type(self._correlationTimeDelayWidths) is type(_AbsentDataset):
+ if self._correlationTimeDelayWidths is _AbsentDataset:
return None
if type(self._correlationTimeDelayWidths) is type(_PresentDataset):
return _read_float_array(self._h['correlationTimeDelayWidths'])
@@ -2520,7 +3343,8 @@ def correlationTimeDelayWidths(self):
@correlationTimeDelayWidths.setter
def correlationTimeDelayWidths(self, value):
- self._correlationTimeDelayWidths = value
+ if value is not None and any([v is not None for v in value]):
+ self._correlationTimeDelayWidths = np.array(value)
# self._cfg.logger.info('Assignment to %s/correlationTimeDelayWidths in %s', self.location, self.filename)
@correlationTimeDelayWidths.deleter
@@ -2545,7 +3369,7 @@ def sourceLabels(self):
"""
- if type(self._sourceLabels) is type(_AbsentDataset):
+ if self._sourceLabels is _AbsentDataset:
return None
if type(self._sourceLabels) is type(_PresentDataset):
return _read_string_array(self._h['sourceLabels'])
@@ -2555,7 +3379,8 @@ def sourceLabels(self):
@sourceLabels.setter
def sourceLabels(self, value):
- self._sourceLabels = value
+ if value is not None and any([v is not None for v in value]):
+ self._sourceLabels = np.array(value)
# self._cfg.logger.info('Assignment to %s/sourceLabels in %s', self.location, self.filename)
@sourceLabels.deleter
@@ -2578,7 +3403,7 @@ def detectorLabels(self):
"""
- if type(self._detectorLabels) is type(_AbsentDataset):
+ if self._detectorLabels is _AbsentDataset:
return None
if type(self._detectorLabels) is type(_PresentDataset):
return _read_string_array(self._h['detectorLabels'])
@@ -2589,7 +3414,8 @@ def detectorLabels(self):
@detectorLabels.setter
def detectorLabels(self, value):
- self._detectorLabels = value
+ if value is not None and any([v is not None for v in value]):
+ self._detectorLabels = np.array(value)
# self._cfg.logger.info('Assignment to %s/detectorLabels in %s', self.location, self.filename)
@detectorLabels.deleter
@@ -2615,7 +3441,7 @@ def landmarkPos2D(self):
"""
- if type(self._landmarkPos2D) is type(_AbsentDataset):
+ if self._landmarkPos2D is _AbsentDataset:
return None
if type(self._landmarkPos2D) is type(_PresentDataset):
return _read_float_array(self._h['landmarkPos2D'])
@@ -2626,7 +3452,8 @@ def landmarkPos2D(self):
@landmarkPos2D.setter
def landmarkPos2D(self, value):
- self._landmarkPos2D = value
+ if value is not None and any([v is not None for v in value]):
+ self._landmarkPos2D = np.array(value)
# self._cfg.logger.info('Assignment to %s/landmarkPos2D in %s', self.location, self.filename)
@landmarkPos2D.deleter
@@ -2652,7 +3479,7 @@ def landmarkPos3D(self):
"""
- if type(self._landmarkPos3D) is type(_AbsentDataset):
+ if self._landmarkPos3D is _AbsentDataset:
return None
if type(self._landmarkPos3D) is type(_PresentDataset):
return _read_float_array(self._h['landmarkPos3D'])
@@ -2663,7 +3490,8 @@ def landmarkPos3D(self):
@landmarkPos3D.setter
def landmarkPos3D(self, value):
- self._landmarkPos3D = value
+ if value is not None and any([v is not None for v in value]):
+ self._landmarkPos3D = np.array(value)
# self._cfg.logger.info('Assignment to %s/landmarkPos3D in %s', self.location, self.filename)
@landmarkPos3D.deleter
@@ -2690,7 +3518,7 @@ def landmarkLabels(self):
"""
- if type(self._landmarkLabels) is type(_AbsentDataset):
+ if self._landmarkLabels is _AbsentDataset:
return None
if type(self._landmarkLabels) is type(_PresentDataset):
return _read_string_array(self._h['landmarkLabels'])
@@ -2701,7 +3529,8 @@ def landmarkLabels(self):
@landmarkLabels.setter
def landmarkLabels(self, value):
- self._landmarkLabels = value
+ if value is not None and any([v is not None for v in value]):
+ self._landmarkLabels = np.array(value)
# self._cfg.logger.info('Assignment to %s/landmarkLabels in %s', self.location, self.filename)
@landmarkLabels.deleter
@@ -2728,7 +3557,7 @@ def coordinateSystem(self):
"""
- if type(self._coordinateSystem) is type(_AbsentDataset):
+ if self._coordinateSystem is _AbsentDataset:
return None
if type(self._coordinateSystem) is type(_PresentDataset):
return _read_string(self._h['coordinateSystem'])
@@ -2762,7 +3591,7 @@ def coordinateSystemDescription(self):
"""
- if type(self._coordinateSystemDescription) is type(_AbsentDataset):
+ if self._coordinateSystemDescription is _AbsentDataset:
return None
if type(self._coordinateSystemDescription) is type(_PresentDataset):
return _read_string(self._h['coordinateSystemDescription'])
@@ -2782,43 +3611,6 @@ def coordinateSystemDescription(self):
self._cfg.logger.info('Deleted %s/coordinateSystemDescription from %s',
self.location, self.filename)
- @property
- def useLocalIndex(self):
- """SNIRF field `useLocalIndex`.
-
- If dynamic_loading=True, the data is loaded from the SNIRF file only
- when accessed through the getter
-
- For modular NIRS systems, setting this flag to a non-zero integer indicates
- that `measurementList(k).sourceIndex` and `measurementList(k).detectorIndex`
- are module-specific local-indices. One must also include
- `measurementList(k).moduleIndex`, or when cross-module channels present, both
- `measurementList(k).sourceModuleIndex` and `measurementList(k).detectorModuleIndex`
- in the `measurementList` structure in order to restore the global indices
- of the sources/detectors.
-
-
- """
- if type(self._useLocalIndex) is type(_AbsentDataset):
- return None
- if type(self._useLocalIndex) is type(_PresentDataset):
- return _read_int(self._h['useLocalIndex'])
- self._cfg.logger.info(
- 'Dynamically loaded %s/useLocalIndex from %s', self.location,
- self.filename)
- return self._useLocalIndex
-
- @useLocalIndex.setter
- def useLocalIndex(self, value):
- self._useLocalIndex = value
- # self._cfg.logger.info('Assignment to %s/useLocalIndex in %s', self.location, self.filename)
-
- @useLocalIndex.deleter
- def useLocalIndex(self):
- self._useLocalIndex = _AbsentDataset
- self._cfg.logger.info('Deleted %s/useLocalIndex from %s',
- self.location, self.filename)
-
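The `_save` body below repeats one idiom per dataset: delete any existing HDF5 dataset at the target name, then recreate it from the current in-memory value. A compact sketch of that idiom with plain `h5py` (names here are illustrative, not library code):

    import h5py

    def _rewrite_dataset(file: h5py.File, name: str, data) -> None:
        # Deleting and recreating sidesteps dtype/shape changes that an
        # in-place write to an existing dataset could not accommodate.
        if name in file:
            del file[name]
        file.create_dataset(name, data=data)

    with h5py.File('scratch.h5', 'w') as f:
        _rewrite_dataset(f, '/nirs1/probe/wavelengths', [690.0, 830.0])
        _rewrite_dataset(f, '/nirs1/probe/wavelengths', [690.0, 750.0, 830.0])
        print(f['/nirs1/probe/wavelengths'][:])  # last write wins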
def _save(self, *args):
if len(args) > 0 and type(args[0]) is h5py.File:
file = args[0]
@@ -2837,7 +3629,7 @@ def _save(self, *args):
self.__class__.__name__ +
' instance without a filename')
name = self.location + '/wavelengths'
- if type(self._wavelengths) not in [type(_AbsentDataset), type(None)]:
+ if self._wavelengths is not _AbsentDataset:
data = self.wavelengths # Use loader function via getter
if name in file:
del file[name]
@@ -2848,9 +3640,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/wavelengthsEmission'
- if type(self._wavelengthsEmission) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._wavelengthsEmission is not _AbsentDataset:
data = self.wavelengthsEmission # Use loader function via getter
if name in file:
del file[name]
@@ -2861,7 +3651,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/sourcePos2D'
- if type(self._sourcePos2D) not in [type(_AbsentDataset), type(None)]:
+ if self._sourcePos2D is not _AbsentDataset:
data = self.sourcePos2D # Use loader function via getter
if name in file:
del file[name]
@@ -2872,7 +3662,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/sourcePos3D'
- if type(self._sourcePos3D) not in [type(_AbsentDataset), type(None)]:
+ if self._sourcePos3D is not _AbsentDataset:
data = self.sourcePos3D # Use loader function via getter
if name in file:
del file[name]
@@ -2883,7 +3673,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/detectorPos2D'
- if type(self._detectorPos2D) not in [type(_AbsentDataset), type(None)]:
+ if self._detectorPos2D is not _AbsentDataset:
data = self.detectorPos2D # Use loader function via getter
if name in file:
del file[name]
@@ -2894,7 +3684,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/detectorPos3D'
- if type(self._detectorPos3D) not in [type(_AbsentDataset), type(None)]:
+ if self._detectorPos3D is not _AbsentDataset:
data = self.detectorPos3D # Use loader function via getter
if name in file:
del file[name]
@@ -2905,7 +3695,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/frequencies'
- if type(self._frequencies) not in [type(_AbsentDataset), type(None)]:
+ if self._frequencies is not _AbsentDataset:
data = self.frequencies # Use loader function via getter
if name in file:
del file[name]
@@ -2916,7 +3706,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/timeDelays'
- if type(self._timeDelays) not in [type(_AbsentDataset), type(None)]:
+ if self._timeDelays is not _AbsentDataset:
data = self.timeDelays # Use loader function via getter
if name in file:
del file[name]
@@ -2927,9 +3717,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/timeDelayWidths'
- if type(self._timeDelayWidths) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._timeDelayWidths is not _AbsentDataset:
data = self.timeDelayWidths # Use loader function via getter
if name in file:
del file[name]
@@ -2940,7 +3728,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/momentOrders'
- if type(self._momentOrders) not in [type(_AbsentDataset), type(None)]:
+ if self._momentOrders is not _AbsentDataset:
data = self.momentOrders # Use loader function via getter
if name in file:
del file[name]
@@ -2951,9 +3739,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/correlationTimeDelays'
- if type(self._correlationTimeDelays) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._correlationTimeDelays is not _AbsentDataset:
data = self.correlationTimeDelays # Use loader function via getter
if name in file:
del file[name]
@@ -2964,9 +3750,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/correlationTimeDelayWidths'
- if type(self._correlationTimeDelayWidths) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._correlationTimeDelayWidths is not _AbsentDataset:
data = self.correlationTimeDelayWidths # Use loader function via getter
if name in file:
del file[name]
@@ -2977,7 +3761,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/sourceLabels'
- if type(self._sourceLabels) not in [type(_AbsentDataset), type(None)]:
+ if self._sourceLabels is not _AbsentDataset:
data = self.sourceLabels # Use loader function via getter
if name in file:
del file[name]
@@ -2988,9 +3772,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/detectorLabels'
- if type(self._detectorLabels) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._detectorLabels is not _AbsentDataset:
data = self.detectorLabels # Use loader function via getter
if name in file:
del file[name]
@@ -3001,7 +3783,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/landmarkPos2D'
- if type(self._landmarkPos2D) not in [type(_AbsentDataset), type(None)]:
+ if self._landmarkPos2D is not _AbsentDataset:
data = self.landmarkPos2D # Use loader function via getter
if name in file:
del file[name]
@@ -3012,7 +3794,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/landmarkPos3D'
- if type(self._landmarkPos3D) not in [type(_AbsentDataset), type(None)]:
+ if self._landmarkPos3D is not _AbsentDataset:
data = self.landmarkPos3D # Use loader function via getter
if name in file:
del file[name]
@@ -3023,9 +3805,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/landmarkLabels'
- if type(self._landmarkLabels) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._landmarkLabels is not _AbsentDataset:
data = self.landmarkLabels # Use loader function via getter
if name in file:
del file[name]
@@ -3036,9 +3816,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/coordinateSystem'
- if type(self._coordinateSystem) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._coordinateSystem is not _AbsentDataset:
data = self.coordinateSystem # Use loader function via getter
if name in file:
del file[name]
@@ -3049,9 +3827,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/coordinateSystemDescription'
- if type(self._coordinateSystemDescription) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._coordinateSystemDescription is not _AbsentDataset:
data = self.coordinateSystemDescription # Use loader function via getter
if name in file:
del file[name]
@@ -3061,23 +3837,15 @@ def _save(self, *args):
if name in file:
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
- name = self.location + '/useLocalIndex'
- if type(self._useLocalIndex) not in [type(_AbsentDataset), type(None)]:
- data = self.useLocalIndex # Use loader function via getter
- if name in file:
- del file[name]
- _create_dataset_int(file, name, data)
- # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
- else:
- if name in file:
- del file[name]
- self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
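The `_validate` methods below replace the old `TemporaryFile()` scratch file with a purely in-memory HDF5 file: the `'core'` driver keeps the file image in RAM, and `backing_store=False` means the `uuid4()` name is never written to disk; it only has to be unique. A minimal sketch of the idiom:

    import uuid
    import h5py

    with h5py.File(str(uuid.uuid4()), 'w', driver='core',
                   backing_store=False) as tmp:
        # Behaves like any h5py file, but nothing touches the filesystem
        tmp.create_dataset('wavelengths', data=[690.0, 830.0])
        print(tmp['wavelengths'][:])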
def _validate(self, result: ValidationResult):
# Validate unwritten datasets after writing them to this tempfile
- with h5py.File(TemporaryFile(), 'w') as tmp:
+ with h5py.File(str(uuid.uuid4()),
+ 'w',
+ driver='core',
+ backing_store=False) as tmp:
name = self.location + '/wavelengths'
- if type(self._wavelengths) in [type(_AbsentDataset), type(None)]:
+ if self._wavelengths is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -3092,9 +3860,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/wavelengthsEmission'
- if type(self._wavelengthsEmission) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._wavelengthsEmission is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3111,7 +3877,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/sourcePos2D'
- if type(self._sourcePos2D) in [type(_AbsentDataset), type(None)]:
+ if self._sourcePos2D is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -3126,7 +3892,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/sourcePos3D'
- if type(self._sourcePos3D) in [type(_AbsentDataset), type(None)]:
+ if self._sourcePos3D is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -3141,7 +3907,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/detectorPos2D'
- if type(self._detectorPos2D) in [type(_AbsentDataset), type(None)]:
+ if self._detectorPos2D is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -3156,7 +3922,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/detectorPos3D'
- if type(self._detectorPos3D) in [type(_AbsentDataset), type(None)]:
+ if self._detectorPos3D is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -3171,7 +3937,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/frequencies'
- if type(self._frequencies) in [type(_AbsentDataset), type(None)]:
+ if self._frequencies is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3186,7 +3952,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/timeDelays'
- if type(self._timeDelays) in [type(_AbsentDataset), type(None)]:
+ if self._timeDelays is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3201,9 +3967,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/timeDelayWidths'
- if type(self._timeDelayWidths) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._timeDelayWidths is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3218,7 +3982,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/momentOrders'
- if type(self._momentOrders) in [type(_AbsentDataset), type(None)]:
+ if self._momentOrders is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3233,9 +3997,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/correlationTimeDelays'
- if type(self._correlationTimeDelays) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._correlationTimeDelays is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3252,9 +4014,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/correlationTimeDelayWidths'
- if type(self._correlationTimeDelayWidths) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._correlationTimeDelayWidths is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3271,7 +4031,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/sourceLabels'
- if type(self._sourceLabels) in [type(_AbsentDataset), type(None)]:
+ if self._sourceLabels is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3286,9 +4046,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/detectorLabels'
- if type(self._detectorLabels) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._detectorLabels is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3303,7 +4061,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/landmarkPos2D'
- if type(self._landmarkPos2D) in [type(_AbsentDataset), type(None)]:
+ if self._landmarkPos2D is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3318,7 +4076,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/landmarkPos3D'
- if type(self._landmarkPos3D) in [type(_AbsentDataset), type(None)]:
+ if self._landmarkPos3D is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3333,9 +4091,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/landmarkLabels'
- if type(self._landmarkLabels) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._landmarkLabels is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3350,9 +4106,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/coordinateSystem'
- if type(self._coordinateSystem) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._coordinateSystem is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3366,9 +4120,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/coordinateSystemDescription'
- if type(self._coordinateSystemDescription) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._coordinateSystemDescription is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -3383,26 +4135,6 @@ def _validate(self, result: ValidationResult):
result._add(name, _validate_string(dataset))
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
- name = self.location + '/useLocalIndex'
- if type(self._useLocalIndex) in [type(_AbsentDataset), type(None)]:
- result._add(name, 'OPTIONAL_DATASET_MISSING')
- else:
- try:
- if type(self._useLocalIndex) is type(
- _PresentDataset) or 'useLocalIndex' in self._h:
- dataset = self._h['useLocalIndex']
- else:
- dataset = _create_dataset_int(tmp, 'useLocalIndex',
- self._useLocalIndex)
- err_code = _validate_int(dataset)
- if _read_int(dataset) < 0 and err_code == 'OK':
- result._add(name, 'NEGATIVE_INDEX')
- elif _read_int(dataset) == 0 and err_code == 'OK':
- result._add(name, 'INDEX_OF_ZERO')
- else:
- result._add(name, err_code)
- except ValueError: # If the _create_dataset function can't convert the data
- result._add(name, 'INVALID_DATASET_TYPE')
for key in self._h.keys():
if not any(
[key.startswith(name) for name in self._snirf_names]):
@@ -3416,6 +4148,7 @@ def _validate(self, result: ValidationResult):
class NirsElement(Group):
"""Wrapper for an element of indexed group `Nirs`."""
+
def __init__(self, gid: h5py.h5g.GroupID, cfg: SnirfConfig):
super().__init__(gid, cfg)
self._metaDataTags = _AbsentGroup # {.}*
@@ -3467,7 +4200,7 @@ def metaDataTags(self):
The below five metadata records are minimally required in a SNIRF file
"""
- if type(self._metaDataTags) is type(_AbsentGroup):
+ if self._metaDataTags is _AbsentGroup:
return None
return self._metaDataTags
@@ -3555,7 +4288,7 @@ def probe(self):
geometry. This variable has a number of required fields.
"""
- if type(self._probe) is type(_AbsentGroup):
+ if self._probe is _AbsentGroup:
return None
return self._probe
@@ -3616,34 +4349,40 @@ def _save(self, *args):
raise ValueError('Cannot save an anonymous ' +
self.__class__.__name__ +
' instance without a filename')
- if type(self._metaDataTags) is type(
- _AbsentGroup) or self._metaDataTags.is_empty():
- if 'metaDataTags' in file:
- del file['metaDataTags']
+ name = self.location + '/metaDataTags'
+ if self._metaDataTags is _AbsentGroup or self._metaDataTags.is_empty():
+ if name in file:
+ del file[name]
self._cfg.logger.info('Deleted Group %s/metaDataTags from %s',
self.location, file)
else:
self.metaDataTags._save(*args)
+ name = self.location + '/data'
self.data._save(*args)
+ name = self.location + '/stim'
self.stim._save(*args)
- if type(self._probe) is type(_AbsentGroup) or self._probe.is_empty():
- if 'probe' in file:
- del file['probe']
+ name = self.location + '/probe'
+ if self._probe is _AbsentGroup or self._probe.is_empty():
+ if name in file:
+ del file[name]
self._cfg.logger.info('Deleted Group %s/probe from %s',
self.location, file)
else:
self.probe._save(*args)
+ name = self.location + '/aux'
self.aux._save(*args)
def _validate(self, result: ValidationResult):
# Validate unwritten datasets after writing them to this tempfile
- with h5py.File(TemporaryFile(), 'w') as tmp:
+ with h5py.File(str(uuid.uuid4()),
+ 'w',
+ driver='core',
+ backing_store=False) as tmp:
name = self.location + '/metaDataTags'
# If Group is not present in file and empty in the wrapper, it is missing
- if type(self._metaDataTags) in [
- type(_AbsentGroup), type(None)
- ] or ('metaDataTags' not in self._h
- and self._metaDataTags.is_empty()):
+ if self._metaDataTags is _AbsentGroup or (
+ 'metaDataTags' not in self._h
+ and self._metaDataTags.is_empty()):
result._add(name, 'REQUIRED_GROUP_MISSING')
else:
self._metaDataTags._validate(result)
@@ -3659,9 +4398,8 @@ def _validate(self, result: ValidationResult):
self.stim._validate(result)
name = self.location + '/probe'
# If Group is not present in file and empty in the wrapper, it is missing
- if type(self._probe) in [
- type(_AbsentGroup), type(None)
- ] or ('probe' not in self._h and self._probe.is_empty()):
+ if self._probe is _AbsentGroup or ('probe' not in self._h
+ and self._probe.is_empty()):
result._add(name, 'REQUIRED_GROUP_MISSING')
else:
self._probe._validate(result)
@@ -3708,15 +4446,20 @@ def __init__(self, h: h5py.File, cfg: SnirfConfig):
class DataElement(Group):
"""Wrapper for an element of indexed group `Data`."""
+
def __init__(self, gid: h5py.h5g.GroupID, cfg: SnirfConfig):
super().__init__(gid, cfg)
self._dataTimeSeries = _AbsentDataset # [[,...]]*
+ self._dataOffset = _AbsentDataset # [,...]
self._time = _AbsentDataset # [,...]*
- self._measurementList = _AbsentDataset # {i}*
+ self._measurementList = _AbsentDataset # {i}*1
+ self._measurementLists = _AbsentGroup # {.}*1
self._snirf_names = [
'dataTimeSeries',
+ 'dataOffset',
'time',
'measurementList',
+ 'measurementLists',
]
self._indexed_groups = []
@@ -3728,6 +4471,13 @@ def __init__(self, gid: h5py.h5g.GroupID, cfg: SnirfConfig):
self._dataTimeSeries = _PresentDataset
else: # if the dataset is not found on disk
self._dataTimeSeries = _AbsentDataset
+ if 'dataOffset' in self._h:
+ if not self._cfg.dynamic_loading:
+ self._dataOffset = _read_float_array(self._h['dataOffset'])
+ else: # if the dataset is found on disk but dynamic_loading=True
+ self._dataOffset = _PresentDataset
+ else: # if the dataset is not found on disk
+ self._dataOffset = _AbsentDataset
if 'time' in self._h:
if not self._cfg.dynamic_loading:
self._time = _read_float_array(self._h['time'])
@@ -3738,6 +4488,13 @@ def __init__(self, gid: h5py.h5g.GroupID, cfg: SnirfConfig):
self.measurementList = MeasurementList(self,
self._cfg) # Indexed group
self._indexed_groups.append(self.measurementList)
+ if 'measurementLists' in self._h:
+ self._measurementLists = MeasurementLists(
+ self._h['measurementLists'].id, self._cfg) # Group
+ else:
+ self._measurementLists = MeasurementLists(
+ self.location + '/' + 'measurementLists',
+ self._cfg) # Anonymous group (wrapper only)
@property
def dataTimeSeries(self):
@@ -3757,8 +4514,9 @@ def dataTimeSeries(self):
Chunked data is allowed to support real-time streaming of data in this array.
+
"""
- if type(self._dataTimeSeries) is type(_AbsentDataset):
+ if self._dataTimeSeries is _AbsentDataset:
return None
if type(self._dataTimeSeries) is type(_PresentDataset):
return _read_float_array(self._h['dataTimeSeries'])
@@ -3769,7 +4527,8 @@ def dataTimeSeries(self):
@dataTimeSeries.setter
def dataTimeSeries(self, value):
- self._dataTimeSeries = value
+ if value is not None and any([v is not None for v in value]):
+ self._dataTimeSeries = np.array(value)
# self._cfg.logger.info('Assignment to %s/dataTimeSeries in %s', self.location, self.filename)
@dataTimeSeries.deleter
@@ -3778,6 +4537,41 @@ def dataTimeSeries(self):
self._cfg.logger.info('Deleted %s/dataTimeSeries from %s',
self.location, self.filename)
+ @property
+ def dataOffset(self):
+ """SNIRF field `dataOffset`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ This stores an optional offset value per channel, which, when added to
+ `/nirs(i)/data(j)/dataTimeSeries`, results in absolute data values.
+
+ The length of this array is equal to the number of channels, as
+ represented by the second dimension in the `dataTimeSeries`.
+
+
+ """
+ if self._dataOffset is _AbsentDataset:
+ return None
+ if type(self._dataOffset) is type(_PresentDataset):
+ return _read_float_array(self._h['dataOffset'])
+ self._cfg.logger.info('Dynamically loaded %s/dataOffset from %s',
+ self.location, self.filename)
+ return self._dataOffset
+
+ @dataOffset.setter
+ def dataOffset(self, value):
+ if value is not None and any([v is not None for v in value]):
+ self._dataOffset = np.array(value)
+ # self._cfg.logger.info('Assignment to %s/dataOffset in %s', self.location, self.filename)
+
+ @dataOffset.deleter
+ def dataOffset(self):
+ self._dataOffset = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/dataOffset from %s', self.location,
+ self.filename)
+
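As the docstring above describes, `dataOffset` carries one value per channel; a hypothetical NumPy illustration (not library code) of recovering absolute values:

    import numpy as np

    # dataTimeSeries is <time points> x <channels>; dataOffset is per channel
    dataTimeSeries = np.array([[0.1, -0.2],
                               [0.3,  0.4]])
    dataOffset = np.array([10.0, 20.0])

    absolute = dataTimeSeries + dataOffset  # broadcasts along the time axis
    print(absolute)  # [[10.1 19.8] [10.3 20.4]]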
@property
def time(self):
"""SNIRF field `time`.
@@ -3802,7 +4596,7 @@ def time(self):
Chunked data is allowed to support real-time streaming of data in this array.
"""
- if type(self._time) is type(_AbsentDataset):
+ if self._time is _AbsentDataset:
return None
if type(self._time) is type(_PresentDataset):
return _read_float_array(self._h['time'])
@@ -3812,7 +4606,8 @@ def time(self):
@time.setter
def time(self, value):
- self._time = value
+ if value is not None and any([v is not None for v in value]):
+ self._time = np.array(value)
# self._cfg.logger.info('Assignment to %s/time in %s', self.location, self.filename)
@time.deleter
@@ -3855,6 +4650,41 @@ def measurementList(self):
self._cfg.logger.info('Deleted %s/measurementList from %s',
self.location, self.filename)
+ @property
+ def measurementLists(self):
+ """SNIRF field `measurementLists`.
+
+ If dynamic_loading=True, the data is loaded from the SNIRF file only
+ when accessed through the getter
+
+ The group for measurement list variables which map the data array onto the probe geometry (sources and detectors), data type, and wavelength. This group's datasets are arrays with size `<number of channels>`, with each position describing the corresponding column in the data matrix (i.e., the values at `measurementLists/sourceIndex(3)` and `measurementLists/detectorIndex(3)` correspond to `dataTimeSeries(:,3)`).
+
+ This group is required only if the indexed-group format `/nirs(i)/data(j)/measurementList(k)` is not used to encode the measurement list. `measurementLists` is an alternative that may offer better performance for larger probes.
+
+ The arrays of `measurementLists` hold the same fields as `measurementList` elements (`sourceIndex`, `detectorIndex`, `wavelengthIndex`, `dataType`, ...), one entry per channel.
+
+ """
+ if self._measurementLists is _AbsentGroup:
+ return None
+ return self._measurementLists
+
+ @measurementLists.setter
+ def measurementLists(self, value):
+ if isinstance(value, MeasurementLists):
+ self._measurementLists = _recursive_hdf5_copy(
+ self._measurementLists, value)
+ else:
+ raise ValueError(
+ "Only a Group of type MeasurementLists can be assigned to measurementLists."
+ )
+ # self._cfg.logger.info('Assignment to %s/measurementLists in %s', self.location, self.filename)
+
+ @measurementLists.deleter
+ def measurementLists(self):
+ self._measurementLists = _AbsentGroup
+ self._cfg.logger.info('Deleted %s/measurementLists from %s',
+ self.location, self.filename)
+
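A hypothetical usage sketch for the new group, assuming the generated `MeasurementLists` wrapper exposes the spec's per-channel array fields and that `example.snirf` exists; since the setter above only accepts a whole `MeasurementLists` instance, field-by-field assignment is the everyday route:

    import numpy as np
    from snirf import Snirf

    with Snirf('example.snirf', 'r+') as s:
        data = s.nirs[0].data[0]
        # One array per field, one element per dataTimeSeries column
        data.measurementLists.sourceIndex = np.array([1, 1, 2, 2])
        data.measurementLists.detectorIndex = np.array([1, 2, 1, 2])
        data.measurementLists.wavelengthIndex = np.array([1, 2, 1, 2])
        data.measurementLists.dataType = np.array([1, 1, 1, 1])
        s.save()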
def _save(self, *args):
if len(args) > 0 and type(args[0]) is h5py.File:
file = args[0]
@@ -3873,9 +4703,7 @@ def _save(self, *args):
self.__class__.__name__ +
' instance without a filename')
name = self.location + '/dataTimeSeries'
- if type(self._dataTimeSeries) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._dataTimeSeries is not _AbsentDataset:
data = self.dataTimeSeries # Use loader function via getter
if name in file:
del file[name]
@@ -3885,8 +4713,19 @@ def _save(self, *args):
if name in file:
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/dataOffset'
+ if self._dataOffset is not _AbsentDataset:
+ data = self.dataOffset # Use loader function via getter
+ if name in file:
+ del file[name]
+ _create_dataset_float_array(file, name, data, ndim=1)
+ # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
+ else:
+ if name in file:
+ del file[name]
+ self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/time'
- if type(self._time) not in [type(_AbsentDataset), type(None)]:
+ if self._time is not _AbsentDataset:
data = self.time # Use loader function via getter
if name in file:
del file[name]
@@ -3896,15 +4735,27 @@ def _save(self, *args):
if name in file:
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/measurementList'
self.measurementList._save(*args)
+ name = self.location + '/measurementLists'
+ if self._measurementLists is _AbsentGroup or self._measurementLists.is_empty(
+ ):
+ if name in file:
+ del file[name]
+ self._cfg.logger.info(
+ 'Deleted Group %s/measurementLists from %s', self.location,
+ file)
+ else:
+ self.measurementLists._save(*args)
def _validate(self, result: ValidationResult):
# Validate unwritten datasets after writing them to this tempfile
- with h5py.File(TemporaryFile(), 'w') as tmp:
+ with h5py.File(str(uuid.uuid4()),
+ 'w',
+ driver='core',
+ backing_store=False) as tmp:
name = self.location + '/dataTimeSeries'
- if type(self._dataTimeSeries) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._dataTimeSeries is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -3918,8 +4769,23 @@ def _validate(self, result: ValidationResult):
ndims=[2]))
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
+ name = self.location + '/dataOffset'
+ if self._dataOffset is _AbsentDataset:
+ result._add(name, 'OPTIONAL_DATASET_MISSING')
+ else:
+ try:
+ if type(self._dataOffset) is type(
+ _PresentDataset) or 'dataOffset' in self._h:
+ dataset = self._h['dataOffset']
+ else:
+ dataset = _create_dataset_float_array(
+ tmp, 'dataOffset', self._dataOffset)
+ result._add(name, _validate_float_array(dataset,
+ ndims=[1]))
+ except ValueError: # If the _create_dataset function can't convert the data
+ result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/time'
- if type(self._time) in [type(_AbsentDataset), type(None)]:
+ if self._time is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -3938,6 +4804,14 @@ def _validate(self, result: ValidationResult):
result._add(name, 'REQUIRED_INDEXED_GROUP_EMPTY')
else:
self.measurementList._validate(result)
+ name = self.location + '/measurementLists'
+ # If Group is not present in file and empty in the wrapper, it is missing
+ if self._measurementLists is _AbsentGroup or (
+ 'measurementLists' not in self._h
+ and self._measurementLists.is_empty()):
+ result._add(name, 'REQUIRED_GROUP_MISSING')
+ else:
+ self._measurementLists._validate(result)
for key in self._h.keys():
if not any(
[key.startswith(name) for name in self._snirf_names]):
@@ -3974,6 +4848,7 @@ def __init__(self, h: h5py.File, cfg: SnirfConfig):
class MeasurementListElement(Group):
"""Wrapper for an element of indexed group `MeasurementList`."""
+
def __init__(self, gid: h5py.h5g.GroupID, cfg: SnirfConfig):
super().__init__(gid, cfg)
self._sourceIndex = _AbsentDataset # *
@@ -3987,9 +4862,6 @@ def __init__(self, gid: h5py.h5g.GroupID, cfg: SnirfConfig):
self._dataTypeIndex = _AbsentDataset # *
self._sourcePower = _AbsentDataset #
self._detectorGain = _AbsentDataset #
- self._moduleIndex = _AbsentDataset #
- self._sourceModuleIndex = _AbsentDataset #
- self._detectorModuleIndex = _AbsentDataset #
self._snirf_names = [
'sourceIndex',
'detectorIndex',
@@ -4002,9 +4874,6 @@ def __init__(self, gid: h5py.h5g.GroupID, cfg: SnirfConfig):
'dataTypeIndex',
'sourcePower',
'detectorGain',
- 'moduleIndex',
- 'sourceModuleIndex',
- 'detectorModuleIndex',
]
self._indexed_groups = []
@@ -4087,29 +4956,6 @@ def __init__(self, gid: h5py.h5g.GroupID, cfg: SnirfConfig):
self._detectorGain = _PresentDataset
else: # if the dataset is not found on disk
self._detectorGain = _AbsentDataset
- if 'moduleIndex' in self._h:
- if not self._cfg.dynamic_loading:
- self._moduleIndex = _read_int(self._h['moduleIndex'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._moduleIndex = _PresentDataset
- else: # if the dataset is not found on disk
- self._moduleIndex = _AbsentDataset
- if 'sourceModuleIndex' in self._h:
- if not self._cfg.dynamic_loading:
- self._sourceModuleIndex = _read_int(
- self._h['sourceModuleIndex'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._sourceModuleIndex = _PresentDataset
- else: # if the dataset is not found on disk
- self._sourceModuleIndex = _AbsentDataset
- if 'detectorModuleIndex' in self._h:
- if not self._cfg.dynamic_loading:
- self._detectorModuleIndex = _read_int(
- self._h['detectorModuleIndex'])
- else: # if the dataset is found on disk but dynamic_loading=True
- self._detectorModuleIndex = _PresentDataset
- else: # if the dataset is not found on disk
- self._detectorModuleIndex = _AbsentDataset
@property
def sourceIndex(self):
@@ -4121,7 +4967,7 @@ def sourceIndex(self):
Index of the source.
"""
- if type(self._sourceIndex) is type(_AbsentDataset):
+ if self._sourceIndex is _AbsentDataset:
return None
if type(self._sourceIndex) is type(_PresentDataset):
return _read_int(self._h['sourceIndex'])
@@ -4150,7 +4996,7 @@ def detectorIndex(self):
Index of the detector.
"""
- if type(self._detectorIndex) is type(_AbsentDataset):
+ if self._detectorIndex is _AbsentDataset:
return None
if type(self._detectorIndex) is type(_PresentDataset):
return _read_int(self._h['detectorIndex'])
@@ -4180,7 +5026,7 @@ def wavelengthIndex(self):
Index of the "nominal" wavelength (in `probe.wavelengths`).
"""
- if type(self._wavelengthIndex) is type(_AbsentDataset):
+ if self._wavelengthIndex is _AbsentDataset:
return None
if type(self._wavelengthIndex) is type(_PresentDataset):
return _read_int(self._h['wavelengthIndex'])
@@ -4210,7 +5056,7 @@ def wavelengthActual(self):
Actual (measured) wavelength in nm, if available, for the source in a given channel.
"""
- if type(self._wavelengthActual) is type(_AbsentDataset):
+ if self._wavelengthActual is _AbsentDataset:
return None
if type(self._wavelengthActual) is type(_PresentDataset):
return _read_float(self._h['wavelengthActual'])
@@ -4240,7 +5086,7 @@ def wavelengthEmissionActual(self):
Actual (measured) emission wavelength in nm, if available, for the source in a given channel.
"""
- if type(self._wavelengthEmissionActual) is type(_AbsentDataset):
+ if self._wavelengthEmissionActual is _AbsentDataset:
return None
if type(self._wavelengthEmissionActual) is type(_PresentDataset):
return _read_float(self._h['wavelengthEmissionActual'])
@@ -4270,7 +5116,7 @@ def dataType(self):
Data-type identifier. See the Appendix for a list of possible values.
"""
- if type(self._dataType) is type(_AbsentDataset):
+ if self._dataType is _AbsentDataset:
return None
if type(self._dataType) is type(_PresentDataset):
return _read_int(self._h['dataType'])
@@ -4299,7 +5145,7 @@ def dataUnit(self):
International System of Units (SI units) identifier for the given channel. Encoding should follow the [CMIXF-12 standard](https://people.csail.mit.edu/jaffer/MIXF/CMIXF-12), avoiding special unicode symbols like U+03BC (μ) or U+00B5 (µ) and using '/' rather than 'per' for units such as `V/us`. The recommended export format is in unscaled units such as V, s, Mole.
"""
- if type(self._dataUnit) is type(_AbsentDataset):
+ if self._dataUnit is _AbsentDataset:
return None
if type(self._dataUnit) is type(_PresentDataset):
return _read_string(self._h['dataUnit'])
@@ -4329,7 +5175,7 @@ def dataTypeLabel(self):
for a list of possible values.
"""
- if type(self._dataTypeLabel) is type(_AbsentDataset):
+ if self._dataTypeLabel is _AbsentDataset:
return None
if type(self._dataTypeLabel) is type(_PresentDataset):
return _read_string(self._h['dataTypeLabel'])
@@ -4356,12 +5202,10 @@ def dataTypeIndex(self):
If dynamic_loading=True, the data is loaded from the SNIRF file only
when accessed through the getter
- Data-type specific parameter indices. The data type index specifies additional data type specific parameters that are further elaborated by other fields in the probe structure, as detailed below. Note that the Time Domain and Diffuse Correlation Spectroscopy data types have two additional parameters and so the data type index must be a vector with 2 elements that index the additional parameters. One use of this parameter is as a
- stimulus condition index when `measurementList(k).dataType = 99999` (i.e, `processed` and
- `measurementList(k).dataTypeLabel = 'HRF ...'` .
+ Data-type specific parameter index. The data type index specifies additional data type specific parameters that are further elaborated by other fields in the probe structure, as detailed below. Note that where multiple parameters are required, the same index must be used into each (examples include data types such as Time Domain and Diffuse Correlation Spectroscopy). One use of this parameter is as a stimulus condition index when `measurementList(k).dataType = 99999` (i.e., `processed`) and `measurementList(k).dataTypeLabel = 'HRF ...'`.
"""
- if type(self._dataTypeIndex) is type(_AbsentDataset):
+ if self._dataTypeIndex is _AbsentDataset:
return None
if type(self._dataTypeIndex) is type(_PresentDataset):
return _read_int(self._h['dataTypeIndex'])
@@ -4391,7 +5235,7 @@ def sourcePower(self):
The units are not defined, unless the user takes the option of using a `metaDataTag` as described below.
"""
- if type(self._sourcePower) is type(_AbsentDataset):
+ if self._sourcePower is _AbsentDataset:
return None
if type(self._sourcePower) is type(_PresentDataset):
return _read_float(self._h['sourcePower'])
@@ -4419,106 +5263,6 @@ def detectorGain(self):
Detector gain
- """
- if type(self._detectorGain) is type(_AbsentDataset):
- return None
- if type(self._detectorGain) is type(_PresentDataset):
- return _read_float(self._h['detectorGain'])
- self._cfg.logger.info('Dynamically loaded %s/detectorGain from %s',
- self.location, self.filename)
- return self._detectorGain
-
- @detectorGain.setter
- def detectorGain(self, value):
- self._detectorGain = value
- # self._cfg.logger.info('Assignment to %s/detectorGain in %s', self.location, self.filename)
-
- @detectorGain.deleter
- def detectorGain(self):
- self._detectorGain = _AbsentDataset
- self._cfg.logger.info('Deleted %s/detectorGain from %s', self.location,
- self.filename)
-
- @property
- def moduleIndex(self):
- """SNIRF field `moduleIndex`.
-
- If dynamic_loading=True, the data is loaded from the SNIRF file only
- when accessed through the getter
-
- Index of a repeating module. If `moduleIndex` is provided while `useLocalIndex`
- is set to `true`, then, both `measurementList(k).sourceIndex` and
- `measurementList(k).detectorIndex` are assumed to be the local indices
- of the same module specified by `moduleIndex`. If the source and
- detector are located on different modules, one must use `sourceModuleIndex`
- and `detectorModuleIndex` instead to specify separate parent module
- indices. See below.
-
-
- """
- if type(self._moduleIndex) is type(_AbsentDataset):
- return None
- if type(self._moduleIndex) is type(_PresentDataset):
- return _read_int(self._h['moduleIndex'])
- self._cfg.logger.info('Dynamically loaded %s/moduleIndex from %s',
- self.location, self.filename)
- return self._moduleIndex
-
- @moduleIndex.setter
- def moduleIndex(self, value):
- self._moduleIndex = value
- # self._cfg.logger.info('Assignment to %s/moduleIndex in %s', self.location, self.filename)
-
- @moduleIndex.deleter
- def moduleIndex(self):
- self._moduleIndex = _AbsentDataset
- self._cfg.logger.info('Deleted %s/moduleIndex from %s', self.location,
- self.filename)
-
- @property
- def sourceModuleIndex(self):
- """SNIRF field `sourceModuleIndex`.
-
- If dynamic_loading=True, the data is loaded from the SNIRF file only
- when accessed through the getter
-
- Index of the module that contains the source of the channel.
- This index must be used together with `detectorModuleIndex`, and
- can not be used when `moduleIndex` presents.
-
- """
- if type(self._sourceModuleIndex) is type(_AbsentDataset):
- return None
- if type(self._sourceModuleIndex) is type(_PresentDataset):
- return _read_int(self._h['sourceModuleIndex'])
- self._cfg.logger.info(
- 'Dynamically loaded %s/sourceModuleIndex from %s',
- self.location, self.filename)
- return self._sourceModuleIndex
-
- @sourceModuleIndex.setter
- def sourceModuleIndex(self, value):
- self._sourceModuleIndex = value
- # self._cfg.logger.info('Assignment to %s/sourceModuleIndex in %s', self.location, self.filename)
-
- @sourceModuleIndex.deleter
- def sourceModuleIndex(self):
- self._sourceModuleIndex = _AbsentDataset
- self._cfg.logger.info('Deleted %s/sourceModuleIndex from %s',
- self.location, self.filename)
-
- @property
- def detectorModuleIndex(self):
- """SNIRF field `detectorModuleIndex`.
-
- If dynamic_loading=True, the data is loaded from the SNIRF file only
- when accessed through the getter
-
- Index of the module that contains the detector of the channel.
- This index must be used together with `sourceModuleIndex`, and
- can not be used when `moduleIndex` presents.
-
-
For example, if `measurementList5` is a structure with `sourceIndex=2`,
`detectorIndex=3`, `wavelengthIndex=1`, `dataType=1`, `dataTypeIndex=1` would
imply that the data in the 5th column of the `dataTimeSeries` variable was
@@ -4553,25 +5297,24 @@ def detectorModuleIndex(self):
label for sources and detectors.
"""
- if type(self._detectorModuleIndex) is type(_AbsentDataset):
+ if self._detectorGain is _AbsentDataset:
return None
- if type(self._detectorModuleIndex) is type(_PresentDataset):
- return _read_int(self._h['detectorModuleIndex'])
- self._cfg.logger.info(
- 'Dynamically loaded %s/detectorModuleIndex from %s',
- self.location, self.filename)
- return self._detectorModuleIndex
+ if type(self._detectorGain) is type(_PresentDataset):
+ return _read_float(self._h['detectorGain'])
+ self._cfg.logger.info('Dynamically loaded %s/detectorGain from %s',
+ self.location, self.filename)
+ return self._detectorGain
- @detectorModuleIndex.setter
- def detectorModuleIndex(self, value):
- self._detectorModuleIndex = value
- # self._cfg.logger.info('Assignment to %s/detectorModuleIndex in %s', self.location, self.filename)
+ @detectorGain.setter
+ def detectorGain(self, value):
+ self._detectorGain = value
+ # self._cfg.logger.info('Assignment to %s/detectorGain in %s', self.location, self.filename)
- @detectorModuleIndex.deleter
- def detectorModuleIndex(self):
- self._detectorModuleIndex = _AbsentDataset
- self._cfg.logger.info('Deleted %s/detectorModuleIndex from %s',
- self.location, self.filename)
+ @detectorGain.deleter
+ def detectorGain(self):
+ self._detectorGain = _AbsentDataset
+ self._cfg.logger.info('Deleted %s/detectorGain from %s', self.location,
+ self.filename)
def _save(self, *args):
if len(args) > 0 and type(args[0]) is h5py.File:
@@ -4591,7 +5334,7 @@ def _save(self, *args):
self.__class__.__name__ +
' instance without a filename')
name = self.location + '/sourceIndex'
- if type(self._sourceIndex) not in [type(_AbsentDataset), type(None)]:
+ if self._sourceIndex is not _AbsentDataset:
data = self.sourceIndex # Use loader function via getter
if name in file:
del file[name]
@@ -4602,7 +5345,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/detectorIndex'
- if type(self._detectorIndex) not in [type(_AbsentDataset), type(None)]:
+ if self._detectorIndex is not _AbsentDataset:
data = self.detectorIndex # Use loader function via getter
if name in file:
del file[name]
@@ -4613,9 +5356,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/wavelengthIndex'
- if type(self._wavelengthIndex) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._wavelengthIndex is not _AbsentDataset:
data = self.wavelengthIndex # Use loader function via getter
if name in file:
del file[name]
@@ -4626,9 +5367,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/wavelengthActual'
- if type(self._wavelengthActual) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._wavelengthActual is not _AbsentDataset:
data = self.wavelengthActual # Use loader function via getter
if name in file:
del file[name]
@@ -4639,9 +5378,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/wavelengthEmissionActual'
- if type(self._wavelengthEmissionActual) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._wavelengthEmissionActual is not _AbsentDataset:
data = self.wavelengthEmissionActual # Use loader function via getter
if name in file:
del file[name]
@@ -4652,7 +5389,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/dataType'
- if type(self._dataType) not in [type(_AbsentDataset), type(None)]:
+ if self._dataType is not _AbsentDataset:
data = self.dataType # Use loader function via getter
if name in file:
del file[name]
@@ -4663,7 +5400,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/dataUnit'
- if type(self._dataUnit) not in [type(_AbsentDataset), type(None)]:
+ if self._dataUnit is not _AbsentDataset:
data = self.dataUnit # Use loader function via getter
if name in file:
del file[name]
@@ -4674,7 +5411,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/dataTypeLabel'
- if type(self._dataTypeLabel) not in [type(_AbsentDataset), type(None)]:
+ if self._dataTypeLabel is not _AbsentDataset:
data = self.dataTypeLabel # Use loader function via getter
if name in file:
del file[name]
@@ -4685,7 +5422,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/dataTypeIndex'
- if type(self._dataTypeIndex) not in [type(_AbsentDataset), type(None)]:
+ if self._dataTypeIndex is not _AbsentDataset:
data = self.dataTypeIndex # Use loader function via getter
if name in file:
del file[name]
@@ -4696,7 +5433,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/sourcePower'
- if type(self._sourcePower) not in [type(_AbsentDataset), type(None)]:
+ if self._sourcePower is not _AbsentDataset:
data = self.sourcePower # Use loader function via getter
if name in file:
del file[name]
@@ -4707,7 +5444,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/detectorGain'
- if type(self._detectorGain) not in [type(_AbsentDataset), type(None)]:
+ if self._detectorGain is not _AbsentDataset:
data = self.detectorGain # Use loader function via getter
if name in file:
del file[name]
@@ -4717,49 +5454,15 @@ def _save(self, *args):
if name in file:
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
- name = self.location + '/moduleIndex'
- if type(self._moduleIndex) not in [type(_AbsentDataset), type(None)]:
- data = self.moduleIndex # Use loader function via getter
- if name in file:
- del file[name]
- _create_dataset_int(file, name, data)
- # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
- else:
- if name in file:
- del file[name]
- self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
- name = self.location + '/sourceModuleIndex'
- if type(self._sourceModuleIndex) not in [
- type(_AbsentDataset), type(None)
- ]:
- data = self.sourceModuleIndex # Use loader function via getter
- if name in file:
- del file[name]
- _create_dataset_int(file, name, data)
- # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
- else:
- if name in file:
- del file[name]
- self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
- name = self.location + '/detectorModuleIndex'
- if type(self._detectorModuleIndex) not in [
- type(_AbsentDataset), type(None)
- ]:
- data = self.detectorModuleIndex # Use loader function via getter
- if name in file:
- del file[name]
- _create_dataset_int(file, name, data)
- # self._cfg.logger.info('Creating Dataset %s in %s', name, file)
- else:
- if name in file:
- del file[name]
- self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
def _validate(self, result: ValidationResult):
# Validate unwritten datasets after writing them to this tempfile
- with h5py.File(TemporaryFile(), 'w') as tmp:
+ with h5py.File(str(uuid.uuid4()),
+ 'w',
+ driver='core',
+ backing_store=False) as tmp:
name = self.location + '/sourceIndex'
- if type(self._sourceIndex) in [type(_AbsentDataset), type(None)]:
+ if self._sourceIndex is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -4779,7 +5482,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/detectorIndex'
- if type(self._detectorIndex) in [type(_AbsentDataset), type(None)]:
+ if self._detectorIndex is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -4799,9 +5502,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/wavelengthIndex'
- if type(self._wavelengthIndex) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._wavelengthIndex is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -4821,9 +5522,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/wavelengthActual'
- if type(self._wavelengthActual) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._wavelengthActual is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -4837,9 +5536,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/wavelengthEmissionActual'
- if type(self._wavelengthEmissionActual) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._wavelengthEmissionActual is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -4855,7 +5552,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/dataType'
- if type(self._dataType) in [type(_AbsentDataset), type(None)]:
+ if self._dataType is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -4869,7 +5566,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/dataUnit'
- if type(self._dataUnit) in [type(_AbsentDataset), type(None)]:
+ if self._dataUnit is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -4883,7 +5580,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/dataTypeLabel'
- if type(self._dataTypeLabel) in [type(_AbsentDataset), type(None)]:
+ if self._dataTypeLabel is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -4897,7 +5594,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/dataTypeIndex'
- if type(self._dataTypeIndex) in [type(_AbsentDataset), type(None)]:
+ if self._dataTypeIndex is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -4917,7 +5614,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/sourcePower'
- if type(self._sourcePower) in [type(_AbsentDataset), type(None)]:
+ if self._sourcePower is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -4931,7 +5628,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/detectorGain'
- if type(self._detectorGain) in [type(_AbsentDataset), type(None)]:
+ if self._detectorGain is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -4944,72 +5641,6 @@ def _validate(self, result: ValidationResult):
result._add(name, _validate_float(dataset))
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
- name = self.location + '/moduleIndex'
- if type(self._moduleIndex) in [type(_AbsentDataset), type(None)]:
- result._add(name, 'OPTIONAL_DATASET_MISSING')
- else:
- try:
- if type(self._moduleIndex) is type(
- _PresentDataset) or 'moduleIndex' in self._h:
- dataset = self._h['moduleIndex']
- else:
- dataset = _create_dataset_int(tmp, 'moduleIndex',
- self._moduleIndex)
- err_code = _validate_int(dataset)
- if _read_int(dataset) < 0 and err_code == 'OK':
- result._add(name, 'NEGATIVE_INDEX')
- elif _read_int(dataset) == 0 and err_code == 'OK':
- result._add(name, 'INDEX_OF_ZERO')
- else:
- result._add(name, err_code)
- except ValueError: # If the _create_dataset function can't convert the data
- result._add(name, 'INVALID_DATASET_TYPE')
- name = self.location + '/sourceModuleIndex'
- if type(self._sourceModuleIndex) in [
- type(_AbsentDataset), type(None)
- ]:
- result._add(name, 'OPTIONAL_DATASET_MISSING')
- else:
- try:
- if type(self._sourceModuleIndex) is type(
- _PresentDataset) or 'sourceModuleIndex' in self._h:
- dataset = self._h['sourceModuleIndex']
- else:
- dataset = _create_dataset_int(tmp, 'sourceModuleIndex',
- self._sourceModuleIndex)
- err_code = _validate_int(dataset)
- if _read_int(dataset) < 0 and err_code == 'OK':
- result._add(name, 'NEGATIVE_INDEX')
- elif _read_int(dataset) == 0 and err_code == 'OK':
- result._add(name, 'INDEX_OF_ZERO')
- else:
- result._add(name, err_code)
- except ValueError: # If the _create_dataset function can't convert the data
- result._add(name, 'INVALID_DATASET_TYPE')
- name = self.location + '/detectorModuleIndex'
- if type(self._detectorModuleIndex) in [
- type(_AbsentDataset), type(None)
- ]:
- result._add(name, 'OPTIONAL_DATASET_MISSING')
- else:
- try:
- if type(self._detectorModuleIndex) is type(
- _PresentDataset
- ) or 'detectorModuleIndex' in self._h:
- dataset = self._h['detectorModuleIndex']
- else:
- dataset = _create_dataset_int(
- tmp, 'detectorModuleIndex',
- self._detectorModuleIndex)
- err_code = _validate_int(dataset)
- if _read_int(dataset) < 0 and err_code == 'OK':
- result._add(name, 'NEGATIVE_INDEX')
- elif _read_int(dataset) == 0 and err_code == 'OK':
- result._add(name, 'INDEX_OF_ZERO')
- else:
- result._add(name, err_code)
- except ValueError: # If the _create_dataset function can't convert the data
- result._add(name, 'INVALID_DATASET_TYPE')
for key in self._h.keys():
if not any(
[key.startswith(name) for name in self._snirf_names]):
@@ -5049,6 +5680,7 @@ def __init__(self, h: h5py.File, cfg: SnirfConfig):
class StimElement(Group):
"""Wrapper for an element of indexed group `Stim`."""
+
def __init__(self, gid: h5py.h5g.GroupID, cfg: SnirfConfig):
super().__init__(gid, cfg)
self._name = _AbsentDataset # "s"+
@@ -5094,7 +5726,7 @@ def name(self):
"""
- if type(self._name) is type(_AbsentDataset):
+ if self._name is _AbsentDataset:
return None
if type(self._name) is type(_PresentDataset):
return _read_string(self._h['name'])
@@ -5135,7 +5767,7 @@ def data(self):
used to annotate the meanings of each data column.
"""
- if type(self._data) is type(_AbsentDataset):
+ if self._data is _AbsentDataset:
return None
if type(self._data) is type(_PresentDataset):
return _read_float_array(self._h['data'])
@@ -5145,7 +5777,8 @@ def data(self):
@data.setter
def data(self, value):
- self._data = value
+ if value is not None and any([v is not None for v in value]):
+ self._data = np.array(value)
# self._cfg.logger.info('Assignment to %s/data in %s', self.location, self.filename)
@data.deleter
@@ -5167,7 +5800,7 @@ def dataLabels(self):
of `/nirs(i)/stim(j)/data`, including the first 3 required columns.
"""
- if type(self._dataLabels) is type(_AbsentDataset):
+ if self._dataLabels is _AbsentDataset:
return None
if type(self._dataLabels) is type(_PresentDataset):
return _read_string_array(self._h['dataLabels'])
@@ -5177,7 +5810,8 @@ def dataLabels(self):
@dataLabels.setter
def dataLabels(self, value):
- self._dataLabels = value
+ if value is not None and any([v is not None for v in value]):
+ self._dataLabels = np.array(value)
# self._cfg.logger.info('Assignment to %s/dataLabels in %s', self.location, self.filename)
@dataLabels.deleter
@@ -5204,7 +5838,7 @@ def _save(self, *args):
self.__class__.__name__ +
' instance without a filename')
name = self.location + '/name'
- if type(self._name) not in [type(_AbsentDataset), type(None)]:
+ if self._name is not _AbsentDataset:
data = self.name # Use loader function via getter
if name in file:
del file[name]
@@ -5215,7 +5849,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/data'
- if type(self._data) not in [type(_AbsentDataset), type(None)]:
+ if self._data is not _AbsentDataset:
data = self.data # Use loader function via getter
if name in file:
del file[name]
@@ -5226,7 +5860,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/dataLabels'
- if type(self._dataLabels) not in [type(_AbsentDataset), type(None)]:
+ if self._dataLabels is not _AbsentDataset:
data = self.dataLabels # Use loader function via getter
if name in file:
del file[name]
@@ -5239,9 +5873,12 @@ def _save(self, *args):
def _validate(self, result: ValidationResult):
# Validate unwritten datasets after writing them to this tempfile
- with h5py.File(TemporaryFile(), 'w') as tmp:
+ with h5py.File(str(uuid.uuid4()),
+ 'w',
+ driver='core',
+ backing_store=False) as tmp:
name = self.location + '/name'
- if type(self._name) in [type(_AbsentDataset), type(None)]:
+ if self._name is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -5255,7 +5892,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/data'
- if type(self._data) in [type(_AbsentDataset), type(None)]:
+ if self._data is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -5270,7 +5907,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/dataLabels'
- if type(self._dataLabels) in [type(_AbsentDataset), type(None)]:
+ if self._dataLabels is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -5316,6 +5953,7 @@ def __init__(self, h: h5py.File, cfg: SnirfConfig):
class AuxElement(Group):
"""Wrapper for an element of indexed group `Aux`."""
+
def __init__(self, gid: h5py.h5g.GroupID, cfg: SnirfConfig):
super().__init__(gid, cfg)
self._name = _AbsentDataset # "s"+
@@ -5379,7 +6017,7 @@ def name(self):
This is string describing the jth auxiliary data timecourse. While auxiliary data can be given any title, standard names for commonly used auxiliary channels (i.e. accelerometer data) are specified in the appendix.
"""
- if type(self._name) is type(_AbsentDataset):
+ if self._name is _AbsentDataset:
return None
if type(self._name) is type(_PresentDataset):
return _read_string(self._h['name'])
@@ -5409,7 +6047,7 @@ def dataTimeSeries(self):
time points> x <number of channels>`. If multiple channels of related data are generated by a system, they may be encoded in the multiple columns of the time series (i.e. complex numbers). For example, a system containing more than one accelerometer may output this data as a set of `ACCEL_X`/`ACCEL_Y`/`ACCEL_Z` auxiliary time series, where each has the dimension of `<number of time points> x <number of accelerometers>`. Note that it is NOT recommended to encode the various accelerometer dimensions as multiple channels of the same `aux` Group: instead follow the `"ACCEL_X"`, `"ACCEL_Y"`, `"ACCEL_Z"` naming conventions described in the appendix. Chunked data is allowed to support real-time data streaming.
"""
- if type(self._dataTimeSeries) is type(_AbsentDataset):
+ if self._dataTimeSeries is _AbsentDataset:
return None
if type(self._dataTimeSeries) is type(_PresentDataset):
return _read_float_array(self._h['dataTimeSeries'])
@@ -5420,7 +6058,8 @@ def dataTimeSeries(self):
@dataTimeSeries.setter
def dataTimeSeries(self, value):
- self._dataTimeSeries = value
+ if value is not None and any([v is not None for v in value]):
+ self._dataTimeSeries = np.array(value)
# self._cfg.logger.info('Assignment to %s/dataTimeSeries in %s', self.location, self.filename)
@dataTimeSeries.deleter
@@ -5439,7 +6078,7 @@ def dataUnit(self):
International System of Units (SI units) identifier for the given channel. Encoding should follow the [CMIXF-12 standard](https://people.csail.mit.edu/jaffer/MIXF/CMIXF-12), avoiding special unicode symbols like U+03BC (μ) or U+00B5 (µ) and using '/' rather than 'per' for units such as `V/us`. The recommended export format is in unscaled units such as V, s, Mole.
"""
- if type(self._dataUnit) is type(_AbsentDataset):
+ if self._dataUnit is _AbsentDataset:
return None
if type(self._dataUnit) is type(_PresentDataset):
return _read_string(self._h['dataUnit'])
@@ -5475,7 +6114,7 @@ def time(self):
Chunked data is allowed to support real-time data streaming
"""
- if type(self._time) is type(_AbsentDataset):
+ if self._time is _AbsentDataset:
return None
if type(self._time) is type(_PresentDataset):
return _read_float_array(self._h['time'])
@@ -5485,7 +6124,8 @@ def time(self):
@time.setter
def time(self, value):
- self._time = value
+ if value is not None and any([v is not None for v in value]):
+ self._time = np.array(value)
# self._cfg.logger.info('Assignment to %s/time in %s', self.location, self.filename)
@time.deleter
@@ -5507,7 +6147,7 @@ def timeOffset(self):
"""
- if type(self._timeOffset) is type(_AbsentDataset):
+ if self._timeOffset is _AbsentDataset:
return None
if type(self._timeOffset) is type(_PresentDataset):
return _read_float_array(self._h['timeOffset'])
@@ -5517,7 +6157,8 @@ def timeOffset(self):
@timeOffset.setter
def timeOffset(self, value):
- self._timeOffset = value
+ if value is not None and any([v is not None for v in value]):
+ self._timeOffset = np.array(value)
# self._cfg.logger.info('Assignment to %s/timeOffset in %s', self.location, self.filename)
@timeOffset.deleter
@@ -5544,7 +6185,7 @@ def _save(self, *args):
self.__class__.__name__ +
' instance without a filename')
name = self.location + '/name'
- if type(self._name) not in [type(_AbsentDataset), type(None)]:
+ if self._name is not _AbsentDataset:
data = self.name # Use loader function via getter
if name in file:
del file[name]
@@ -5555,9 +6196,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/dataTimeSeries'
- if type(self._dataTimeSeries) not in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._dataTimeSeries is not _AbsentDataset:
data = self.dataTimeSeries # Use loader function via getter
if name in file:
del file[name]
@@ -5568,7 +6207,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/dataUnit'
- if type(self._dataUnit) not in [type(_AbsentDataset), type(None)]:
+ if self._dataUnit is not _AbsentDataset:
data = self.dataUnit # Use loader function via getter
if name in file:
del file[name]
@@ -5579,7 +6218,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/time'
- if type(self._time) not in [type(_AbsentDataset), type(None)]:
+ if self._time is not _AbsentDataset:
data = self.time # Use loader function via getter
if name in file:
del file[name]
@@ -5590,7 +6229,7 @@ def _save(self, *args):
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
name = self.location + '/timeOffset'
- if type(self._timeOffset) not in [type(_AbsentDataset), type(None)]:
+ if self._timeOffset is not _AbsentDataset:
data = self.timeOffset # Use loader function via getter
if name in file:
del file[name]
@@ -5603,9 +6242,12 @@ def _save(self, *args):
def _validate(self, result: ValidationResult):
# Validate unwritten datasets after writing them to this tempfile
- with h5py.File(TemporaryFile(), 'w') as tmp:
+ with h5py.File(str(uuid.uuid4()),
+ 'w',
+ driver='core',
+ backing_store=False) as tmp:
name = self.location + '/name'
- if type(self._name) in [type(_AbsentDataset), type(None)]:
+ if self._name is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -5619,9 +6261,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/dataTimeSeries'
- if type(self._dataTimeSeries) in [
- type(_AbsentDataset), type(None)
- ]:
+ if self._dataTimeSeries is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -5636,7 +6276,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/dataUnit'
- if type(self._dataUnit) in [type(_AbsentDataset), type(None)]:
+ if self._dataUnit is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -5650,7 +6290,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/time'
- if type(self._time) in [type(_AbsentDataset), type(None)]:
+ if self._time is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -5665,7 +6305,7 @@ def _validate(self, result: ValidationResult):
except ValueError: # If the _create_dataset function can't convert the data
result._add(name, 'INVALID_DATASET_TYPE')
name = self.location + '/timeOffset'
- if type(self._timeOffset) in [type(_AbsentDataset), type(None)]:
+ if self._timeOffset is _AbsentDataset:
result._add(name, 'OPTIONAL_DATASET_MISSING')
else:
try:
@@ -5720,6 +6360,7 @@ def __init__(self,
self._cfg = SnirfConfig()
self._cfg.dynamic_loading = dynamic_loading
self._cfg.fmode = ''
+ self._f = None # handle for filelikes and temporary files
if len(args) > 0:
path = args[0]
if enable_logging:
@@ -5746,6 +6387,7 @@ def __init__(self,
warn(
'Use `Snirf(<path>, <mode>)` to open SNIRF file from path. Path-only construction is deprecated.',
DeprecationWarning)
+ # fmode is ''
if type(path) is str:
if not path.endswith('.snirf'):
path.replace('.', '')
@@ -5764,7 +6406,8 @@ def __init__(self,
self._cfg.logger.info('Loading from filelike object')
if self._cfg.fmode == '':
self._cfg.fmode = 'r'
- self._h = h5py.File(path, 'r')
+ self._f = args[0]
+ self._h = h5py.File(self._f, 'r', backing_store=False)
else:
raise TypeError(str(path) + ' is not a valid filename')
else:
@@ -5776,7 +6419,10 @@ def __init__(self,
self._cfg.logger = _create_logger('',
None) # Do not log to file
self._cfg.fmode = 'w'
- self._h = h5py.File(TemporaryFile(), 'w')
+ self._h = h5py.File(str(uuid.uuid4()),
+ 'w',
+ driver='core',
+ backing_store=False)
self._formatVersion = _AbsentDataset # "s"*
self._nirs = _AbsentDataset # {i}*
self._snirf_names = [
@@ -5806,7 +6452,7 @@ def formatVersion(self):
describes format version "1.0"
"""
- if type(self._formatVersion) is type(_AbsentDataset):
+ if self._formatVersion is _AbsentDataset:
return None
if type(self._formatVersion) is type(_PresentDataset):
return _read_string(self._h['formatVersion'])
@@ -5876,7 +6522,7 @@ def _save(self, *args):
self.__class__.__name__ +
' instance without a filename')
name = self.location + '/formatVersion'
- if type(self._formatVersion) not in [type(_AbsentDataset), type(None)]:
+ if self._formatVersion is not _AbsentDataset:
data = self.formatVersion # Use loader function via getter
if name in file:
del file[name]
@@ -5886,13 +6532,17 @@ def _save(self, *args):
if name in file:
del file[name]
self._cfg.logger.info('Deleted Dataset %s from %s', name, file)
+ name = self.location + '/nirs'
self.nirs._save(*args)
def _validate(self, result: ValidationResult):
# Validate unwritten datasets after writing them to this tempfile
- with h5py.File(TemporaryFile(), 'w') as tmp:
+ with h5py.File(str(uuid.uuid4()),
+ 'w',
+ driver='core',
+ backing_store=False) as tmp:
name = self.location + '/formatVersion'
- if type(self._formatVersion) in [type(_AbsentDataset), type(None)]:
+ if self._formatVersion is _AbsentDataset:
result._add(name, 'REQUIRED_DATASET_MISSING')
else:
try:
@@ -5941,6 +6591,58 @@ def _validate(self, result: ValidationResult):
'UNCInfant',
]
+_RECOGNIZED_AUX_NAMES = [
+ 'ACCEL_X',
+ 'ACCEL_Y',
+ 'ACCEL_Z',
+ 'GYRO_X',
+ 'GYRO_Y',
+ 'GYRO_Z',
+ 'MAGN_X',
+ 'MAGN_Y',
+ 'MAGN_Z',
+]
+
+_RECOGNIZED_DATA_TYPES = [
+ 1,
+ 51,
+ 101,
+ 102,
+ 151,
+ 152,
+ 201,
+ 251,
+ 301,
+ 351,
+ 401,
+ 410,
+ 99999,
+]
+
+_RECOGNIZED_DATA_TYPE_LABELS = [
+ 'dOD',
+ 'dMean',
+ 'dVar',
+ 'dSkew',
+ 'mua',
+ 'musp',
+ 'HbO',
+ 'HbR',
+ 'HbT',
+ 'H2O',
+ 'Lipid',
+ 'StO2',
+ 'BFi',
+ 'HRF dOD',
+ 'HRF dMean',
+ 'HRF dVar',
+ 'HRF dSkew',
+ 'HRF HbO',
+ 'HRF HbR',
+ 'HRF HbT',
+ 'HRF BFi',
+]
+
# <<< END TEMPLATE INSERT >>>
# ================================================================================
# DO NOT EDIT THE ABOVE CODE! IT IS GENERATED VIA TEMPLATE. SEE README FOR DETAILS
@@ -5950,6 +6652,7 @@ def _validate(self, result: ValidationResult):
class MetaDataTags(MetaDataTags):
+
def add(self, name, value):
"""Add a new tag to the list.
@@ -5985,6 +6688,7 @@ def remove(self, name):
class StimElement(StimElement):
+
def _validate(self, result: ValidationResult):
super()._validate(result)
@@ -6002,6 +6706,7 @@ class Stim(Stim):
class AuxElement(AuxElement):
+
def _validate(self, result: ValidationResult):
super()._validate(result)
@@ -6015,26 +6720,129 @@ class Aux(Aux):
class DataElement(DataElement):
+
+ def measurementList_to_measurementLists(self):
+ """Converts `measurementList` to a `measurementLists` structure if it is present.
+
+ This method will populate the `measurementLists` Group structure with the contents of the `measurementList` indexed Group.
+
+ The `measurementList` indexed Group is not removed.
+ """
+ if len(self.measurementList) > 0:
+ for dataset_name in self.measurementList[0]._snirf_names:
+ vals = [
+ getattr(ml, dataset_name) for ml in self.measurementList
+ ]
+ if all(val is not None for val in vals):
+ setattr(self.measurementLists, dataset_name, vals)
+
+ def measurementLists_to_measurementList(self):
+ """Converts `measurementLists` to a `measurementList` indexed Group structure if it is present.
+
+ This method will create new `measurementList` indexed Group entries populated with the contents
+ of the `measurementLists` Group.
+
+ The `measurementLists` Group is not removed.
+ """
+ values = {}
+ for dataset_name in self.measurementLists._snirf_names:
+ val = getattr(self.measurementLists, dataset_name)
+ if val is not None:
+ values[dataset_name] = val
+ if len(self.measurementList) > 0:
+ del self.measurementList[:]
+ n = max(len(v)
+ for v in values.values()) # Number of measurementList entries
+ for _ in range(n):
+     self.measurementList.appendGroup()
+ for i in range(n):
+ row = {k: v[i] for k, v in values.items()}
+ for k, v in row.items():
+ setattr(self.measurementList[i], k, v)
+
def _validate(self, result: ValidationResult):
- super()._validate(result)
+ # Override measurementList/measurementLists validation, only one is required
+ ml = self.measurementList is not None
+ mls = self.measurementLists is not None
+ if (ml and mls):
+ result._add(self.location + '/measurementList', 'OK')
+ result._add(self.location + '/measurementLists', 'OK')
+ elif (ml or mls):
+ result._add(self.location + '/measurementList',
+ ['OPTIONAL_DATASET_MISSING', 'OK'][int(ml)])
+ result._add(self.location + '/measurementLists',
+ ['OPTIONAL_DATASET_MISSING', 'OK'][int(mls)])
+ else:
+ result._add(self.location + '/measurementList',
+ ['REQUIRED_DATASET_MISSING', 'OK'][int(ml)])
+ result._add(self.location + '/measurementLists',
+ ['REQUIRED_DATASET_MISSING', 'OK'][int(mls)])
+
+ # Check time/dataTimeSeries length agreement
if all(attr is not None for attr in [self.time, self.dataTimeSeries]):
if self.time.size != np.shape(self.dataTimeSeries)[0]:
result._add(self.location + '/time', 'INVALID_TIME')
- if len(self.measurementList) != np.shape(self.dataTimeSeries)[1]:
+ # Check measurementList(s) length depending on which exist
+ n = np.shape(self.dataTimeSeries)[1]
+ ml_valid = (len(self.measurementList) == n)
+ if self.measurementLists is not None and not self.measurementLists.is_empty(
+ ): # if measurementLists exists
+ mls_valid = self.measurementLists is not None and (
+ not self.measurementLists.is_empty() and not any([
+ len(getattr(self.measurementLists, k)) != n
+ for k in self.measurementLists._snirf_names
+ if getattr(self.measurementLists, k) is not None
+ ]))
+ if not mls_valid:
+ result._add(self.location, 'INVALID_MEASUREMENTLISTS')
+ if not ml_valid:
+ result._add(self.location, 'INVALID_MEASUREMENTLIST')
+ elif not ml_valid:
result._add(self.location, 'INVALID_MEASUREMENTLIST')
+ # Validate dataType and dataTypeLabel
+ if self.measurementLists is not None and not self.measurementLists.is_empty(
+ ):
+ if self.measurementLists.dataType is not None:
+ for value in self.measurementLists.dataType:
+ if value not in _RECOGNIZED_DATA_TYPES:
+ result._add(
+ self.location + '/measurementLists/dataType',
+ 'UNRECOGNIZED_DATA_TYPE')
+ elif value == 99999:
+ if self.measurementLists.dataTypeLabel is not None:
+ for label in self.measurementLists.dataTypeLabel:
+ if label not in _RECOGNIZED_DATA_TYPE_LABELS:
+ result._add(
+ self.location +
+ '/measurementLists/dataTypeLabel',
+ 'UNRECOGNIZED_DATA_TYPE_LABEL')
+ for ml in self.measurementList:
+ if ml.dataType is not None and ml.dataType not in _RECOGNIZED_DATA_TYPES:
+ result._add(ml.location + '/dataType',
+ 'UNRECOGNIZED_DATA_TYPE')
+ elif ml.dataType == 99999:
+ if ml.dataTypeLabel is not None and ml.dataTypeLabel not in _RECOGNIZED_DATA_TYPE_LABELS:
+ result._add(ml.location + '/dataTypeLabel',
+ 'UNRECOGNIZED_DATA_TYPE_LABEL')
+
+ super()._validate(result)
+
class Data(Data):
_element = DataElement
class Probe(Probe):
+
def _validate(self, result: ValidationResult):
# Override sourceLabels validation, can be 1D or 2D
- with h5py.File(TemporaryFile(), 'w') as tmp:
+ with h5py.File(str(uuid.uuid4()),
+ 'w',
+ driver='core',
+ backing_store=False) as tmp:
if type(self._sourceLabels) in [type(_AbsentDataset), type(None)]:
result._add(self.location + '/sourceLabels',
'OPTIONAL_DATASET_MISSING')
@@ -6093,6 +6901,13 @@ def _validate(self, result: ValidationResult):
super()._validate(result)
+class MeasurementLists(MeasurementLists):
+
+ def _validate(self, result):
+ return super()._validate(result)
+
+
class Snirf(Snirf):
# overload
@@ -6155,6 +6970,27 @@ def validate(self) -> ValidationResult:
self._validate(result)
return result
+ def measurementList_to_measurementLists(self):
+ """Convert the `measurementList` field of all `Data` elements to `measurementLists`.
+
+ Does not remove the `measurementList` indexed Group.
+ """
+ for nirs in self.nirs:
+ for data in nirs.data:
+ data.measurementList_to_measurementLists()
+
+ def measurementLists_to_measurementList(self):
+ """Converts `measurementLists` to a `measurementList` indexed Group structure if it is present.
+
+ This method will create new `measurementList` indexed Group entries populated with the contents
+ of the `measurementLists` Group.
+
+ The `measurementLists` Group is not removed.
+ """
+ for nirs in self.nirs:
+ for data in nirs.data:
+ data.measurementLists_to_measurementList()
+
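+ # A minimal usage sketch for the two conversion helpers above (the file
+ # name here is hypothetical):
+ #
+ #     with Snirf('example.snirf', 'r+') as s:
+ #         s.measurementList_to_measurementLists()
+ #         s.save()
+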
# overload
@property
def filename(self):
@@ -6175,6 +7011,14 @@ def close(self):
self._cfg.logger.info('Closing Snirf file %s', self.filename)
_close_logger(self._cfg.logger)
self._h.close()
+ if self._f is not None:
+ self._f.close()
+
+ def __del__(self):
+ try:
+ self.close()
+ except Exception:
+ pass # Was already closed
def __enter__(self):
return self
@@ -6191,39 +7035,90 @@ def __getitem__(self, key):
return None
def _validate(self, result: ValidationResult):
- super()._validate(result)
# TODO INVALID_FILENAME, INVALID_FILE detection
+ # Compare measurement list to probe
for nirs in self.nirs:
if type(nirs.probe) not in [type(None), type(_AbsentGroup)]:
+ lenSourceLabels = None
+ lenDetectorLabels = None
+ lenWavelengths = None
+ lenSources = None
+ lenDetectors = None
+ # todo label validation of length against probe
if nirs.probe.sourceLabels is not None:
- lenSourceLabels = nirs.probe.sourceLabels.size
- else:
- lenSourceLabels = 0
+ lenSourceLabels = len(nirs.probe.sourceLabels)
if nirs.probe.detectorLabels is not None:
- lenDetectorLabels = nirs.probe.detectorLabels.size
- else:
- lenDetectorLabels = 0
+ lenDetectorLabels = len(nirs.probe.detectorLabels)
if nirs.probe.wavelengths is not None:
- lenWavelengths = nirs.probe.wavelengths.size
- else:
- lenWavelengths = 0
+ lenWavelengths = len(nirs.probe.wavelengths)
+ if nirs.probe.sourcePos2D is not None:
+ lenSources = nirs.probe.sourcePos2D.shape[0]
+ elif nirs.probe.sourcePos3D is not None:
+ lenSources = nirs.probe.sourcePos3D.shape[0]
+ if nirs.probe.detectorPos2D is not None:
+ lenDetectors = nirs.probe.detectorPos2D.shape[0]
+ elif nirs.probe.detectorPos3D is not None:
+ lenDetectors = nirs.probe.detectorPos3D.shape[0]
for data in nirs.data:
+ if data.measurementLists is not None:
+ if lenSourceLabels is not None and data.measurementLists.sourceIndex is not None and not np.all(
+ [
+ 0 < x <= lenSourceLabels
+ for x in data.measurementLists.sourceIndex
+ ]):
+ result._add(
+ data.measurementLists.location +
+ '/sourceIndex', 'INVALID_SOURCE_INDEX')
+ if lenSources is not None and data.measurementLists.sourceIndex is not None and not np.all(
+ [
+ 0 < x <= lenSources
+ for x in data.measurementLists.sourceIndex
+ ]):
+ result._add(
+ data.measurementLists.location +
+ '/sourceIndex', 'INVALID_SOURCE_INDEX')
+ if lenDetectorLabels is not None and data.measurementLists.detectorIndex is not None and not np.all(
+ [
+ 0 < x <= lenDetectorLabels
+ for x in data.measurementLists.detectorIndex
+ ]):
+ result._add(
+ data.measurementLists.location +
+ '/detectorIndex', 'INVALID_DETECTOR_INDEX')
+ if lenDetectors is not None and data.measurementLists.detectorIndex is not None and not np.all(
+ [
+ 0 < x <= lenDetectors
+ for x in data.measurementLists.detectorIndex
+ ]):
+ result._add(
+ data.measurementLists.location +
+ '/detectorIndex', 'INVALID_DETECTOR_INDEX')
+ if lenWavelengths is not None and data.measurementLists.wavelengthIndex is not None and not np.all(
+ [
+ 0 < x <= lenWavelengths
+ for x in data.measurementLists.wavelengthIndex
+ ]): # No wavelengths should raise a missing issue
+ result._add(
+ data.measurementLists.location +
+ '/wavelengthIndex', 'INVALID_WAVELENGTH_INDEX')
for ml in data.measurementList:
- if ml.sourceIndex is not None:
- if ml.sourceIndex > lenSourceLabels:
+ if ml.sourceIndex is not None and lenSources is not None:
+ if not 0 < ml.sourceIndex <= lenSources:
result._add(ml.location + '/sourceIndex',
'INVALID_SOURCE_INDEX')
- if ml.detectorIndex is not None:
- if ml.detectorIndex > lenDetectorLabels:
+ if ml.detectorIndex is not None and lenDetectors is not None:
+ if not 0 < ml.detectorIndex <= lenDetectors:
result._add(ml.location + '/detectorIndex',
'INVALID_DETECTOR_INDEX')
- if ml.wavelengthIndex is not None:
- if ml.wavelengthIndex > lenWavelengths:
+ if ml.wavelengthIndex is not None and lenWavelengths is not None:
+ if not 0 < ml.wavelengthIndex <= lenWavelengths:
result._add(ml.location + '/wavelengthIndex',
'INVALID_WAVELENGTH_INDEX')
+ super()._validate(result)
+
# -- Interface functions ----------------------------------------------------
diff --git a/snirf_specification_retrieved_26_12_24.txt b/snirf_specification_retrieved_26_12_24.txt
new file mode 100644
index 0000000..262fbe1
--- /dev/null
+++ b/snirf_specification_retrieved_26_12_24.txt
@@ -0,0 +1,1231 @@
+Shared Near Infrared Spectroscopy Format (SNIRF) Specification
+==============================================================
+
+* **Document Version**: v1.1
+* **License**: This document is in the public domain.
+
+## Table of Contents
+
+- [Introduction](#introduction)
+- [Data format](#data-format)
+- [SNIRF file specification](#snirf-file-specification)
+ * [SNIRF data format summary](#snirf-data-format-summary)
+ * [SNIRF data container definitions](#snirf-data-container-definitions)
+ * [formatVersion](#formatversion)
+ * [nirs](#nirsi)
+ * [metaDataTags](#nirsimetadatatags)
+ * [data](#nirsidataj)
+ * [data.dataTimeSeries](#nirsidatajdatatimeseries)
+ * [data.dataOffset](#nirsidatajdataoffset)
+ * [data.time](#nirsidatajtime)
+ * [data.measurementList](#nirsidatajmeasurementlistk)
+ * [data.measurementList.sourceIndex](#nirsidatajmeasurementlistksourceindex)
+ * [data.measurementList.detectorIndex](#nirsidatajmeasurementlistkdetectorindex)
+ * [data.measurementList.wavelengthIndex](#nirsidatajmeasurementlistkwavelengthindex)
+ * [data.measurementList.wavelengthActual](#nirsidatajmeasurementlistkwavelengthactual)
+ * [data.measurementList.wavelengthEmissionActual](#nirsidatajmeasurementlistkwavelengthemissionactual)
+ * [data.measurementList.dataType](#nirsidatajmeasurementlistkdatatype)
+ * [data.measurementList.dataUnit](#nirsidatajmeasurementlistkdataunit)
+ * [data.measurementList.dataTypeLabel](#nirsidatajmeasurementlistkdatatypelabel)
+ * [data.measurementList.dataTypeIndex](#nirsidatajmeasurementlistkdatatypeindex)
+ * [data.measurementList.sourcePower](#nirsidatajmeasurementlistksourcepower)
+ * [data.measurementList.detectorGain](#nirsidatajmeasurementlistkdetectorgain)
+ * [data.measurementLists](#nirsidatajmeasurementlists)
+ * [data.measurementLists.sourceIndex](#nirsidatajmeasurementlistssourceindex)
+ * [data.measurementLists.detectorIndex](#nirsidatajmeasurementlistsdetectorindex)
+ * [data.measurementLists.wavelengthIndex](#nirsidatajmeasurementlistswavelengthindex)
+ * [data.measurementLists.wavelengthActual](#nirsidatajmeasurementlistswavelengthactual)
+ * [data.measurementLists.wavelengthEmissionActual](#nirsidatajmeasurementlistswavelengthemissionactual)
+ * [data.measurementLists.dataType](#nirsidatajmeasurementlistsdatatype)
+ * [data.measurementLists.dataUnit](#nirsidatajmeasurementlistsdataunit)
+ * [data.measurementLists.dataTypeLabel](#nirsidatajmeasurementlistsdatatypelabel)
+ * [data.measurementLists.dataTypeIndex](#nirsidatajmeasurementlistsdatatypeindex)
+ * [data.measurementLists.sourcePower](#nirsidatajmeasurementlistssourcepower)
+ * [data.measurementLists.detectorGain](#nirsidatajmeasurementlistsdetectorgain)
+ * [stim](#nirsistimj)
+ * [stim.name](#nirsistimjname)
+ * [stim.data](#nirsistimjdata)
+ * [stim.dataLabels](#nirsistimjdatalabels)
+ * [probe](#nirsiprobe)
+ * [probe.wavelengths](#nirsiprobewavelengths)
+ * [probe.wavelengthsEmission](#nirsiprobewavelengthsemission)
+ * [probe.sourcePos2D](#nirsiprobesourcepos2d)
+ * [probe.sourcePos3D](#nirsiprobesourcepos3d)
+ * [probe.detectorPos2D](#nirsiprobedetectorpos2d)
+ * [probe.detectorPos3D](#nirsiprobedetectorpos3d)
+ * [probe.frequencies](#nirsiprobefrequencies)
+ * [probe.timeDelays](#nirsiprobetimedelays)
+ * [probe.timeDelayWidths](#nirsiprobetimedelaywidths)
+ * [probe.momentOrders](#nirsiprobemomentorders)
+ * [probe.correlationTimeDelays](#nirsiprobecorrelationtimedelays)
+ * [probe.correlationTimeDelayWidths](#nirsiprobecorrelationtimedelaywidths)
+ * [probe.sourceLabels](#nirsiprobesourcelabels)
+ * [probe.detectorLabels](#nirsiprobedetectorlabels)
+ * [probe.landmarkPos2D](#nirsiprobelandmarkpos2d)
+ * [probe.landmarkPos3D](#nirsiprobelandmarkpos3d)
+ * [probe.landmarkLabels](#nirsiprobelandmarklabelsj)
+ * [probe.coordinateSystem](#nirsiprobecoordinatesystem)
+ * [probe.coordinateSystemDescription](#nirsiprobecoordinatesystemdescription)
+ * [aux](#nirsiauxj)
+ * [aux.name](#nirsiauxjname)
+ * [aux.dataTimeSeries](#nirsiauxjdatatimeseries)
+ * [aux.dataUnit](#nirsiauxjdataunit)
+ * [aux.time](#nirsiauxjtime)
+ * [aux.timeOffset](#nirsiauxjtimeoffset)
+- [Appendix](#appendix)
+- [Acknowledgement](#acknowledgement)
+
+
+## Introduction
+
+The file format specification uses the extension `.snirf`. These are HDF5
+format files, renamed with the `.snirf` extension. For a program to be
+"SNIRF-compliant", it must be able to read and write the SNIRF file.
+
+The development of the SNIRF specification is conducted in an open manner using the GitHub
+platform. To contribute or provide feedback, visit [https://github.com/fNIRS/snirf](https://github.com/fNIRS/snirf).
+
+## Data format
+
+The HDF5 specifications are defined by the HDF5 Group and found at
+https://www.hdfgroup.org. Future HDF5 versions are expected to remain
+backwards compatible for the foreseeable future.
+
+The HDF5 format defines "groups" (`H5G` class) and "datasets" (`H5D` class)
+that are the two primary data organization and storage classes used in the
+SNIRF specification.
+
+The structure of each data file has a minimum of required elements noted below.
+
+Each element in the data structure is assigned one of the following 4 types:
+
+- `group`: a structure containing sub-fields (defined in the `H5G` object
+ class). Arrays of groups, also known as the indexed-groups, are denoted
+ with numbers at the end (e.g. `/nirs/data1`, `/nirs/data2`) starting with
+ index 1. Array indices should be contiguous with no skipped values
+ (an empty group with no sub-member is permitted).
+- `string`: a variable-length, null-terminated sequence of characters, i.e. `H5T_C_S1`
+ with size set to `H5T_VARIABLE`. (At this time HDF5 does not have a UTF-16 native type,
+ so `H5T_NATIVE_B16` will need to be converted to/from UTF-16 within the read/write code.)
+
+ > Strings MUST be stored in null-terminated 'variable-length' format to be considered valid. Fixed-length strings and variable-length strings are loaded differently by HDF5 interface implementations.
+- `integer`: the native integer type `H5T_NATIVE_INT` in `H5T` (alias of
+ `H5T_STD_I32BE` or `H5T_STD_I32LE`). Use of 64-bit `long` integer types such as `H5T_STD_I64LE` is *not recommended*, although most HDF5 interface implementations will not have issues converting between the two implicitly.
+- `numeric`: one of the native double or floating-point types;
+ `H5T_NATIVE_DOUBLE` or `H5T_NATIVE_FLOAT` in `H5T` (alias of
+ `H5T_IEEE_F64BE`,`H5T_IEEE_F64LE`, i.e. "double", or `H5T_IEEE_F32BE`,
+ `H5T_IEEE_F32LE`, i.e. "float")
+
+Datasets which are not arrays must be saved in [scalar dataspaces](http://davis.lbl.gov/Manuals/HDF5-1.8.7/UG/UG_frame12Dataspaces.html). It is NOT VALID to save Datasets which are not specified as arrays in simple dataspaces with 1 dimension and with size 1. HDF5 interface implementations distinguish between these two formats and exhibit different behavior depending on the format of the file.
+
+Valid arrays MUST:
+
+* Contain elements of a correct type as described above.
+* Occupy a [simple dataspace](http://davis.lbl.gov/Manuals/HDF5-1.8.7/UG/UG_frame12Dataspaces.html).
+* Have exactly the number of dimensions specified. A SNIRF field specified by this document as a `numeric 1-D array` must occupy a dataspace with `rank` of 1.
+
+> For code samples in various programming languages which demonstrate the writing of SNIRF-specified formats, see the [Appendix](#code-samples).
+
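+As a minimal, non-normative illustration of the rules above (a sketch using the
+Python `h5py` interface; the file and group names are examples only):
+
+```python
+import h5py
+import numpy as np
+
+with h5py.File('example.snirf', 'w') as f:
+    # A variable-length, null-terminated string in a scalar dataspace
+    f.create_dataset('/formatVersion', data='1.1',
+                     dtype=h5py.string_dtype(encoding='utf-8'))
+    # A numeric 1-D array occupying a simple dataspace of rank 1
+    f.create_dataset('/nirs1/probe/wavelengths',
+                     data=np.array([760.0, 850.0], dtype=np.float64))
+```
+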
+## SNIRF file specification
+
+The SNIRF data format must have the initial `H5G` group type `/nirs` at the
+initial file location.
+
+All indices (source, detector, wavelength, datatype etc) start at 1.
+
+All SNIRF data elements are associated with a unique HDF5 location path in the
+form of `/root/parent/.../name`. All paths must use `/nirs` or `/nirs#` (indexed group array).
+Note that the root `/nirs` can be either indexed or a non-indexed single entry.
+
+If a data element is an HDF5 group and contains multiple sub-groups, it is referred
+to as an **indexed group**. Each element of the sub-group is uniquely identified
+by appending a string-formatted index (starting from 1, with no preceding zeros)
+in the name, for example, `/.../name1` denotes the first sub-group of data element
+`name`, and `/.../name2` denotes the 2nd element, and so on.
+
+In the below sections, we use the notations `"(i)"`, `"(j)"`, or `"(k)"` inside the
+HDF5 location paths to denote the indices of sub-elements when multiplicity is present.
+
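+As a non-normative sketch, indexed-group members can be enumerated (here with
+Python and `h5py`; the helper name is illustrative) by matching the base name
+followed by a 1-based integer:
+
+```python
+import re
+import h5py
+
+def indexed_members(group, name):
+    """Return sub-groups name1, name2, ... of `group` in index order."""
+    pattern = re.compile(re.escape(name) + r'(\d+)')
+    found = []
+    for key in group.keys():
+        m = pattern.fullmatch(key)
+        if m is not None:
+            found.append((int(m.group(1)), key))
+    return [group[key] for _, key in sorted(found)]
+```
+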
+
+### SNIRF data format summary
+
+Note that this table serves as a machine-readable schema for the SNIRF format. Its format may not be altered.
+
+[//]: # (SCHEMA BEGIN)
+
+| SNIRF-formatted NIRS data structure    | Meaning of the data                           | Type           |
+|----------------------------------------|-----------------------------------------------|----------------|
+| `/formatVersion`                       | * SNIRF format version                        | `"s"` *        |
+| `/nirs{i}`                             | * Root-group for 1 or more NIRS datasets      | `{i}` *        |
+|     `metaDataTags`                     | * Root-group for metadata headers             | `{.}` *        |
+|         `SubjectID`                    | * Subject identifier                          | `"s"` *        |
+|         `MeasurementDate`              | * Date of the measurement                     | `"s"` *        |
+|         `MeasurementTime`              | * Time of the measurement                     | `"s"` *        |
+|         `LengthUnit`                   | * Length unit (case sensitive)                | `"s"` *        |
+|         `TimeUnit`                     | * Time unit (case sensitive)                  | `"s"` *        |
+|         `FrequencyUnit`                | * Frequency unit (case sensitive)             | `"s"` *        |
+|         ...                            | * Additional user-defined metadata entries    |                |
+|     `data{i}`                          | * Root-group for 1 or more data blocks        | `{i}` *        |
+|         `dataTimeSeries`               | * Time-varying signals from all channels      | `[[<f>,...]]`* |
+|         `time`                         | * Time (in `TimeUnit` defined in metaDataTag) | `[<f>,...]` *  |
+|         `dataOffset`                   | * Absolute offset for all channels            | `[<f>,...]`    |
+|         `measurementList{i}`           | * Per-channel source-detector information     | `{i}` *        |
+|             `sourceIndex`              | * Source index for a given channel            | `<i>` *        |
+|             `detectorIndex`            | * Detector index for a given channel          | `<i>` *        |
+|             `wavelengthIndex`          | * Wavelength index for a given channel        | `<i>` *        |
+|             `wavelengthActual`         | * Actual wavelength for a given channel       | `<f>`          |
+|             `wavelengthEmissionActual` | * Actual emission wavelength for a channel    | `<f>`          |
+|             `dataType`                 | * Data type for a given channel               | `<i>` *        |
+|             `dataUnit`                 | * SI unit for a given channel                 | `"s"`          |
+|             `dataTypeLabel`            | * Data type name for a given channel          | `"s"`          |
+|             `dataTypeIndex`            | * Data type index for a given channel         | `<i>` *        |
+|             `sourcePower`              | * Source power for a given channel            | `<f>`          |
+|             `detectorGain`             | * Detector gain for a given channel           | `<f>`          |
+|         `measurementLists`             | * Source-detector information                 | `{.}` *        |
+|             `sourceIndex`              | * Source index for each channel               | `[<i>,...]`*   |
+|             `detectorIndex`            | * Detector index for each channel             | `[<i>,...]`*   |
+|             `wavelengthIndex`          | * Wavelength index for each channel           | `[<i>,...]`*   |
+|             `wavelengthActual`         | * Actual wavelength for each channel          | `[<f>,...]`    |
+|             `wavelengthEmissionActual` | * Actual emission wavelength for each channel | `[<f>,...]`    |
+|             `dataType`                 | * Data type for each channel                  | `[<i>,...]`*   |
+|             `dataUnit`                 | * SI unit for each channel                    | `["s",...]`    |
+|             `dataTypeLabel`            | * Data type name for each channel             | `["s",...]`    |
+|             `dataTypeIndex`            | * Data type index for each channel            | `[<i>,...]`*   |
+|             `sourcePower`              | * Source power for each channel               | `[<f>,...]`    |
+|             `detectorGain`             | * Detector gain for each channel              | `[<f>,...]`    |
+|     `stim{i}`                          | * Root-group for stimulus measurements        | `{i}`          |
+|         `name`                         | * Name of the stimulus data                   | `"s"` +        |
+|         `data`                         | * Data stream of the stimulus channel         | `[[<f>,...]]` +|
+|         `dataLabels`                   | * Names of additional columns of stim data    | `["s",...]`    |
+|     `probe`                            | * Root group for NIRS probe information       | `{.}` *        |
+|         `wavelengths`                  | * List of wavelengths (in nm)                 | `[<f>,...]` *  |
+|         `wavelengthsEmission`          | * List of emission wavelengths (in nm)        | `[<f>,...]`    |
+|         `sourcePos2D`                  | * Source 2-D positions in `LengthUnit`        | `[[<f>,...]]`*1|
+|         `sourcePos3D`                  | * Source 3-D positions in `LengthUnit`        | `[[<f>,...]]`*1|
+|         `detectorPos2D`                | * Detector 2-D positions in `LengthUnit`      | `[[<f>,...]]`*2|
+|         `detectorPos3D`                | * Detector 3-D positions in `LengthUnit`      | `[[<f>,...]]`*2|
+|         `frequencies`                  | * Modulation frequency list                   | `[<f>,...]`    |
+|         `timeDelays`                   | * Time delays for gated time-domain data      | `[<f>,...]`    |
+|         `timeDelayWidths`              | * Time delay width for gated time-domain data | `[<f>,...]`    |
+|         `momentOrders`                 | * Moment orders of the moment TD data         | `[<f>,...]`    |
+|         `correlationTimeDelays`        | * Time delays for DCS measurements            | `[<f>,...]`    |
+|         `correlationTimeDelayWidths`   | * Time delay width for DCS measurements       | `[<f>,...]`    |
+|         `sourceLabels`                 | * String arrays specifying source names       | `[["s",...]]`  |
+|         `detectorLabels`               | * String arrays specifying detector names     | `["s",...]`    |
+|         `landmarkPos2D`                | * Anatomical landmark 2-D positions           | `[[<f>,...]]`  |
+|         `landmarkPos3D`                | * Anatomical landmark 3-D positions           | `[[<f>,...]]`  |
+|         `landmarkLabels`               | * String arrays specifying landmark names     | `["s",...]`    |
+|         `coordinateSystem`             | * Coordinate system used in probe description | `"s"`          |
+|         `coordinateSystemDescription`  | * Description of coordinate system            | `"s"`          |
+|     `aux{i}`                           | * Root-group for auxiliary measurements       | `{i}`          |
+|         `name`                         | * Name of the auxiliary channel               | `"s"` +        |
+|         `dataTimeSeries`               | * Data acquired from the auxiliary channel    | `[[<f>,...]]` +|
+|         `dataUnit`                     | * SI unit of the auxiliary channel            | `"s"`          |
+|         `time`                         | * Time (in `TimeUnit`) for auxiliary data     | `[<f>,...]` +  |
+|         `timeOffset`                   | * Time offset of auxiliary channel data       | `[<f>,...]`    |
+
+[//]: # (SCHEMA END)
+
+In the above schema table, the notations used are explained below:
+* `{.}` represents a simple HDF5 group
+* `{i}` represents an HDF5 group with one or multiple sub-groups (i.e. an indexed-group)
+* `<i>` represents an integer value
+* `<f>` represents a numeric value
+* `"s"` represents a string of arbitrary length
+* `[...]` represents a 1-D vector (dataset), can be empty
+* `[[...]]` represents a 2-D array (dataset), can be empty
+* `...` (optional) additional elements similar to the previous element
+* `*` in the last column indicates a required subfield
+* `*n` in the last column indicates that at least one of the subfields in the subgroup identified by `n` is required
+* `+` in the last column indicates a required subfield if the optional parent object is included
+
+### SNIRF data container definitions
+
+#### /formatVersion
+* **Presence**: required
+* **Type**: string
+* **Location**: `/formatVersion`
+
+This is a string that specifies the version of the file format. This document
+describes format version "1.1".
+
+#### /nirs(i)
+* **Presence**: required
+* **Type**: indexed group
+* **Location**: `/nirs(i)`
+
+This group stores one set of NIRS data. This can be extended by adding the count
+number (e.g. `/nirs1`, `/nirs2`,...) to the group name. This is intended to
+allow the storage of 1 or more complete NIRS datasets inside a single SNIRF
+document. For example, a two-subject hyperscanning measurement can be stored using the notation:
+* `/nirs1` = first subject's data
+* `/nirs2` = second subject's data
+
+The use of a non-indexed (e.g. `/nirs`) entry is allowed when only one entry
+is present and is assumed to be entry 1.
+
+
+#### /nirs(i)/metaDataTags
+* **Presence**: required
+* **Type**: group
+* **Location**: `/nirs(i)/metaDataTags`
+
+The `metaDataTags` group contains the metadata associated with the measurements.
+Each metadata record is represented as a dataset under this group - with the name of
+the record, i.e. the key, as the dataset's name, and the value of the record as the
+actual data stored in the dataset. Each metadata record can potentially have different
+data types. Sub-groups should not be used to organize metadata records: a member of the `metaDataTags` Group must be a Dataset.
+
+The below six metadata records are minimally required in a SNIRF file.
+
+#### /nirs(i)/metaDataTags/SubjectID
+* **Presence**: required as part of `metaDataTags`
+* **Type**: string
+* **Location**: `/nirs(i)/metaDataTags/SubjectID`
+
+This record stores the string-valued ID of the study subject or experiment.
+
+#### /nirs(i)/metaDataTags/MeasurementDate
+* **Presence**: required as part of `metaDataTags`
+* **Type**: string
+* **Location**: `/nirs(i)/metaDataTags/MeasurementDate`
+
+This record stores the date of the measurement as a string. The format of the date
+string must either be `"unknown"`, or follow the ISO 8601 date string format `YYYY-MM-DD`, where
+- `YYYY` is the 4-digit year
+- `MM` is the 2-digit month (padding zero if a single digit)
+- `DD` is the 2-digit date (padding zero if a single digit)
+
+#### /nirs(i)/metaDataTags/MeasurementTime
+* **Presence**: required as part of `metaDataTags`
+* **Type**: string
+* **Location**: `/nirs(i)/metaDataTags/MeasurementTime`
+
+This record stores the time of the measurement as a string. The format of the time
+string must either be `"unknown"` or follow the ISO 8601 time string format `hh:mm:ss.sTZD`, where
+- `hh` is the 2-digit hour
+- `mm` is the 2-digit minute
+- `ss` is the 2-digit second
+- `.s` is 1 or more digits representing a decimal fraction of a second (optional)
+- `TZD` is the time zone designator (`Z` or `+hh:mm` or `-hh:mm`)
+
+#### /nirs(i)/metaDataTags/LengthUnit
+* **Presence**: required as part of `metaDataTags`
+* **Type**: string
+* **Location**: `/nirs(i)/metaDataTags/LengthUnit`
+
+This record stores the **case-sensitive** SI length unit used in this
+measurement. Sample length units include "mm", "cm", and "m". A value of
+"um" is the same as "mm", i.e. micrometer.
+
+#### /nirs(i)/metaDataTags/TimeUnit
+* **Presence**: required as part of `metaDataTags`
+* **Type**: string
+* **Location**: `/nirs(i)/metaDataTags/TimeUnit`
+
+This record stores the **case-sensitive** SI time unit used in this
+measurement. Sample time units include "s" and "ms". A value of "us"
+is the same as "µs", i.e. microsecond.
+
+#### /nirs(i)/metaDataTags/FrequencyUnit
+* **Presence**: required as part of `metaDataTags`
+* **Type**: string
+* **Location**: `/nirs(i)/metaDataTags/FrequencyUnit`
+
+This record stores the **case-sensitive** SI frequency unit used in
+this measurement. Sample frequency units include "Hz", "MHz", and "GHz". Please
+note that "mHz" is milli-Hz while "MHz" denotes "mega-Hz" according to
+the SI unit system.
+
+We do not limit the total number of metadata records in the `metaDataTags`. Users
+can add additional customized metadata records; no duplicated metadata record names
+are allowed.
+
+Additional metadata record samples can be found in the below table.
+
+| Metadata Key Name | Metadata value |
+|-------------------|----------------|
+|ManufacturerName | "Company Name" |
+|Model | "Model Name" |
+|SubjectName | "LastName, FirstName" |
+|DateOfBirth | "YYYY-MM-DD" |
+|AcquisitionStartTime | "1569465620" |
+|StudyID | "Infant Brain Development" |
+|StudyDescription | "In this study, we measure ...." |
+|AccessionNumber | "##########################" |
+|InstanceNumber | 2 |
+|CalibrationFileName | "phantomcal_121015.snirf" |
+|UnixTime | "1569465667" |
+
+The metadata records `"StudyID"` and `"AccessionNumber"` are unique strings that
+can be used to link the current dataset to a particular study and a particular
+procedure, respectively. The `"StudyID"` tag is similar to the DICOM tag "Study
+ID" (0020,0010) and `"AccessionNumber"` is similar to the DICOM tag "Accession
+Number"(0008,0050), as defined in the DICOM standard (ISO 12052).
+
+The metadata record `"InstanceNumber"` is defined similarly to the DICOM tag
+"Instance Number" (0020,0013), and can be used as the sequence number to group
+multiple datasets into a larger dataset - for example, concatenating streamed
+data segments during a long measurement session.
+
+The metadata record `"UnixTime"` defines the Unix Epoch Time, i.e. the total elapse
+time in seconds since 1970-01-01T00:00:00Z (UTC) minus the leap seconds.
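+
+As a non-normative sketch, the six required records plus one custom record
+might be written with `h5py` as follows (all values are placeholders):
+
+```python
+import h5py
+
+str_t = h5py.string_dtype(encoding='utf-8')
+with h5py.File('example.snirf', 'a') as f:
+    tags = f.require_group('/nirs1/metaDataTags')
+    records = {
+        'SubjectID': 'subj-01',
+        'MeasurementDate': '2024-12-26',
+        'MeasurementTime': '12:00:00Z',
+        'LengthUnit': 'mm',
+        'TimeUnit': 's',
+        'FrequencyUnit': 'Hz',
+        'ManufacturerName': 'Company Name',  # a custom record
+    }
+    for key, value in records.items():
+        tags.create_dataset(key, data=value, dtype=str_t)
+```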
+
+#### /nirs(i)/data(j)
+* **Presence**: required
+* **Type**: indexed group
+* **Location**: `/nirs(i)/data(j)`
+
+This group stores one block of NIRS data. This can be extended by adding the
+count number (e.g. `data1`, `data2`,...) to the group name. This is intended to
+allow the storage of 1 or more blocks of NIRS data from within the same `/nirs`
+entry:
+* `/nirs/data1` = data block 1
+* `/nirs/data2` = data block 2
+
+
+#### /nirs(i)/data(j)/dataTimeSeries
+* **Presence**: required
+* **Type**: numeric 2-D array
+* **Location**: `/nirs(i)/data(j)/dataTimeSeries`
+
+This is the actual raw or processed data variable. This variable has dimensions
+of `<number of time points> x <number of channels>`. Columns in
+`dataTimeSeries` are mapped to the measurement list (`measurementList` variable
+described below).
+
+`dataTimeSeries` can be compressed using the HDF5 filter (using the built-in
+[`deflate`](https://portal.hdfgroup.org/display/HDF5/H5P_SET_DEFLATE)
+filter or [3rd party filters such as `305-LZO` or `307-bzip2`](https://portal.hdfgroup.org/display/support/Registered+Filter+Plugins)).
+
+Chunked data is allowed to support real-time streaming of data in this array.
+
+
+#### /nirs(i)/data(j)/dataOffset
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/data(j)/dataOffset`
+
+This stores an optional offset value per channel, which, when added to
+`/nirs(i)/data(j)/dataTimeSeries`, results in absolute data values.
+
+The length of this array is equal to the `<number of channels>` as represented
+by the second dimension in the `dataTimeSeries`.
+
+
+#### /nirs(i)/data(j)/time
+* **Presence**: required
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/data(j)/time`
+
+The `time` variable. This provides the acquisition time of the measurement
+relative to the time origin. This will usually be a straight line with slope
+equal to the acquisition frequency, but does not need to have equal spacing. For
+the special case of equal sample spacing an array of length `<2>` is allowed
+where the first entry is the start time and the
+second entry is the sample time spacing in `TimeUnit` specified in the
+`metaDataTags`. The default time unit is in second ("s"). For example,
+a time spacing of 0.2 (s) indicates a sampling rate of 5 Hz.
+
+* **Option 1** - The size of this variable is `<number of time points>` and
+ corresponds to the sample time of every data point
+* **Option 2** - The size of this variable is `<2>` and corresponds to the start
+ time and sample spacing.
+
+Chunked data is allowed to support real-time streaming of data in this array.
+
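+A brief non-normative sketch of recovering a full time vector from either
+representation (NumPy; the function name is illustrative):
+
+```python
+import numpy as np
+
+def full_time_vector(time, n_timepoints):
+    time = np.asarray(time)
+    if time.size == 2:  # Option 2: [start time, sample spacing]
+        return time[0] + time[1] * np.arange(n_timepoints)
+    return time         # Option 1: one sample time per data point
+```
+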
+#### /nirs(i)/data(j)/measurementList(k)
+* **Presence**: required if `measurementLists` is not present
+* **Type**: indexed group
+* **Location**: `/nirs(i)/data(j)/measurementList(k)`
+
+The measurement list. This variable serves to map the data array onto the probe
+geometry (sources and detectors), data type, and wavelength. This variable is
+an array structure that has the size `<number of channels>` that
+describes the corresponding column in the data matrix. For example, the
+`measurementList3` describes the third column of the data matrix (i.e.
+`dataTimeSeries(:,3)`).
+
+Each element of the array is a structure which describes the measurement
+conditions for this data with the following fields:
+
+
+#### /nirs(i)/data(j)/measurementList(k)/sourceIndex
+* **Presence**: required
+* **Type**: integer
+* **Location**: `/nirs(i)/data(j)/measurementList(k)/sourceIndex`
+
+Index of the source.
+
+#### /nirs(i)/data(j)/measurementList(k)/detectorIndex
+* **Presence**: required
+* **Type**: integer
+* **Location**: `/nirs(i)/data(j)/measurementList(k)/detectorIndex`
+
+Index of the detector.
+
+#### /nirs(i)/data(j)/measurementList(k)/wavelengthIndex
+* **Presence**: required
+* **Type**: integer
+* **Location**: `/nirs(i)/data(j)/measurementList(k)/wavelengthIndex`
+
+Index of the "nominal" wavelength (in `probe.wavelengths`).
+
+#### /nirs(i)/data(j)/measurementList(k)/wavelengthActual
+* **Presence**: optional
+* **Type**: numeric
+* **Location**: `/nirs(i)/data(j)/measurementList(k)/wavelengthActual`
+
+Actual (measured) wavelength in nm, if available, for the source in a given channel.
+
+#### /nirs(i)/data(j)/measurementList(k)/wavelengthEmissionActual
+* **Presence**: optional
+* **Type**: numeric
+* **Location**: `/nirs(i)/data(j)/measurementList(k)/wavelengthEmissionActual`
+
+Actual (measured) emission wavelength in nm, if available, for the source in a given channel.
+
+#### /nirs(i)/data(j)/measurementList(k)/dataType
+* **Presence**: required
+* **Type**: integer
+* **Location**: `/nirs(i)/data(j)/measurementList(k)/dataType`
+
+Data-type identifier. See Appendix for a list of possible values.
+
+#### /nirs(i)/data(j)/measurementList(k)/dataUnit
+* **Presence**: optional
+* **Type**: string
+* **Location**: `/nirs(i)/data(j)/measurementList(k)/dataUnit`
+
+International System of Units (SI units) identifier for the given channel. Encoding should follow the [CMIXF-12 standard](https://people.csail.mit.edu/jaffer/MIXF/CMIXF-12), avoiding special unicode symbols like U+03BC (μ) or U+00B5 (µ) and using '/' rather than 'per' for units such as `V/us`. The recommended export format is in unscaled units such as V, s, Mole.
+
+#### /nirs(i)/data(j)/measurementList(k)/dataTypeLabel
+* **Presence**: optional
+* **Type**: string
+* **Location**: `/nirs(i)/data(j)/measurementList(k)/dataTypeLabel`
+
+Data-type label. Only required if dataType is "processed" (`99999`). See Appendix
+for list of possible values.
+
+#### /nirs(i)/data(j)/measurementList(k)/dataTypeIndex
+* **Presence**: required
+* **Type**: integer
+* **Location**: `/nirs(i)/data(j)/measurementList(k)/dataTypeIndex`
+
+Data-type specific parameter index. The data type index specifies additional data type specific parameters that are further elaborated by other fields in the probe structure, as detailed below. Note that where multiple parameters are required, the same index must be used for each (examples include data types such as Time Domain and Diffuse Correlation Spectroscopy). One use of this parameter is as a stimulus condition index when `measurementList(k).dataType = 99999` (i.e., `processed`) and `measurementList(k).dataTypeLabel = 'HRF ...'`.
+
+#### /nirs(i)/data(j)/measurementList(k)/sourcePower
+* **Presence**: optional
+* **Type**: numeric
+* **Location**: `/nirs(i)/data(j)/measurementList(k)/sourcePower`
+
+Source power for the given channel. The units are not defined, unless the user takes the option of using a `metaDataTag` as described below.
+
+#### /nirs(i)/data(j)/measurementList(k)/detectorGain
+* **Presence**: optional
+* **Type**: numeric
+* **Location**: `/nirs(i)/data(j)/measurementList(k)/detectorGain`
+
+Detector gain for the given channel.
+
+For example, if `measurementList5` is a structure with `sourceIndex=2`,
+`detectorIndex=3`, `wavelengthIndex=1`, `dataType=1`, and `dataTypeIndex=1`, this
+would imply that the data in the 5th column of the `dataTimeSeries` variable was
+measured with source #2 and detector #3 at wavelength #1. Wavelengths (in
+nanometers) are described in the `probe.wavelengths` variable (described
+later). The data type in this case is 1, implying that it was a continuous wave
+measurement. The complete list of currently supported data types is found in
+the Appendix. The data type index specifies additional data type specific
+parameters that are further elaborated by other fields in the `probe`
+structure, as detailed below. Note that the Time Domain and Diffuse Correlation
+Spectroscopy data types have two additional parameters and so the data type
+index must be a vector with 2 elements that index the additional parameters.
+
+`sourcePower` provides the option for information about the source power for
+that channel to be saved along with the data. The units are not defined, unless
+the user takes the option of using a `metaDataTag` described below to define,
+for instance, `sourcePowerUnit`. `detectorGain` provides the option for
+information about the detector gain for that channel to be saved along with the
+data.
+
+Note: The source indices generally refer to the optode naming (probe
+positions) and not necessarily the physical laser numbers on the instrument.
+The same is true for the detector indices. Each source optode would generally,
+but not necessarily, have 2 or more wavelengths (hence lasers) plugged into it
+in order to calculate deoxy- and oxy-hemoglobin concentrations. The data from
+these two wavelengths will be indexed by the same source, detector, and data
+type values, but have different wavelength indices. Using the same source index
+for lasers at the same location but with different wavelengths simplifies the
+bookkeeping for converting intensity measurements into concentration changes.
+As described below, optional variables `probe.sourceLabels` and
+`probe.detectorLabels` are provided for indicating the instrument specific
+label for sources and detectors.
+
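+A minimal h5py sketch of encoding one channel in this indexed-group format
+(the file name and index values are illustrative only, not normative):
+
+```python
+import h5py
+
+with h5py.File("example.snirf", "r+") as f:  # hypothetical file
+    ml = f.create_group("/nirs1/data1/measurementList1")
+    ml.create_dataset("sourceIndex", dtype="i4", data=2)
+    ml.create_dataset("detectorIndex", dtype="i4", data=3)
+    ml.create_dataset("wavelengthIndex", dtype="i4", data=1)
+    ml.create_dataset("dataType", dtype="i4", data=1)      # CW amplitude
+    ml.create_dataset("dataTypeIndex", dtype="i4", data=1)
+```
+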
+#### /nirs(i)/data(j)/measurementLists
+* **Presence**: required if `measurementList` is not present
+* **Type**: group
+* **Location**: `/nirs(i)/data(j)/measurementLists`
+
+The group for measurement list variables which map the data array onto the probe geometry (sources and detectors), data type, and wavelength. This group's datasets are arrays with size `<number of channels>`, with each position describing the corresponding column in the data matrix (i.e. the values at `measurementLists/sourceIndex(3)` and `measurementLists/detectorIndex(3)` correspond to `dataTimeSeries(:,3)`).
+
+This group is required only if the indexed-group format `/nirs(i)/data(j)/measurementList(k)` is not used to encode the measurement list. `measurementLists` is an alternative that may offer better performance for larger probes.
+
+The arrays of `measurementLists` are:
+
+#### /nirs(i)/data(j)/measurementLists/sourceIndex
+* **Presence**: required if `measurementLists` is present
+* **Type**: integer 1-D array
+* **Location**: `/nirs(i)/data(j)/measurementLists/sourceIndex`
+
+Source indices for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+#### /nirs(i)/data(j)/measurementLists/detectorIndex
+* **Presence**: required if `measurementLists` is present
+* **Type**: integer 1-D array
+* **Location**: `/nirs(i)/data(j)/measurementLists/detectorIndex`
+
+Detector indices for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+#### /nirs(i)/data(j)/measurementLists/wavelengthIndex
+* **Presence**: required if `measurementLists` is present
+* **Type**: integer 1-D array
+* **Location**: `/nirs(i)/data(j)/measurementLists/wavelengthIndex`
+
+Index of the "nominal" wavelength (in `probe.wavelengths`) for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+#### /nirs(i)/data(j)/measurementLists/wavelengthActual
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/data(j)/measurementLists/wavelengthActual`
+
+Actual (measured) wavelength in nm, if available, for the source in each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+#### /nirs(i)/data(j)/measurementLists/wavelengthEmissionActual
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/data(j)/measurementLists/wavelengthEmissionActual`
+
+Actual (measured) emission wavelength in nm, if available, for the source in each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+#### /nirs(i)/data(j)/measurementLists/dataType
+* **Presence**: required if `measurementLists` is present
+* **Type**: integer 1-D array
+* **Location**: `/nirs(i)/data(j)/measurementLists/dataType`
+
+A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. See Appendix for list of possible values.
+
+#### /nirs(i)/data(j)/measurementLists/dataUnit
+* **Presence**: optional
+* **Type**: string 1-D array
+* **Location**: `/nirs(i)/data(j)/measurementLists/dataUnit`
+
+International System of Units (SI units) identifier for each channel. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+#### /nirs(i)/data(j)/measurementLists/dataTypeLabel
+* **Presence**: optional
+* **Type**: string 1-D array
+* **Location**: `/nirs(i)/data(j)/measurementLists/dataTypeLabel`
+
+Data-type label. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`.
+
+#### /nirs(i)/data(j)/measurementLists/dataTypeIndex
+* **Presence**: required if `measurementLists` is present
+* **Type**: integer 1-D array
+* **Location**: `/nirs(i)/data(j)/measurementLists/dataTypeIndex`
+
+Data-type specific parameter indices. A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. Note that the Time Domain and Diffuse Correlation Spectroscopy data types have two additional parameters and so `dataTypeIndex` must be a 2-D array with 2 columns that index the additional parameters.
+
+#### /nirs(i)/data(j)/measurementLists/sourcePower
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/data(j)/measurementLists/sourcePower`
+
+A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. Units are optionally defined in `metaDataTags`.
+
+#### /nirs(i)/data(j)/measurementLists/detectorGain
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/data(j)/measurementLists/detectorGain`
+
+A 1-D array with length equal to the size of the second dimension of `/nirs(i)/data(j)/dataTimeSeries`. Units are optionally defined in `metaDataTags`.
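+
+Gathering the fields above, a matching non-normative h5py sketch of the
+`measurementLists` encoding for a hypothetical three-channel measurement
+(values are illustrative only):
+
+```python
+import h5py
+import numpy as np
+
+with h5py.File("example.snirf", "r+") as f:  # hypothetical file
+    mls = f.create_group("/nirs1/data1/measurementLists")
+    # One entry per channel, i.e. per column of dataTimeSeries
+    mls.create_dataset("sourceIndex", dtype="i4", data=np.array([1, 1, 2]))
+    mls.create_dataset("detectorIndex", dtype="i4", data=np.array([1, 2, 2]))
+    mls.create_dataset("wavelengthIndex", dtype="i4", data=np.array([1, 1, 1]))
+    mls.create_dataset("dataType", dtype="i4", data=np.array([1, 1, 1]))
+    mls.create_dataset("dataTypeIndex", dtype="i4", data=np.array([1, 1, 1]))
+```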
+
+#### /nirs(i)/stim(j)
+* **Presence**: optional
+* **Type**: indexed group
+* **Location**: `/nirs(i)/stim(j)`
+
+This is an array describing any stimulus conditions. Each element of the array
+has the following required fields.
+
+
+#### /nirs(i)/stim(j)/name
+* **Presence**: required as part of `stim(j)`
+* **Type**: string
+* **Location**: `/nirs(i)/stim(j)/name`
+
+This is a string describing the jth stimulus condition.
+
+
+#### /nirs(i)/stim(j)/data
+* **Presence**: required as part of `stim(j)`
+* **Type**: numeric 2-D array
+* **Location**: `/nirs(i)/stim(j)/data`
+* **Allowed attribute**: `names`
+
+This is a numeric 2-D array with at least 3 columns, specifying the stimulus
+time course for the jth condition. Each row corresponds to a
+specific stimulus trial. The first three columns indicate `[starttime duration value]`.
+The starttime, in seconds, is the time relative to the time origin when the
+stimulus takes on a value; the duration is the time in seconds that the stimulus
+value continues; and the value is the stimulus amplitude. The number of rows is
+not constrained (see examples in the Appendix).
+
+Additional columns can be used to store user-specified data associated with
+each stimulus trial. An optional record `/nirs(i)/stim(j)/dataLabels` can be
+used to annotate the meanings of each data column.
+
+#### /nirs(i)/stim(j)/dataLabels
+* **Presence**: optional
+* **Type**: string 1-D array
+* **Location**: `/nirs(i)/stim(j)/dataLabels(k)`
+
+This is a string array providing annotations for each data column in
+`/nirs(i)/stim(j)/data`. Each element of the array must be a string;
+the total length of this array must equal the number of columns
+of `/nirs(i)/stim(j)/data`, including the first 3 required columns.
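+
+A hedged h5py sketch of a stim group with one extra annotated column (the
+names and values are illustrative only):
+
+```python
+import h5py
+import numpy as np
+
+with h5py.File("example.snirf", "r+") as f:  # hypothetical file
+    stim = f.create_group("/nirs1/stim1")
+    str_dtype = h5py.string_dtype(encoding="ascii", length=None)
+    stim.create_dataset("name", dtype=str_dtype, data="tapping")
+    # Rows are trials of [starttime duration value extra]; column 4 is user-defined
+    stim.create_dataset("data", dtype="f8",
+                        data=np.array([[0.2, 0.2, 1.0, 7.0],
+                                       [0.6, 0.2, 1.0, 8.0]]))
+    stim.create_dataset("dataLabels", dtype=str_dtype,
+                        data=["starttime", "duration", "value", "responseTime"])
+```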
+
+#### /nirs(i)/probe
+* **Presence**: required
+* **Type**: group
+* **Location**: `/nirs(i)/probe`
+
+This is a structured variable that describes the probe (source-detector)
+geometry. This variable has a number of required fields.
+
+#### /nirs(i)/probe/wavelengths
+* **Presence**: required
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/probe/wavelengths`
+
+This field describes the "nominal" wavelengths used (in `nm` unit). This is indexed by the
+`wavelengthIndex` of the measurementList variable. For example,
+`probe.wavelengths = [690, 780, 830]` implies that the measurements were taken
+at three wavelengths (690 nm, 780 nm, and 830 nm). The
+`measurementList(k).wavelengthIndex` variable refers to this field:
+`measurementList(k).wavelengthIndex = 2` means the kth measurement
+was at 780 nm.
+
+Please note that this field stores the "nominal" wavelengths. If the precise
+(measured) wavelengths differ from the nominal wavelengths, one can store those
+in the `measurementList.wavelengthActual` field in a per-channel fashion.
+
+The number of wavelengths is not limited (except that at least two are needed
+to calculate the two forms of hemoglobin). Each source-detector pair would
+generally have measurements at all wavelengths.
+
+This field must be present, but can be empty, for example, in the case that the stored
+data are processed data (`dataType=99999`, see Appendix).
+
+
+#### /nirs(i)/probe/wavelengthsEmission
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/probe/wavelengthsEmission`
+
+This field is required only for fluorescence data types, and describes the
+"nominal" emission wavelengths used (in `nm` unit). This variable is indexed by the same
+`wavelengthIndex` in `measurementList` used for `probe.wavelengths`, such that the
+excitation wavelength is paired with this emission wavelength for a given measurement.
+
+Please note that this field stores the "nominal" emission wavelengths. If the precise
+(measured) emission wavelengths differ from the nominal ones, one can store those
+in the `measurementList.wavelengthEmissionActual` field in a per-channel fashion.
+
+
+#### /nirs(i)/probe/sourcePos2D
+* **Presence**: at least one of `sourcePos2D` or `sourcePos3D` is required
+* **Type**: numeric 2-D array
+* **Location**: `/nirs(i)/probe/sourcePos2D`
+
+This field describes the position (in `LengthUnit` units) of each source
+optode. The positions are coordinates in a flattened 2D probe layout.
+This field has size `<number of sources> x 2`. For example,
+`probe.sourcePos2D(1,:) = [1.4 1]` with `LengthUnit='cm'` places source
+number 1 at x=1.4 cm and y=1 cm.
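+
+A brief non-normative h5py sketch of writing 2-D probe geometry (the
+coordinates are illustrative only):
+
+```python
+import h5py
+import numpy as np
+
+with h5py.File("example.snirf", "r+") as f:  # hypothetical file
+    # <number of sources> x 2 and <number of detectors> x 2, in LengthUnit units
+    f.create_dataset("/nirs1/probe/sourcePos2D", dtype="f8",
+                     data=np.array([[1.4, 1.0], [2.4, 1.0]]))
+    f.create_dataset("/nirs1/probe/detectorPos2D", dtype="f8",
+                     data=np.array([[1.9, 1.0], [2.9, 1.0]]))
+```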
+
+
+#### /nirs(i)/probe/sourcePos3D
+* **Presence**: at least one of `sourcePos2D` or `sourcePos3D` is required
+* **Type**: numeric 2-D array
+* **Location**: `/nirs(i)/probe/sourcePos3D`
+
+This field describes the position (in `LengthUnit` units) of each source
+optode in 3D. This field has size `<number of sources> x 3`.
+
+
+#### /nirs(i)/probe/detectorPos2D
+* **Presence**: at least one of `detectorPos2D` or `detectorPos3D` is required
+* **Type**: numeric 2-D array
+* **Location**: `/nirs(i)/probe/detectorPos2D`
+
+Same as `probe.sourcePos2D`, but describing the detector positions in a
+flattened 2D probe layout.
+
+
+#### /nirs(i)/probe/detectorPos3D
+* **Presence**: at least one of `detectorPos2D` or `detectorPos3D` is required
+* **Type**: numeric 2-D array
+* **Location**: `/nirs(i)/probe/detectorPos3D`
+
+This field describes the position (in `LengthUnit` units) of each detector
+optode in 3D, defined similarly to `sourcePos3D`.
+
+
+#### /nirs(i)/probe/frequencies
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/probe/frequencies`
+
+This field describes the frequencies used (in `FrequencyUnit` units) for
+frequency domain measurements. This field is only required for frequency
+domain data types, and is indexed by `measurementList(k).dataTypeIndex`.
+
+
+#### /nirs(i)/probe/timeDelays
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/probe/timeDelays`
+
+This field describes the time delays (in `TimeUnit` units) used for gated time domain measurements.
+This field is only required for gated time domain data types, and is indexed by
+`measurementList(k).dataTypeIndex`. The indexing of this field is paired with
+the indexing of `probe.timeDelayWidths`.
+
+
+#### /nirs(i)/probe/timeDelayWidths
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/probe/timeDelayWidths`
+
+This field describes the time delay widths (in `TimeUnit` units) used for gated time domain
+measurements. This field is only required for gated time domain data types, and
+is indexed by `measurementList(k).dataTypeIndex`. The indexing of this field
+is paired with the indexing of `probe.timeDelays`.
+
+
+#### /nirs(i)/probe/momentOrders
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/probe/momentOrders`
+
+This field describes the moment orders of the temporal point spread function (TPSF) or the distribution of time-of-flight (DTOF)
+for moment time domain measurements. This field is only required for moment time domain data types, and is indexed by `measurementList(k).dataTypeIndex`.
+Note that the numeric value in this array is the exponent in the integral used for calculating the moments. For detailed/specific definitions of moments, see [Wabnitz et al, 2020](https://doi.org/10.1364/BOE.396585); for general definitions of moments see [here](https://en.wikipedia.org/wiki/Moment_(mathematics)).
+
+In brief, given a TPSF or DTOF N(t) (photon counts vs. photon arrival time at the detector): \
+momentOrder = 0: total counts: `N_total = \int N(t) dt` \
+momentOrder = 1: mean time of flight: `m = <t> = (1/N_total) \int t N(t) dt` \
+momentOrder = 2: variance/second central moment: `V = (1/N_total) \int (t - <t>)^2 N(t) dt` \
+Please note that all moments of order >= 1 are expected to be normalized by the total counts (i.e. the 0th-order moment); additionally, all moments of order >= 2 are expected to be centralized.
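+
+Restating the definitions above in LaTeX (a summary of the conventions, not
+additional normative text):
+
+```latex
+m_0 = \int N(t)\,dt, \qquad
+\langle t \rangle = \frac{1}{m_0}\int t\,N(t)\,dt, \qquad
+m_n = \frac{1}{m_0}\int \left(t - \langle t \rangle\right)^{n} N(t)\,dt \quad (n \ge 2)
+```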
+
+
+#### /nirs(i)/probe/correlationTimeDelays
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/probe/correlationTimeDelays`
+
+This field describes the time delays (in `TimeUnit` units) used for diffuse correlation spectroscopy
+measurements. This field is only required for diffuse correlation spectroscopy
+data types, and is indexed by `measurementList(k).dataTypeIndex`. The indexing
+of this field is paired with the indexing of `probe.correlationTimeDelayWidths`.
+
+
+#### /nirs(i)/probe/correlationTimeDelayWidths
+* **Presence**: optional
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/probe/correlationTimeDelayWidths`
+
+This field describes the time delay widths (in `TimeUnit` units) used for diffuse correlation
+spectroscopy measurements. This field is only required for diffuse correlation
+spectroscopy data types, and is indexed by `measurementList(k).dataTypeIndex`. The indexing
+of this field is paired with the indexing of `probe.correlationTimeDelays`.
+
+
+#### /nirs(i)/probe/sourceLabels
+* **Presence**: optional
+* **Type**: string 2-D array
+* **Location**: `/nirs(i)/probe/sourceLabels(j)`
+
+This is a string array providing user friendly or instrument specific labels
+for each source. Each element of the array must be a unique string among both
+`probe.sourceLabels` and `probe.detectorLabels`. This can be of size
+`<number of sources> x 1` or `<number of sources> x <number of wavelengths>`.
+This is indexed by `measurementList(k).sourceIndex` and
+`measurementList(k).wavelengthIndex`.
+
+
+#### /nirs(i)/probe/detectorLabels
+* **Presence**: optional
+* **Type**: string 1-D array
+* **Location**: `/nirs(i)/probe/detectorLabels(j)`
+
+This is a string array providing user friendly or instrument specific labels
+for each detector. Each element of the array must be a unique string among both
+`probe.sourceLabels` and `probe.detectorLabels`. This is indexed by
+`measurementList(k).detectorIndex`.
+
+
+#### /nirs(i)/probe/landmarkPos2D
+* **Presence**: optional
+* **Type**: numeric 2-D array
+* **Location**: `/nirs(i)/probe/landmarkPos2D`
+
+This is a 2-D array storing the neurological landmark positions projected
+along the 2-D (flattened) probe plane in order to map optical data from the
+flattened optode positions to brain anatomy. This array should contain a minimum
+of 2 columns, representing the x and y coordinates (in `LengthUnit` units)
+of the 2-D projected landmark positions. If a 3rd column is present, it stores
+the index to the labels of the given landmark. Label names are stored in the
+`probe.landmarkLabels` subfield. A label index of 0 refers to an undefined landmark.
+
+
+#### /nirs(i)/probe/landmarkPos3D
+* **Presence**: optional
+* **Type**: numeric 2-D array
+* **Location**: `/nirs(i)/probe/landmarkPos3D`
+
+This is a 2-D array storing the neurological landmark positions measured
+from 3-D digitization and tracking systems to facilitate the registration and
+mapping of optical data to brain anatomy. This array should contain a minimum
+of 3 columns, representing the x, y and z coordinates (in `LengthUnit` units)
+of the digitized landmark positions. If a 4th column is present, it stores the
+index to the labels of the given landmark. Label names are stored in the
+`probe.landmarkLabels` subfield. A label index of 0 refers to an undefined landmark.
+
+
+#### /nirs(i)/probe/landmarkLabels
+* **Presence**: optional
+* **Type**: string 1-D array
+* **Location**: `/nirs(i)/probe/landmarkLabels(j)`
+
+This string array stores the names of the landmarks. The first string denotes
+the name of the landmarks with a label index of 1 in the label-index column of
+`probe.landmarkPos2D` or `probe.landmarkPos3D`, and so on. One can adopt the commonly used 10-20 landmark
+names, such as "Nasion", "Inion", "Cz" etc, or use user-defined landmark
+labels. The landmark label can also use the unique source and detector labels
+defined in `probe.sourceLabels` and `probe.detectorLabels`, respectively, to
+associate the given landmark to a specific source or detector. All strings are
+ASCII encoded char arrays.
+
+
+#### /nirs(i)/probe/coordinateSystem
+* **Presence**: optional
+* **Type**: string
+* **Location**: `/nirs(i)/probe/coordinateSystem`
+
+Defines the coordinate system for sensor positions.
+The string must be one of the coordinate systems listed in the
+[BIDS specification (Appendix VII)](https://bids-specification.readthedocs.io/en/stable/99-appendices/08-coordinate-systems.html#standard-template-identifiers)
+such as "MNI152NLin2009bAsym", "CapTrak" or "Other".
+If the value "Other" is specified, then a definition of the coordinate
+system must be provided in `/nirs(i)/probe/coordinateSystemDescription`.
+See the [FieldTrip toolbox web page](https://www.fieldtriptoolbox.org/faq/coordsys/)
+for detailed descriptions of different coordinate systems.
+
+
+#### /nirs(i)/probe/coordinateSystemDescription
+* **Presence**: optional
+* **Type**: string
+* **Location**: `/nirs(i)/probe/coordinateSystemDescription`
+
+Free-form text description of the coordinate system.
+May also include a link to a documentation page or
+paper describing the system in greater detail.
+This field is required if the `coordinateSystem` field is set to "Other".
+
+
+#### /nirs(i)/aux(j)
+* **Presence**: optional
+* **Type**: indexed group
+* **Location**: `/nirs(i)/aux(j)`
+
+This optional array specifies any recorded auxiliary data. Each element of
+`aux` has the following required fields:
+
+#### /nirs(i)/aux(j)/name
+* **Presence**: optional; required if `aux` is used
+* **Type**: string
+* **Location**: `/nirs(i)/aux(j)/name`
+
+This is a string describing the jth auxiliary data timecourse. While auxiliary data can be given any title, standard names for commonly used auxiliary channels (i.e. accelerometer data) are specified in the appendix.
+
+#### /nirs(i)/aux(j)/dataTimeSeries
+* **Presence**: optional; required if `aux` is used
+* **Type**: numeric 2-D array
+* **Location**: `/nirs(i)/aux(j)/dataTimeSeries`
+
+This is the aux data variable. This variable has dimensions of `<number of time points> x <number of channels>`. If multiple channels of related data are generated by a system, they may be encoded in the multiple columns of the time series (i.e. complex numbers). For example, a system containing more than one accelerometer may output this data as a set of `ACCEL_X`/`ACCEL_Y`/`ACCEL_Z` auxiliary time series, where each has the dimension of `<number of time points> x <number of accelerometers>`. Note that it is NOT recommended to encode the various accelerometer dimensions as multiple channels of the same `aux` Group: instead follow the `"ACCEL_X"`, `"ACCEL_Y"`, `"ACCEL_Z"` naming conventions described in the appendix. Chunked data is allowed to support real-time data streaming.
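+
+A minimal non-normative h5py sketch of one auxiliary channel (the names and
+values are illustrative only):
+
+```python
+import h5py
+import numpy as np
+
+with h5py.File("example.snirf", "r+") as f:  # hypothetical file
+    aux = f.create_group("/nirs1/aux1")
+    str_dtype = h5py.string_dtype(encoding="ascii", length=None)
+    aux.create_dataset("name", dtype=str_dtype, data="ACCEL_X")
+    # <number of time points> x <number of accelerometers>
+    aux.create_dataset("dataTimeSeries", dtype="f8", data=np.random.rand(100, 1))
+    aux.create_dataset("time", dtype="f8", data=np.array([0.0, 0.2]))
+```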
+
+#### /nirs(i)/aux(j)/dataUnit
+* **Presence**: optional
+* **Type**: string
+* **Location**: `/nirs(i)/aux(j)/dataUnit`
+
+International System of Units (SI units) identifier for the given channel. Encoding should follow the [CMIXF-12 standard](https://people.csail.mit.edu/jaffer/MIXF/CMIXF-12), avoiding special unicode symbols like U+03BC (μ) or U+00B5 (µ) and using '/' rather than 'per' for units such as `V/us`. The recommended export format is in unscaled units such as V, s, Mole.
+
+#### /nirs(i)/aux(j)/time
+* **Presence**: optional; required if `aux` is used
+* **Type**: numeric 1-D array
+* **Location**: `/nirs(i)/aux(j)/time`
+
+The time variable. This provides the acquisition time (in `TimeUnit` units)
+of the aux measurement relative to the time origin. This will usually be
+a straight line with slope equal to the sampling period (the inverse of the
+acquisition frequency), but the samples do not need to be equally spaced. The
+size of this variable is `<number of time points>` or `<2>`, similar to the
+definition of the `/nirs(i)/data(j)/time` field.
+
+Chunked data is allowed to support real-time data streaming.
+
+#### /nirs(i)/aux(j)/timeOffset
+* **Presence**: optional
+* **Type**: numeric
+* **Location**: `/nirs(i)/aux(j)/timeOffset`
+
+This variable specifies the offset of the file time origin relative to absolute
+(clock) time in `TimeUnit` units.
+
+
+## Appendix
+
+### Supported `measurementList(k).dataType` values in `dataTimeSeries`
+
++ 001-100: Raw - Continuous Wave (CW)
+ - 001 - Amplitude
+ - 051 - Fluorescence Amplitude
+
++ 101-200: Raw - Frequency Domain (FD)
+ - 101 - AC Amplitude
+ - 102 - Phase
+ - 151 - Fluorescence Amplitude
+ - 152 - Fluorescence Phase
+
++ 201-300: Raw - Time Domain - Gated (TD Gated)
+ - 201 - Amplitude
+ - 251 - Fluorescence Amplitude
++ 301-400: Raw - Time domain - Moments (TD Moments)
+ - 301 - Amplitude
+ - 351 - Fluorescence Amplitude
++ 401-500: Raw - Diffuse Correlation Spectroscopy (DCS)
+ - 401 - g2
+ - 410 - BFi
++ 99999: Processed
+
+
+### Supported `measurementList(k).dataTypeLabel` values in `dataTimeSeries`
+
+| Tag Name | Meanings |
+|-----------|------------------------------------------------------------------|
+|"dOD" | Change in optical density |
+|"dMean" | Change in mean time-of-flight |
+|"dVar" | Change in variance (2nd central moment) |
+|"dSkew" | Change in skewness (3rd central moment) |
+|"mua" | Absorption coefficient |
+|"musp" | Scattering coefficient |
+|"HbO" | Oxygenated hemoglobin (oxyhemoglobin) concentration |
+|"HbR" | Deoxygenated hemoglobin (deoxyhemoglobin) concentration |
+|"HbT" | Total hemoglobin concentration |
+|"H2O" | Water content |
+|"Lipid" | Lipid concentration |
+|"StO2" | Tissue oxygen saturation |
+|"BFi" | Blood flow index |
+|"HRF dOD" | Hemodynamic response function for change in optical density |
+|"HRF dMean"| HRF for change in mean time-of-flight |
+|"HRF dVar" | HRF for change in variance (2nd central moment) |
+|"HRF dSkew"| HRF for change in skewness (3rd central moment) |
+|"HRF HbO" | Hemodynamic response function for oxyhemoglobin concentration |
+|"HRF HbR" | Hemodynamic response function for deoxyhemoglobin concentration |
+|"HRF HbT" | Hemodynamic response function for total hemoglobin concentration |
+|"HRF BFi" | Hemodynamic response function for blood flow index |
+
+
+### Supported `/nirs(i)/aux(j)/name` values
+
+| Tag Name | Meanings |
+|-----------|------------------------------------------------------------------|
+|"ACCEL_X" | Accelerometer data, first axis of orientation |
+|"ACCEL_Y" | Accelerometer data, second axis of orientation |
+|"ACCEL_Z" | Accelerometer data, third axis of orientation |
+|"GYRO_X" | Gyrometer data, first axis of orientation |
+|"GYRO_Y" | Gyrometer data, second axis of orientation |
+|"GYRO_Z" | Gyrometer data, third axis of orientation |
+|"MAGN_X" | Magnetometer data, first axis of orientation |
+|"MAGN_Y" | Magnetometer data, second axis of orientation |
+|"MAGN_Z" | Magnetometer data, third axis of orientation |
+
+
+### Examples of stimulus waveforms
+
+Assume there are 10 time points, starting at zero, spaced 0.1 s apart. If we
+take the stimulus to be a repeating block of 0.2 seconds off followed by 0.2
+seconds on, it would be specified as follows:
+```
+ [0.2 0.2 1.0]
+ [0.6 0.2 1.0]
+```
+
+### Code samples
+
+The following code demonstrates how to use the Python `h5py` and `numpy` libraries and the MATLAB `H5ML.hdf5lib2` "low-level" interface to write specified SNIRF datatypes to disk as HDF5 Datasets of the proper format.
+
+#### String `"s"`
+
+**MATLAB**
+```matlab
+fid = H5F.open(<filename>, 'H5F_ACC_RDWR', 'H5P_DEFAULT')
+sid = H5S.create('H5S_SCALAR')
+tid = H5T.copy('H5T_C_S1');
+H5T.set_size(tid, 'H5T_VARIABLE');
+did = H5D.create(fid, <location>, tid, sid, 'H5P_DEFAULT')
+H5D.write(did, tid, 'H5S_ALL', 'H5S_ALL', 'H5P_DEFAULT', <string to write>)
+```
+**Python**
+```python
+file = h5py.File(<filename>, 'r+')
+varlen_str_dtype = h5py.string_dtype(encoding='ascii', length=None)
+file.create_dataset(<location>, dtype=varlen_str_dtype, data=<string to write>)
+```
+
+#### numeric `<f>`
+
+**MATLAB**
+```matlab
+fid = H5F.open(<filename>, 'H5F_ACC_RDWR', 'H5P_DEFAULT')
+tid = H5T.copy('H5T_NATIVE_DOUBLE')
+sid = H5S.create('H5S_SCALAR')
+H5D.create(fid, <location>, tid, sid, 'H5P_DEFAULT')
+h5write(<filename>, <location>, <value>)
+```
+**Python**
+```python
+file = h5py.File(<filename>, 'r+')
+file.create_dataset(<location>, dtype='f8', data=<value>)
+```
+
+#### integer `<i>`
+**MATLAB**
+```matlab
+fid = H5F.open(<filename>, 'H5F_ACC_RDWR', 'H5P_DEFAULT')
+tid = H5T.copy('H5T_NATIVE_INT')
+sid = H5S.create('H5S_SCALAR')
+H5D.create(fid, <location>, tid, sid, 'H5P_DEFAULT')
+h5write(<filename>, <location>, <value>)
+```
+**Python**
+```python
+file = h5py.File(<filename>, 'r+')
+file.create_dataset(<location>, dtype='i4', data=<value>)
+```
+
+#### string array `["s",...]`
+**MATLAB**
+```matlab
+fid = H5F.open(<filename>, 'H5F_ACC_RDWR', 'H5P_DEFAULT')
+
+str_arr = {'Hello', 'World', 'foo', 'bar'} % values to write, a cell array of strings of any length
+
+sid = H5S.create_simple(1, numel(str_arr), H5ML.get_constant_value('H5S_UNLIMITED'));
+
+tid = H5T.copy('H5T_C_S1');
+H5T.set_size(tid, 'H5T_VARIABLE');
+
+pid = H5P.create('H5P_DATASET_CREATE');
+H5P.set_chunk(pid, 2);
+
+did = H5D.create(fid, <location>, tid, sid, pid)
+
+H5D.write(did, tid, 'H5S_ALL', 'H5S_ALL', 'H5P_DEFAULT', str_arr)
+```
+**Python**
+```python
+array = numpy.array(<values>).astype('O') # A list of strings must be converted to a NumPy array with dtype 'O'
+file = h5py.File(<filename>, 'r+')
+varlen_str_dtype = h5py.string_dtype(encoding='ascii', length=None)
+file.create_dataset(<location>, dtype=varlen_str_dtype, data=array)
+```
+#### numeric array `[<f>,...]` or `[[<f>,...]]`
+**MATLAB**
+> Note: Because MATLAB has no notion of arrays with fewer than 2 dimensions, using `size(data)` as the 3rd argument of
+`h5create` will erroneously save arrays with 1 dimension as a row or column vector of 2 dimensions. In the 1D case, use `length(data)` as the 3rd argument of `h5create`.
+```matlab
+data = <values>
+h5create(<filename>, <location>, length(data), 'Datatype', 'double') % use size(data) instead of length(data) for 2-D arrays
+h5write(<filename>, <location>, data)
+```
+**Python**
+```python
+array = numpy.array(<values>).astype(numpy.float64) # A list or nested list of values should be converted to a NumPy array
+file = h5py.File(<filename>, 'r+')
+file.create_dataset(<location>, dtype='f8', data=array)
+```
+
+#### integer array `[<i>,...]` or `[[<i>,...]]`
+
+**MATLAB**
+> Note: Because MATLAB has no notion of arrays with fewer than 2 dimensions, using `size(data)` as the 3rd argument of
+`h5create` will erroneously save arrays with 1 dimension as a row or column vector of 2 dimensions. In the 1D case, use `length(data)` as the 3rd argument of `h5create`.
+```matlab
+data = <values>
+h5create(<filename>, <location>, length(data), 'Datatype', 'int32') % use size(data) instead of length(data) for 2-D arrays
+h5write(<filename>, <location>, data)
+```
+**Python**
+```python
+array = numpy.array(<values>).astype(numpy.int32) # A list or nested list of values should be converted to a NumPy array
+file = h5py.File(<filename>, 'r+')
+file.create_dataset(<location>, dtype='i4', data=array)
+```
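+
+For completeness, a short non-normative sketch of reading such datasets back
+with h5py (the file and dataset names are illustrative only):
+
+```python
+import h5py
+
+with h5py.File("example.snirf", "r") as f:  # hypothetical file
+    time = f["/nirs1/data1/time"][:]               # numeric array -> NumPy array
+    fmt = f["/formatVersion"][()].decode("ascii")  # variable-length string -> bytes
+    print(fmt, time.shape)
+```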
+
+## Acknowledgement
+
+This document was originally drafted by Blaise Frederick (bbfrederick at
+mclean.harvard.edu) and David Boas (dboas at bu.edu).
+
+Other significant contributors to this specification include:
+- Theodore Huppert (huppert1 at pitt.edu)
+- Jay Dubb (jdubb at bu.edu)
+- Qianqian Fang (q.fang at neu.edu)
+
+The following individuals representing academic, industrial, software, and
+hardware interests are also contributing to and supporting the adoption of this
+specification:
+
+### Software
+- Ata Akin, Acibadem University
+- Hasan Ayaz, Drexel University
+- Joe Culver, Washington University in St. Louis, neuroDOT
+- Hamid Dehghani, University of Birmingham, NIRFAST
+- Adam Eggebrecht, Washington University in St. Louis, neuroDOT
+- Christophe Grova, McGill University, NIRSTORM
+- Felipe Orihuela-Espina, Instituto Nacional de Astrofisica, Optica y Electronica, ICNNA
+- Luca Pollonini, Houston Methodist, Phoebe
+- Sungho Tak, Korea Basic Science Institute, NIRS-SPM
+- Alessandro Torricelli, Politecnico di Milano
+- Stanislaw Wojtkiewicz, University of Birmingham, NIRFAST
+- Robert Luke, Macquarie University, MNE-NIRS
+- Stephen Tucker, Boston University
+- Michael Luhrs, Maastricht University, Brain Innovation B.V., Satori
+- Robert Oostenveld, Radboud University, FieldTrip
+
+### Hardware
+- Hirokazu Asaka, Hitachi
+- Rob Cooper, Gower Labs Inc
+- Mathieu Coursolle, Rogue Research
+- Reuben Hill, Gower Labs Inc
+- Jorn Horschig, Artinis Medical Systems B.V.
+- Takumi Inakazu, Hitachi
+- Lamija Pasalic, NIRx
+- Davood Tashayyod, fNIR Devices and Biopac Inc
+- Hanseok Yun, OBELAB Inc
+- Zahra M. Aghajan, Kernel
diff --git a/tests/data/Simple_Probe_measList.snirf b/tests/data/Simple_Probe_measList.snirf
deleted file mode 100644
index 4f5266f..0000000
Binary files a/tests/data/Simple_Probe_measList.snirf and /dev/null differ
diff --git a/tests/data/Simple_Probe_measLists.snirf b/tests/data/Simple_Probe_measLists.snirf
deleted file mode 100644
index c612b12..0000000
Binary files a/tests/data/Simple_Probe_measLists.snirf and /dev/null differ
diff --git a/tests/data/sub-01_task-inclusion_nirs.snirf b/tests/data/v120dev-sub-01_task-inclusion_nirs.snirf
similarity index 99%
rename from tests/data/sub-01_task-inclusion_nirs.snirf
rename to tests/data/v120dev-sub-01_task-inclusion_nirs.snirf
index b9f2957..dc8c50c 100644
Binary files a/tests/data/sub-01_task-inclusion_nirs.snirf and b/tests/data/v120dev-sub-01_task-inclusion_nirs.snirf differ
diff --git a/tests/data/sub-02_task-test_nirs.snirf b/tests/data/v120dev-sub-02_task-test_nirs.snirf
similarity index 99%
rename from tests/data/sub-02_task-test_nirs.snirf
rename to tests/data/v120dev-sub-02_task-test_nirs.snirf
index 8907bd1..7514e8d 100644
Binary files a/tests/data/sub-02_task-test_nirs.snirf and b/tests/data/v120dev-sub-02_task-test_nirs.snirf differ
diff --git a/tests/data/sub-A_task-test_run-1.snirf b/tests/data/v120dev-sub-A_task-test_run-1.snirf
similarity index 89%
rename from tests/data/sub-A_task-test_run-1.snirf
rename to tests/data/v120dev-sub-A_task-test_run-1.snirf
index 4941b64..0fe81ad 100644
Binary files a/tests/data/sub-A_task-test_run-1.snirf and b/tests/data/v120dev-sub-A_task-test_run-1.snirf differ
diff --git a/tests/test.py b/tests/test.py
index 2c85364..7f3c0e8 100644
--- a/tests/test.py
+++ b/tests/test.py
@@ -1,6 +1,4 @@
import unittest
-import snirf
-from snirf import Snirf, validateSnirf, loadSnirf, saveSnirf
import h5py
import os
import sys
@@ -10,6 +8,13 @@
from collections.abc import Set, Mapping
import numpy as np
import shutil
+try:
+ import snirf
+ from snirf import Snirf, validateSnirf, loadSnirf, saveSnirf
+except ImportError:
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+ import snirf
+ from snirf import Snirf, validateSnirf, loadSnirf, saveSnirf
VERBOSE = True # Additional print statements in each test
@@ -147,6 +152,111 @@ def _print_keys(group):
class PySnirf2_Test(unittest.TestCase):
+ def test_validate_datatypes(self):
+ """
+ Test validation methods for dataType and dataType labels
+ """
+ for i, mode in enumerate([False, True]):
+ for file in self._test_files:
+ with Snirf(file, 'r+', dynamic_loading=mode) as s:
+ if len(s.nirs[0].data) == 1 and len(s.nirs[0].data[0].measurementList) > 1:
+ s.nirs[0].data[0].measurementList[0].dataType = -100
+ if VERBOSE:
+ s.validate().display(severity=3)
+ self.assertTrue('UNRECOGNIZED_DATA_TYPE' in [err.name for err in s.validate().errors], msg='Failed to raise dataType error')
+ s.nirs[0].data[0].measurementList[0].dataType = 99999
+ s.nirs[0].data[0].measurementList[0].dataTypeLabel = 'bar'
+ if VERBOSE:
+ s.validate().display(severity=3)
+ self.assertTrue('UNRECOGNIZED_DATA_TYPE_LABEL' in [err.name for err in s.validate().errors], msg='Failed to raise dataTypeLabel error')
+ s.measurementList_to_measurementLists()
+ if VERBOSE:
+ s.validate().display(severity=3)
+ self.assertTrue('UNRECOGNIZED_DATA_TYPE_LABEL' in [err.name for err in s.validate().errors], msg='Failed to raise dataTypeLabel error after converting to measurementLists')
+ s.nirs[0].data[0].measurementLists.dataType[-1] = -100
+ self.assertTrue('UNRECOGNIZED_DATA_TYPE' in [err.name for err in s.validate().errors], msg='Failed to raise dataType error after converting to measurementLists')
+
+ def test_validate_measurementList_dimensions(self):
+ """
+ Test validation that measurementList dimensions are consistent with dataTimeSeries
+ """
+ for i, mode in enumerate([False, True]):
+ for file in self._test_files:
+ # Test measurementList(s) length validation
+ with Snirf(file, 'r+', dynamic_loading=mode) as s:
+ if len(s.nirs[0].data) == 1 and len(s.nirs[0].data[0].measurementList) > 1:
+ self.assertTrue(s.validate(), msg="Failed to validate SNIRF object")
+ s.nirs[0].data[0].measurementList.appendGroup()
+ if VERBOSE:
+ s.validate().display(severity=3)
+ self.assertTrue('INVALID_MEASUREMENTLIST' in [err.name for err in s.validate().errors], msg='Failed to raise measurementList length error')
+ new_path = file.split('.')[0] + '_invalid_ml.snirf'
+ s.save(new_path)
+ self.assertTrue('INVALID_MEASUREMENTLIST' in [err.name for err in validateSnirf(new_path).errors], msg='Failed to raise measurementList length error')
+ with Snirf(file, 'r+', dynamic_loading=mode) as s:
+ if len(s.nirs[0].data) >= 1 and len(s.nirs[0].data[0].measurementList) > 0:
+ s.measurementList_to_measurementLists()
+ wli = s.nirs[0].data[0].measurementLists.wavelengthIndex
+ s.nirs[0].data[0].measurementLists.wavelengthIndex = np.concatenate([wli, [0]])
+ self.assertTrue('INVALID_MEASUREMENTLISTS' in [err.name for err in s.validate().errors], msg='Failed to raise measurementList length error')
+ # Test measurementList(s) value validation
+ with Snirf(file, 'r+', dynamic_loading=mode) as s:
+ if len(s.nirs[0].data) >= 1 and len(s.nirs[0].data[0].measurementList) > 0:
+ s.nirs[0].data[0].measurementList[0].wavelengthIndex = 999_999_999_999 # Unreasonable values
+ s.nirs[0].data[0].measurementList[0].sourceIndex = -1
+ s.nirs[0].data[0].measurementList[0].detectorIndex = 999_999_999_999
+ if VERBOSE:
+ s.validate().display(severity=3)
+ errs = [err.name for err in s.validate().errors]
+ self.assertTrue('INVALID_WAVELENGTH_INDEX' in errs, msg='Failed to raise wavelengthIndex error')
+ self.assertTrue('INVALID_SOURCE_INDEX' in errs, msg='Failed to raise sourceIndex error')
+ self.assertTrue('INVALID_DETECTOR_INDEX' in errs, msg='Failed to raise detectorIndex error')
+ with Snirf(file, 'r+', dynamic_loading=mode) as s:
+ if len(s.nirs[0].data) >= 1 and len(s.nirs[0].data[0].measurementList) > 0:
+ s.measurementList_to_measurementLists()
+ s.nirs[0].data[0].measurementLists.wavelengthIndex[0] = 999_999_999_999
+ s.nirs[0].data[0].measurementLists.sourceIndex[0] = -1
+ s.nirs[0].data[0].measurementLists.detectorIndex[0] = 999_999_999_999
+ if VERBOSE:
+ s.validate().display(severity=3)
+ errs = [err.name for err in s.validate().errors]
+ self.assertTrue('INVALID_WAVELENGTH_INDEX' in errs, msg='Failed to raise wavelengthIndex error')
+ self.assertTrue('INVALID_SOURCE_INDEX' in errs, msg='Failed to raise sourceIndex error')
+ self.assertTrue('INVALID_DETECTOR_INDEX' in errs, msg='Failed to raise detectorIndex error')
+
+
+ def test_validate_measurementList_conversion(self):
+ """
+ Validate that measurementList can be converted to measurementLists and back
+
+ Also tests that Groups can be deleted from files on disk
+ """
+ for i, mode in enumerate([False, True]):
+ for file in self._test_files:
+ with Snirf(file, dynamic_loading=mode) as s:
+ if len(s.nirs[0].data) >= 1 and len(s.nirs[0].data[0].measurementList) > 0: # Subset of test data?
+ if VERBOSE:
+ print('Converting measurementList', file, 'to measurementLists')
+ s.nirs[0].data[0].measurementList_to_measurementLists()
+ del s.nirs[0].data[0].measurementList[:]
+ new_path = file.split('.')[0] + '_converted_to_measurementLists.snirf'
+ if VERBOSE:
+ s.validate().display(severity=3)
+ self.assertTrue(s.validate(), msg="Failed to validate SNIRF object after conversion to measurementLists")
+ if VERBOSE:
+ print('Writing file to', new_path)
+ s.save(new_path)
+ self.assertTrue(validateSnirf(new_path), msg="Failed to validate file on disk after conversion to measurementLists")
+ with Snirf(new_path, dynamic_loading=mode) as s:
+ if VERBOSE:
+ print('Converting measurementLists in', new_path, 'back to measurementList')
+ s.nirs[0].data[0].measurementLists_to_measurementList()
+ del s.nirs[0].data[0].measurementLists
+ self.assertTrue(s.validate(), msg="Failed to validate file after conversion back to measurementList")
+ s.save()
+ print('Checking to see if conversion is reversible...')
+ dataset_equal_test(self, file, new_path)
+
def test_multidimensional_aux(self):
"""
Test to ensure the validator permits multidimensional aux
@@ -163,10 +273,9 @@ def test_multidimensional_aux(self):
print("Created new aux channel:", s.nirs[0].aux[-1])
s.save()
if VERBOSE:
-
s.validate().display()
- self.assertTrue(s.validate(), msg="Incorrectly invalidated multidimensional aux signal!")
- self.assertTrue(validateSnirf(file), msg="Incorrectly invalidated multidimensional aux signal in file on disk!")
+ self.assertTrue(s.validate(), msg="Incorrectly invalidated multidimensional aux signal")
+ self.assertTrue(validateSnirf(file), msg="Incorrectly invalidated multidimensional aux signal in file on disk")
def test_assignment(self):
"""
@@ -182,6 +291,8 @@ def test_assignment(self):
print('Loading', file, 'with dynamic_loading=' + str(mode))
# Reassignment of same probe
with Snirf(file, 'r+', dynamic_loading=mode) as s:
+ if len(s.nirs[0].data[0].measurementList) < 1:
+ continue # skip cases without measurementList
same_probe = s.nirs[0].probe
self.assertTrue(isinstance(same_probe, snirf.Probe), msg="Could not assign Probe reference")
same_probe.sourcePos3D = np.random.random([31, 3])
@@ -385,7 +496,7 @@ def test_unspecified_metadatatags(self):
s.nirs[0].metaDataTags.add('foo', 'Hello')
s.nirs[0].metaDataTags.add('Bar', 'World')
s.nirs[0].metaDataTags.add('_array_of_strings', ['foo', 'bar'])
- self.assertTrue(s.validate(), msg='adding the unspecified metaDataTags resulted in an INVALID file...')
+ self.assertTrue(s.validate(), msg='adding the unspecified metaDataTags resulted in an INVALID file')
self.assertTrue(s.nirs[0].metaDataTags.foo == 'Hello', msg='Failed to set the unspecified metadatatags')
self.assertTrue(s.nirs[0].metaDataTags.Bar == 'World', msg='Failed to set the unspecified metadatatags')
self.assertTrue(s.nirs[0].metaDataTags._array_of_strings[0] == 'foo', msg='Failed to set the unspecified metadatatags')
@@ -540,6 +651,9 @@ def test_validator_invalid_measurement_list(self):
if VERBOSE:
print('Loading', file + '.snirf', 'with dynamic_loading=' + str(mode))
s = Snirf(file, 'r+', dynamic_loading=mode)
+ if len(s.nirs[0].data[0].measurementList) < 1:
+ s.close()
+ continue # skip cases without measurementList
s.nirs[0].data[0].measurementList.appendGroup() # Add extra ml
if VERBOSE:
print('Performing local validation on invalid ml', s)
@@ -580,11 +694,9 @@ def test_edit_probe_group(self):
'S5_A', 'S6_A', 'S7_A', 'S8_A',
'S9_A', 'S10_A', 'S11_A', 'S12_A',
'S13_A', 'S14_A', 'S15_A']
- desired_probe_uselocalindex = 1
desired_probe_sourcepos3d = np.random.random([31, 3])
s.nirs[0].probe.sourceLabels = desired_probe_sourcelabels
- s.nirs[0].probe.useLocalIndex = desired_probe_uselocalindex
s.nirs[0].probe.sourcePos3D = desired_probe_sourcepos3d
snirf_save_file = file.split('.')[0] + '_edited_snirf_save.snirf'
@@ -602,7 +714,6 @@ def test_edit_probe_group(self):
s2 = Snirf(edited_filename, 'r+', dynamic_loading=mode)
self.assertTrue((s2.nirs[0].probe.sourceLabels == desired_probe_sourcelabels).all(), msg='Failed to edit sourceLabels properly in ' + edited_filename)
- self.assertTrue(s2.nirs[0].probe.useLocalIndex == desired_probe_uselocalindex, msg='Failed to edit sourceLabels properly in ' + edited_filename)
self.assertTrue((s2.nirs[0].probe.sourcePos3D == desired_probe_sourcepos3d).all(), msg='Failed to edit sourceLabels properly in ' + edited_filename)
s2.close()