diff --git a/.github/workflows/minimal.yml b/.github/workflows/minimal.yml
index 63bc97d157..2cc0213781 100644
--- a/.github/workflows/minimal.yml
+++ b/.github/workflows/minimal.yml
@@ -15,7 +15,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Setup Miniconda
- uses: conda-incubator/setup-miniconda@v2.2.0
+ uses: conda-incubator/setup-miniconda@v3.0.1
with:
channels: conda-forge
environment-file: environment.yml
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index cdf230bc7c..0c3c49d78d 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -42,7 +42,7 @@ jobs:
with:
fetch-depth: 0
- name: Setup Miniconda
- uses: conda-incubator/setup-miniconda@v2.2.0
+ uses: conda-incubator/setup-miniconda@v3.0.1
with:
channels: conda-forge
python-version: ${{ matrix.python-version }}
diff --git a/.github/workflows/releases.yml b/.github/workflows/releases.yml
index c08bfc6677..3bd25bfbf7 100644
--- a/.github/workflows/releases.yml
+++ b/.github/workflows/releases.yml
@@ -64,7 +64,7 @@ jobs:
with:
name: releases
path: dist
- - uses: pypa/gh-action-pypi-publish@v1.8.10
+ - uses: pypa/gh-action-pypi-publish@v1.8.11
with:
user: __token__
password: ${{ secrets.pypi_password }}
diff --git a/.github/workflows/windows-testing.yml b/.github/workflows/windows-testing.yml
index 3afa8c467e..eeee5b704d 100644
--- a/.github/workflows/windows-testing.yml
+++ b/.github/workflows/windows-testing.yml
@@ -21,7 +21,7 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- - uses: conda-incubator/setup-miniconda@v2.2.0
+ - uses: conda-incubator/setup-miniconda@v3.0.1
with:
auto-update-conda: true
python-version: ${{ matrix.python-version }}
diff --git a/.gitignore b/.gitignore
index a6a456636d..7de405d8a0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -51,6 +51,7 @@ coverage.xml
# Sphinx documentation
docs/_build/
+docs/_autoapi/
# PyBuilder
target/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f22dc39832..e985d24000 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -14,7 +14,7 @@ repos:
# Respect `exclude` and `extend-exclude` settings.
args: ["--force-exclude"]
- repo: https://github.com/psf/black
- rev: 22.12.0
+ rev: 23.10.1
hooks:
- id: black
- repo: https://github.com/codespell-project/codespell
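Note that the bump from ``black`` 22.12.0 to 23.10.1 adopts black's 2023 stable style, which removes blank lines at the start of a block and (as seen in the hunks below) adds trailing commas after ``**kwargs`` in multi-line signatures and calls; this accounts for the many whitespace-only changes throughout the Python files in this diff. A minimal illustration of the blank-line rule, using hypothetical code not from this repository::

    def before(x):

        return x  # black >= 23 deletes the blank line above


    def after(x):
        return x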
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 2124f77271..08cac8d78d 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -7,6 +7,7 @@ build:
sphinx:
configuration: docs/conf.py
+ fail_on_warning: true
python:
install:
@@ -14,3 +15,5 @@ python:
path: .
extra_requirements:
- docs
+
+formats: all
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
deleted file mode 100644
index f07035c69f..0000000000
--- a/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at zarr.conduct@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [https://www.contributor-covenant.org/version/1/4][version]
-
-[homepage]: https://www.contributor-covenant.org
-[version]: https://www.contributor-covenant.org/version/1/4
diff --git a/bench/compress_normal.py b/bench/compress_normal.py
index 9f1655541c..803d54b76b 100644
--- a/bench/compress_normal.py
+++ b/bench/compress_normal.py
@@ -8,7 +8,6 @@
from zarr import blosc
if __name__ == "__main__":
-
sys.path.insert(0, "..")
# setup
diff --git a/docs/Makefile b/docs/Makefile
index f279d820c6..e6adc1ca8c 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -2,7 +2,7 @@
#
# You can set these variables from the command line.
-SPHINXOPTS =
+SPHINXOPTS = -W --keep-going
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
diff --git a/docs/api/core.rst b/docs/api/core.rst
index c4075fdb30..b310460e51 100644
--- a/docs/api/core.rst
+++ b/docs/api/core.rst
@@ -1,24 +1,5 @@
The Array class (``zarr.core``)
===============================
-.. module:: zarr.core
-.. autoclass:: Array
-
- .. automethod:: __getitem__
- .. automethod:: __setitem__
- .. automethod:: get_basic_selection
- .. automethod:: set_basic_selection
- .. automethod:: get_mask_selection
- .. automethod:: set_mask_selection
- .. automethod:: get_block_selection
- .. automethod:: set_block_selection
- .. automethod:: get_coordinate_selection
- .. automethod:: set_coordinate_selection
- .. automethod:: get_orthogonal_selection
- .. automethod:: set_orthogonal_selection
- .. automethod:: digest
- .. automethod:: hexdigest
- .. automethod:: resize
- .. automethod:: append
- .. automethod:: view
- .. automethod:: astype
+.. automodapi:: zarr.core
+ :no-heading:
diff --git a/docs/conf.py b/docs/conf.py
index 1ffaeddef4..318843a9fb 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -42,6 +42,7 @@
"sphinx.ext.autosummary",
"sphinx.ext.viewcode",
"sphinx.ext.intersphinx",
+ "sphinx_automodapi.automodapi",
"numpydoc",
"sphinx_issues",
"sphinx_copybutton",
@@ -52,6 +53,9 @@
numpydoc_class_members_toctree = False
issues_github_path = "zarr-developers/zarr-python"
+automodapi_inheritance_diagram = False
+automodapi_toctreedirnm = "_autoapi"
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
@@ -144,6 +148,7 @@
},
],
"collapse_navigation": True,
+ "navigation_with_keys": False,
}
# Add any paths that contain custom themes here, relative to this directory.
@@ -331,6 +336,7 @@ def setup(app):
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
+ "numcodecs": ("https://numcodecs.readthedocs.io/en/stable/", None),
}
diff --git a/docs/contributing.rst b/docs/contributing.rst
index 0420535093..91606b7276 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -85,9 +85,9 @@ Creating a development environment
To work with the Zarr source code, it is recommended to set up a Python virtual
environment and install all Zarr dependencies using the same versions as are used by
the core developers and continuous integration services. Assuming you have a Python
-3 interpreter already installed, and have also installed the virtualenv package, and
-you have cloned the Zarr source code and your current working directory is the root of
-the repository, you can do something like the following::
+3 interpreter already installed, and you have cloned the Zarr source code and your
+current working directory is the root of the repository, you can do something like
+the following::
$ mkdir -p ~/pyenv/zarr-dev
$ python -m venv ~/pyenv/zarr-dev
diff --git a/docs/index.rst b/docs/index.rst
index 97f5889ca5..06f79b7e7c 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -19,7 +19,7 @@ Zarr-Python
**Version**: |version|
-**Download documentation**: `Zipped HTML `_
+**Download documentation**: `PDF/Zipped HTML/EPUB `_
**Useful links**:
`Installation `_ |
@@ -60,6 +60,7 @@ Zarr is a file storage format for chunked, compressed, N-dimensional arrays base
+++
.. button-ref:: tutorial
+ :ref-type: ref
:expand:
:color: dark
:click-parent:
diff --git a/docs/release.rst b/docs/release.rst
index 2f9b93a361..842c36e290 100644
--- a/docs/release.rst
+++ b/docs/release.rst
@@ -18,9 +18,37 @@ Release notes
Unreleased
----------
+Docs
+~~~~
+
+* Minor corrections and changes in documentation.
+ By :user:`Sanket Verma ` :issue:`1509`.
+
+* Fix typo in documentation.
+  By :user:`Dimitri Papadopoulos Orfanos ` :issue:`1554`.
+
+* The documentation build now fails if there are any warnings.
+ By :user:`David Stansby ` :issue:`1548`.
+
+* Add links to ``numcodecs`` docs in the tutorial.
+ By :user:`David Stansby ` :issue:`1535`.
+
+* Enable offline formats for documentation builds.
+ By :user:`Sanket Verma ` :issue:`1551`.
+
+* Minor tweak to advanced indexing tutorial examples.
+ By :user:`Ross Barnowski ` :issue:`1550`.
+
+
Maintenance
~~~~~~~~~~~
+* Cache result of ``FSStore._fsspec_installed()``.
+ By :user:`Janick Martinez Esturo ` :issue:`1581`.
+
+* Extend copyright notice to 2023.
+ By :user:`Jack Kelly ` :issue:`1528`.
+
* Change occurrence of ``io.open()`` into ``open()``.
By :user:`Dimitri Papadopoulos Orfanos ` :issue:`1421`.
@@ -33,6 +61,10 @@ Maintenance
* Allow ``black`` code formatter to be run with any Python version.
By :user:`David Stansby ` :issue:`1549`.
+* Remove ``sphinx-rtd-theme`` dependency from ``pyproject.toml``.
+ By :user:`Sanket Verma ` :issue:`1563`.
+
+
.. _release_2.16.1:
2.16.1
@@ -161,10 +193,12 @@ Major changes
* Improve Zarr V3 support, adding partial store read/write and storage transformers.
Add new features from the `v3 spec `_:
+
* storage transformers
* `get_partial_values` and `set_partial_values`
* efficient `get_partial_values` implementation for `FSStoreV3`
* sharding storage transformer
+
By :user:`Jonathan Striebel `; :issue:`1096`, :issue:`1111`.
* N5 now supports Blosc.
diff --git a/docs/tutorial.rst b/docs/tutorial.rst
index f335db18d0..4099bac1c8 100644
--- a/docs/tutorial.rst
+++ b/docs/tutorial.rst
@@ -480,17 +480,17 @@ Indexing with coordinate arrays
Items from a Zarr array can be extracted by providing an integer array of
coordinates. E.g.::
- >>> z = zarr.array(np.arange(10))
+ >>> z = zarr.array(np.arange(10) ** 2)
>>> z[:]
- array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
- >>> z.get_coordinate_selection([1, 4])
- array([1, 4])
+ array([ 0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
+ >>> z.get_coordinate_selection([2, 5])
+ array([ 4, 25])
Coordinate arrays can also be used to update data, e.g.::
- >>> z.set_coordinate_selection([1, 4], [-1, -2])
+ >>> z.set_coordinate_selection([2, 5], [-1, -2])
>>> z[:]
- array([ 0, -1, 2, 3, -2, 5, 6, 7, 8, 9])
+ array([ 0, 1, -1, 9, 16, -2, 36, 49, 64, 81])
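For readers running the revised snippets outside the doctest, a self-contained version (the values are squares so that indices and values differ)::

    import numpy as np
    import zarr

    z = zarr.array(np.arange(10) ** 2)
    print(z.get_coordinate_selection([2, 5]))   # [ 4 25]
    z.set_coordinate_selection([2, 5], [-1, -2])
    print(z[:])                                 # [ 0  1 -1  9 16 -2 36 49 64 81]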
For multidimensional arrays, coordinates must be provided for each dimension,
e.g.::
@@ -534,17 +534,17 @@ Indexing with a mask array
Items can also be extracted by providing a Boolean mask. E.g.::
- >>> z = zarr.array(np.arange(10))
+ >>> z = zarr.array(np.arange(10) ** 2)
>>> z[:]
- array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ array([ 0, 1, 4, 9, 16, 25, 36, 49, 64, 81])
>>> sel = np.zeros_like(z, dtype=bool)
- >>> sel[1] = True
- >>> sel[4] = True
+ >>> sel[2] = True
+ >>> sel[5] = True
>>> z.get_mask_selection(sel)
- array([1, 4])
+ array([ 4, 25])
>>> z.set_mask_selection(sel, [-1, -2])
>>> z[:]
- array([ 0, -1, 2, 3, -2, 5, 6, 7, 8, 9])
+ array([ 0, 1, -1, 9, 16, -2, 36, 49, 64, 81])
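The same round trip with a Boolean mask, runnable as-is::

    import numpy as np
    import zarr

    z = zarr.array(np.arange(10) ** 2)
    sel = np.zeros_like(z, dtype=bool)
    sel[[2, 5]] = True                 # mark both positions at once
    print(z.get_mask_selection(sel))   # [ 4 25]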
Here's a multidimensional example::
@@ -986,7 +986,7 @@ It is also possible to initialize the filesystem outside of Zarr and then pass
it through. This requires creating an :class:`zarr.storage.FSStore` object
explicitly. For example::
- >>> import s3fs * doctest: +SKIP
+ >>> import s3fs # doctest: +SKIP
>>> fs = s3fs.S3FileSystem(anon=True) # doctest: +SKIP
>>> store = zarr.storage.FSStore('/zarr-demo/store', fs=fs) # doctest: +SKIP
>>> g = zarr.open_group(store) # doctest: +SKIP
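As a runnable counterpart to the corrected snippet above (the doctest is skipped in CI), a minimal sketch assuming ``s3fs`` is installed and the public ``zarr-demo`` bucket is reachable::

    import s3fs
    import zarr

    fs = s3fs.S3FileSystem(anon=True)
    store = zarr.storage.FSStore("/zarr-demo/store", fs=fs)
    g = zarr.open_group(store)
    print(g)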
@@ -1175,8 +1175,9 @@ A fixed-length unicode dtype is also available, e.g.::
For variable-length strings, the ``object`` dtype can be used, but a codec must be
provided to encode the data (see also :ref:`tutorial_objects` below). At the time of
writing there are four codecs available that can encode variable length string
-objects: :class:`numcodecs.VLenUTF8`, :class:`numcodecs.JSON`, :class:`numcodecs.MsgPack`.
-and :class:`numcodecs.Pickle`. E.g. using ``VLenUTF8``::
+objects: :class:`numcodecs.vlen.VLenUTF8`, :class:`numcodecs.json.JSON`,
+:class:`numcodecs.msgpacks.MsgPack`, and :class:`numcodecs.pickles.Pickle`.
+E.g. using ``VLenUTF8``::
>>> import numcodecs
>>> z = zarr.array(text_data, dtype=object, object_codec=numcodecs.VLenUTF8())
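A self-contained sketch of the ``VLenUTF8`` round trip; ``text_data`` below is a stand-in for the tutorial's greetings array::

    import numpy as np
    import zarr
    import numcodecs

    text_data = np.array(["Hello", "Bonjour", "こんにちは"], dtype=object)
    z = zarr.array(text_data, dtype=object, object_codec=numcodecs.VLenUTF8())
    print(z[:])  # round-trips the variable-length strings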
@@ -1201,8 +1202,8 @@ is a short-hand for ``dtype=object, object_codec=numcodecs.VLenUTF8()``, e.g.::
'Helló, világ!', 'Zdravo svete!', 'เฮลโลเวิลด์'], dtype=object)
Variable-length byte strings are also supported via ``dtype=object``. Again an
-``object_codec`` is required, which can be one of :class:`numcodecs.VLenBytes` or
-:class:`numcodecs.Pickle`. For convenience, ``dtype=bytes`` (or ``dtype=str`` on Python
+``object_codec`` is required, which can be one of :class:`numcodecs.vlen.VLenBytes` or
+:class:`numcodecs.pickles.Pickle`. For convenience, ``dtype=bytes`` (or ``dtype=str`` on Python
2.7) can be used as a short-hand for ``dtype=object, object_codec=numcodecs.VLenBytes()``,
e.g.::
@@ -1218,7 +1219,7 @@ e.g.::
b'\xe0\xb9\x80\xe0\xb8\xae\xe0\xb8\xa5\xe0\xb9\x82\xe0\xb8\xa5\xe0\xb9\x80\xe0\xb8\xa7\xe0\xb8\xb4\xe0\xb8\xa5\xe0\xb8\x94\xe0\xb9\x8c'], dtype=object)
If you know ahead of time all the possible string values that can occur, you could
-also use the :class:`numcodecs.Categorize` codec to encode each unique string value as an
+also use the :class:`numcodecs.categorize.Categorize` codec to encode each unique string value as an
integer. E.g.::
>>> categorize = numcodecs.Categorize(greetings, dtype=object)
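A self-contained sketch; ``greetings`` below is a hypothetical label set standing in for the tutorial's list::

    import numpy as np
    import zarr
    import numcodecs

    greetings = ["Hello", "Bonjour", "Hola"]  # hypothetical label set
    data = np.random.choice(greetings, size=10).astype(object)
    categorize = numcodecs.Categorize(greetings, dtype=object)
    z = zarr.array(data, dtype=object, object_codec=categorize)
    print(z[:3])  # strings are stored as small integer codes on disk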
@@ -1245,7 +1246,7 @@ The best codec to use will depend on what type of objects are present in the arr
At the time of writing there are three codecs available that can serve as a general
purpose object codec and support encoding of a mixture of object types:
-:class:`numcodecs.JSON`, :class:`numcodecs.MsgPack`. and :class:`numcodecs.Pickle`.
+:class:`numcodecs.json.JSON`, :class:`numcodecs.msgpacks.MsgPack`, and :class:`numcodecs.pickles.Pickle`.
For example, using the JSON codec::
@@ -1258,7 +1259,7 @@ For example, using the JSON codec::
array([42, 'foo', list(['bar', 'baz', 'qux']), {'a': 1, 'b': 2.2}, None], dtype=object)
Not all codecs support encoding of all object types. The
-:class:`numcodecs.Pickle` codec is the most flexible, supporting encoding any type
+:class:`numcodecs.pickles.Pickle` codec is the most flexible, supporting encoding any type
of Python object. However, if you are sharing data with anyone other than yourself, then
Pickle is not recommended as it is a potential security risk. This is because malicious
code can be embedded within pickled data. The JSON and MsgPack codecs do not have any
@@ -1270,7 +1271,7 @@ Ragged arrays
If you need to store an array of arrays, where each member array can be of any length
and stores the same primitive type (a.k.a. a ragged array), the
-:class:`numcodecs.VLenArray` codec can be used, e.g.::
+:class:`numcodecs.vlen.VLenArray` codec can be used, e.g.::
>>> z = zarr.empty(4, dtype=object, object_codec=numcodecs.VLenArray(int))
>>> z
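A self-contained sketch of a ragged-array round trip with ``VLenArray``::

    import numpy as np
    import zarr
    import numcodecs

    z = zarr.empty(4, dtype=object, object_codec=numcodecs.VLenArray(int))
    z[0] = np.array([1, 3, 5])
    z[1] = np.array([4])
    print(z[:2])  # each element is its own integer array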
diff --git a/pyproject.toml b/pyproject.toml
index 292bfddded..22ea19f28f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -45,10 +45,10 @@ jupyter = [
]
docs = [
'sphinx',
+ 'sphinx-automodapi',
'sphinx_design',
'sphinx-issues',
'sphinx-copybutton',
- 'sphinx-rtd-theme',
'pydata-sphinx-theme',
'numpydoc',
'numcodecs[msgpack]',
diff --git a/requirements_dev_minimal.txt b/requirements_dev_minimal.txt
index 725e95a8af..e2be6eb825 100644
--- a/requirements_dev_minimal.txt
+++ b/requirements_dev_minimal.txt
@@ -3,6 +3,6 @@ asciitree==0.3.3
fasteners==0.19
numcodecs==0.11.0
msgpack-python==0.5.6
-setuptools-scm==7.1.0
+setuptools-scm==8.0.4
# test requirements
-pytest==7.4.0
+pytest==7.4.3
diff --git a/requirements_dev_optional.txt b/requirements_dev_optional.txt
index fda3fcc6e5..f3ea80a546 100644
--- a/requirements_dev_optional.txt
+++ b/requirements_dev_optional.txt
@@ -8,7 +8,7 @@ ipywidgets==8.1.0
# don't let pyup change pinning for azure-storage-blob, need to pin to older
# version to get compatibility with azure storage emulator on appveyor (FIXME)
azure-storage-blob==12.16.0 # pyup: ignore
-redis==4.6.0
+redis==5.0.1
types-redis
types-setuptools
pymongo==4.5.0
@@ -16,8 +16,8 @@ pymongo==4.5.0
coverage
pytest-cov==4.1.0
pytest-doctestplus==1.0.0
-pytest-timeout==2.1.0
-h5py==3.9.0
-fsspec==2023.6.0
-s3fs==2023.6.0
+pytest-timeout==2.2.0
+h5py==3.10.0
+fsspec==2023.10.0
+s3fs==2023.10.0
moto[server]>=4.0.8
diff --git a/zarr/_storage/absstore.py b/zarr/_storage/absstore.py
index f62529f096..c9a113148c 100644
--- a/zarr/_storage/absstore.py
+++ b/zarr/_storage/absstore.py
@@ -87,7 +87,7 @@ def __init__(
"https://{}.blob.core.windows.net/".format(account_name),
container,
credential=account_key,
- **blob_service_kwargs
+ **blob_service_kwargs,
)
self.client = client
@@ -240,7 +240,6 @@ def __setitem__(self, key, value):
super().__setitem__(key, value)
def rmdir(self, path=None):
-
if not path:
# Currently allowing clear to delete everything as in v2
diff --git a/zarr/_storage/store.py b/zarr/_storage/store.py
index 9cd07bba0b..667ca38147 100644
--- a/zarr/_storage/store.py
+++ b/zarr/_storage/store.py
@@ -628,7 +628,6 @@ def _rmdir_from_keys(store: StoreLike, path: Optional[str] = None) -> None:
def _rmdir_from_keys_v3(store: StoreV3, path: str = "") -> None:
-
meta_dir = meta_root + path
meta_dir = meta_dir.rstrip("/")
_rmdir_from_keys(store, meta_dir)
diff --git a/zarr/_storage/v3.py b/zarr/_storage/v3.py
index 00dc085dac..32e78f7a34 100644
--- a/zarr/_storage/v3.py
+++ b/zarr/_storage/v3.py
@@ -118,7 +118,6 @@ def _get_files_and_dirs_from_path(store, path):
class FSStoreV3(FSStore, StoreV3):
-
# FSStoreV3 doesn't use this (FSStore uses it within _normalize_key)
_META_KEYS = ()
diff --git a/zarr/attrs.py b/zarr/attrs.py
index 01fc617b3c..e967c5b853 100644
--- a/zarr/attrs.py
+++ b/zarr/attrs.py
@@ -26,7 +26,6 @@ class Attributes(MutableMapping):
"""
def __init__(self, store, key=".zattrs", read_only=False, cache=True, synchronizer=None):
-
self._version = getattr(store, "_store_version", 2)
_Store = Store if self._version == 2 else StoreV3
self.store = _Store._ensure_store(store)
@@ -73,7 +72,6 @@ def __getitem__(self, item):
return self.asdict()[item]
def _write_op(self, f, *args, **kwargs):
-
# guard condition
if self.read_only:
raise PermissionError("attributes are read-only")
@@ -89,7 +87,6 @@ def __setitem__(self, item, value):
self._write_op(self._setitem_nosync, item, value)
def _setitem_nosync(self, item, value):
-
# load existing data
d = self._get_nosync()
@@ -106,7 +103,6 @@ def __delitem__(self, item):
self._write_op(self._delitem_nosync, item)
def _delitem_nosync(self, key):
-
# load existing data
d = self._get_nosync()
@@ -128,7 +124,6 @@ def put(self, d):
self._write_op(self._put_nosync, dict(attributes=d))
def _put_nosync(self, d):
-
d_to_check = d if self._version == 2 else d["attributes"]
if not all(isinstance(item, str) for item in d_to_check):
# TODO: Raise an error for non-string keys
@@ -178,7 +173,6 @@ def update(self, *args, **kwargs):
self._write_op(self._update_nosync, *args, **kwargs)
def _update_nosync(self, *args, **kwargs):
-
# load existing data
d = self._get_nosync()
diff --git a/zarr/convenience.py b/zarr/convenience.py
index 0ee8a8d323..9c0deeea47 100644
--- a/zarr/convenience.py
+++ b/zarr/convenience.py
@@ -675,10 +675,8 @@ def copy_store(
# setup logging
with _LogWriter(log) as log:
-
# iterate over source keys
for source_key in sorted(source.keys()):
-
# filter to keys under source path
if source_store_version == 2:
if not source_key.startswith(source_path):
@@ -757,7 +755,7 @@ def copy(
log=None,
if_exists="raise",
dry_run=False,
- **create_kws
+ **create_kws,
):
"""Copy the `source` array or group into the `dest` group.
@@ -878,7 +876,6 @@ def copy(
# setup logging
with _LogWriter(log) as log:
-
# do the copying
n_copied, n_skipped, n_bytes_copied = _copy(
log,
@@ -890,7 +887,7 @@ def copy(
without_attrs=without_attrs,
if_exists=if_exists,
dry_run=dry_run,
- **create_kws
+ **create_kws,
)
# log a final message with a summary of what happened
@@ -948,12 +945,10 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists, dry_
# take action
if do_copy:
-
# log a message about what we're going to do
log("copy {} {} {}".format(source.name, source.shape, source.dtype))
if not dry_run:
-
# clear the way
if exists:
del dest[name]
@@ -1038,12 +1033,10 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists, dry_
# take action
if do_copy:
-
# log action
log("copy {}".format(source.name))
if not dry_run:
-
# clear the way
if exists_array:
del dest[name]
@@ -1056,7 +1049,6 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists, dry_
grp.attrs.update(source.attrs)
else:
-
# setup for dry run without creating any groups in the
# destination
if dest is not None:
@@ -1076,7 +1068,7 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists, dry_
without_attrs=without_attrs,
if_exists=if_exists,
dry_run=dry_run,
- **create_kws
+ **create_kws,
)
n_copied += c
n_skipped += s
@@ -1099,7 +1091,7 @@ def copy_all(
log=None,
if_exists="raise",
dry_run=False,
- **create_kws
+ **create_kws,
):
"""Copy all children of the `source` group into the `dest` group.
@@ -1189,7 +1181,6 @@ def copy_all(
# setup logging
with _LogWriter(log) as log:
-
for k in source.keys():
c, s, b = _copy(
log,
@@ -1201,7 +1192,7 @@ def copy_all(
without_attrs=without_attrs,
if_exists=if_exists,
dry_run=dry_run,
- **create_kws
+ **create_kws,
)
n_copied += c
n_skipped += s
@@ -1262,7 +1253,6 @@ def is_zarr_key(key):
return key.endswith(".zarray") or key.endswith(".zgroup") or key.endswith(".zattrs")
else:
-
assert_zarr_v3_api_available()
sfx = _get_metadata_suffix(store) # type: ignore
diff --git a/zarr/core.py b/zarr/core.py
index 2177e9055c..c07a31e95f 100644
--- a/zarr/core.py
+++ b/zarr/core.py
@@ -60,6 +60,8 @@
ensure_ndarray_like,
)
+__all__ = ["Array"]
+
# noinspection PyUnresolvedReferences
class Array:
@@ -110,62 +112,6 @@ class Array:
to users. Use `numpy.empty(())` by default.
.. versionadded:: 2.13
-
-
- Attributes
- ----------
- store
- path
- name
- read_only
- chunk_store
- shape
- chunks
- dtype
- compression
- compression_opts
- dimension_separator
- fill_value
- order
- synchronizer
- filters
- attrs
- size
- itemsize
- nbytes
- nbytes_stored
- cdata_shape
- nchunks
- nchunks_initialized
- is_view
- info
- vindex
- oindex
- blocks
- write_empty_chunks
- meta_array
-
- Methods
- -------
- __getitem__
- __setitem__
- get_basic_selection
- set_basic_selection
- get_orthogonal_selection
- set_orthogonal_selection
- get_mask_selection
- set_mask_selection
- get_coordinate_selection
- set_coordinate_selection
- get_block_selection
- set_block_selection
- digest
- hexdigest
- resize
- append
- view
- astype
-
"""
def __init__(
diff --git a/zarr/creation.py b/zarr/creation.py
index 726d0b5932..6227f90b7b 100644
--- a/zarr/creation.py
+++ b/zarr/creation.py
@@ -234,7 +234,6 @@ def create(
def _kwargs_compat(compressor, fill_value, kwargs):
-
# to be compatible with h5py, as well as backwards-compatible with Zarr
# 1.x, accept 'compression' and 'compression_opts' keyword arguments
@@ -697,7 +696,6 @@ def open_array(
def _like_args(a, kwargs):
-
shape, chunks = _get_shape_chunks(a)
if shape is not None:
kwargs.setdefault("shape", shape)
diff --git a/zarr/hierarchy.py b/zarr/hierarchy.py
index 3361969f08..1cfea89c81 100644
--- a/zarr/hierarchy.py
+++ b/zarr/hierarchy.py
@@ -145,7 +145,7 @@ def __init__(
synchronizer=None,
zarr_version=None,
*,
- meta_array=None
+ meta_array=None,
):
store: BaseStore = _normalize_store_arg(store, zarr_version=zarr_version)
if zarr_version is None:
@@ -919,7 +919,6 @@ def tree(self, expand=False, level=None):
return TreeViewer(self, expand=expand, level=level)
def _write_op(self, f, *args, **kwargs):
-
# guard condition
if self._read_only:
raise ReadOnlyError()
@@ -1094,7 +1093,6 @@ def create_dataset(self, name, **kwargs):
return self._write_op(self._create_dataset_nosync, name, **kwargs)
def _create_dataset_nosync(self, name, data=None, **kwargs):
-
assert "mode" not in kwargs
path = self._item_path(name)
@@ -1138,11 +1136,9 @@ def require_dataset(self, name, shape, dtype=None, exact=False, **kwargs):
)
def _require_dataset_nosync(self, name, shape, dtype=None, exact=False, **kwargs):
-
path = self._item_path(name)
if contains_array(self._store, path):
-
# array already exists at path, validate that it is the right shape and type
synchronizer = kwargs.get("synchronizer", self._synchronizer)
@@ -1235,7 +1231,7 @@ def _full_nosync(self, name, fill_value, **kwargs):
path=path,
chunk_store=self._chunk_store,
fill_value=fill_value,
- **kwargs
+ **kwargs,
)
def array(self, name, data, **kwargs):
@@ -1361,7 +1357,7 @@ def group(
path=None,
*,
zarr_version=None,
- meta_array=None
+ meta_array=None,
):
"""Create a group.
@@ -1452,7 +1448,7 @@ def open_group(
storage_options=None,
*,
zarr_version=None,
- meta_array=None
+ meta_array=None,
):
"""Open a group using file-mode-like semantics.
diff --git a/zarr/indexing.py b/zarr/indexing.py
index 487cc8b9d9..3042147ebb 100644
--- a/zarr/indexing.py
+++ b/zarr/indexing.py
@@ -111,7 +111,6 @@ def is_pure_orthogonal_indexing(selection, ndim):
def normalize_integer_selection(dim_sel, dim_len):
-
# normalize type to int
dim_sel = int(dim_sel)
@@ -145,7 +144,6 @@ def normalize_integer_selection(dim_sel, dim_len):
class IntDimIndexer:
def __init__(self, dim_sel, dim_len, dim_chunk_len):
-
# normalize
dim_sel = normalize_integer_selection(dim_sel, dim_len)
@@ -169,7 +167,6 @@ def ceildiv(a, b):
class SliceDimIndexer:
def __init__(self, dim_sel, dim_len, dim_chunk_len):
-
# normalize
self.start, self.stop, self.step = dim_sel.indices(dim_len)
if self.step < 1:
@@ -182,14 +179,12 @@ def __init__(self, dim_sel, dim_len, dim_chunk_len):
self.nchunks = ceildiv(self.dim_len, self.dim_chunk_len)
def __iter__(self):
-
# figure out the range of chunks we need to visit
dim_chunk_ix_from = self.start // self.dim_chunk_len
dim_chunk_ix_to = ceildiv(self.stop, self.dim_chunk_len)
# iterate over chunks in range
for dim_chunk_ix in range(dim_chunk_ix_from, dim_chunk_ix_to):
-
# compute offsets for chunk within overall array
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_limit = min(self.dim_len, (dim_chunk_ix + 1) * self.dim_chunk_len)
@@ -237,7 +232,6 @@ def check_selection_length(selection, shape):
def replace_ellipsis(selection, shape):
-
selection = ensure_tuple(selection)
# count number of ellipsis present
@@ -330,14 +324,12 @@ def is_basic_selection(selection):
# noinspection PyProtectedMember
class BasicIndexer:
def __init__(self, selection, array):
-
# handle ellipsis
selection = replace_ellipsis(selection, array._shape)
# setup per-dimension indexers
dim_indexers = []
for dim_sel, dim_len, dim_chunk_len in zip(selection, array._shape, array._chunks):
-
if is_integer(dim_sel):
dim_indexer = IntDimIndexer(dim_sel, dim_len, dim_chunk_len)
@@ -358,7 +350,6 @@ def __init__(self, selection, array):
def __iter__(self):
for dim_projections in itertools.product(*self.dim_indexers):
-
chunk_coords = tuple(p.dim_chunk_ix for p in dim_projections)
chunk_selection = tuple(p.dim_chunk_sel for p in dim_projections)
out_selection = tuple(
@@ -370,7 +361,6 @@ def __iter__(self):
class BoolArrayDimIndexer:
def __init__(self, dim_sel, dim_len, dim_chunk_len):
-
# check number of dimensions
if not is_bool_array(dim_sel, 1):
raise IndexError(
@@ -402,10 +392,8 @@ def __init__(self, dim_sel, dim_len, dim_chunk_len):
self.dim_chunk_ixs = np.nonzero(self.chunk_nitems)[0]
def __iter__(self):
-
# iterate over chunks with at least one item
for dim_chunk_ix in self.dim_chunk_ixs:
-
# find region in chunk
dim_offset = dim_chunk_ix * self.dim_chunk_len
dim_chunk_sel = self.dim_sel[dim_offset : dim_offset + self.dim_chunk_len]
@@ -472,7 +460,6 @@ def __init__(
boundscheck=True,
order=Order.UNKNOWN,
):
-
# ensure 1d array
dim_sel = np.asanyarray(dim_sel)
if not is_integer_array(dim_sel, 1):
@@ -526,9 +513,7 @@ def __init__(
self.chunk_nitems_cumsum = np.cumsum(self.chunk_nitems)
def __iter__(self):
-
for dim_chunk_ix in self.dim_chunk_ixs:
-
# find region in output
if dim_chunk_ix == 0:
start = 0
@@ -602,7 +587,6 @@ def oindex_set(a, selection, value):
# noinspection PyProtectedMember
class OrthogonalIndexer:
def __init__(self, selection, array):
-
# handle ellipsis
selection = replace_ellipsis(selection, array._shape)
@@ -612,7 +596,6 @@ def __init__(self, selection, array):
# setup per-dimension indexers
dim_indexers = []
for dim_sel, dim_len, dim_chunk_len in zip(selection, array._shape, array._chunks):
-
if is_integer(dim_sel):
dim_indexer = IntDimIndexer(dim_sel, dim_len, dim_chunk_len)
@@ -649,7 +632,6 @@ def __init__(self, selection, array):
def __iter__(self):
for dim_projections in itertools.product(*self.dim_indexers):
-
chunk_coords = tuple(p.dim_chunk_ix for p in dim_projections)
chunk_selection = tuple(p.dim_chunk_sel for p in dim_projections)
out_selection = tuple(
@@ -658,7 +640,6 @@ def __iter__(self):
# handle advanced indexing arrays orthogonally
if self.is_advanced:
-
# N.B., numpy doesn't support orthogonal indexing directly as yet,
# so need to work around via np.ix_. Also np.ix_ does not support a
# mixture of arrays and slices or integers, so need to convert slices
@@ -692,7 +673,6 @@ def __setitem__(self, selection, value):
# noinspection PyProtectedMember
class BlockIndexer:
def __init__(self, selection, array):
-
# handle ellipsis
selection = replace_ellipsis(selection, array._shape)
@@ -794,7 +774,6 @@ def is_mask_selection(selection, array):
# noinspection PyProtectedMember
class CoordinateIndexer:
def __init__(self, selection, array):
-
# some initial normalization
selection = ensure_tuple(selection)
selection = tuple([i] if is_integer(i) else i for i in selection)
@@ -810,7 +789,6 @@ def __init__(self, selection, array):
# handle wraparound, boundscheck
for dim_sel, dim_len in zip(selection, array.shape):
-
# handle wraparound
wraparound_indices(dim_sel, dim_len)
@@ -861,10 +839,8 @@ def __init__(self, selection, array):
self.chunk_mixs = np.unravel_index(self.chunk_rixs, array._cdata_shape)
def __iter__(self):
-
# iterate over chunks
for i, chunk_rix in enumerate(self.chunk_rixs):
-
chunk_coords = tuple(m[i] for m in self.chunk_mixs)
if chunk_rix == 0:
start = 0
@@ -891,7 +867,6 @@ def __iter__(self):
# noinspection PyProtectedMember
class MaskIndexer(CoordinateIndexer):
def __init__(self, selection, array):
-
# some initial normalization
selection = ensure_tuple(selection)
selection = replace_lists(selection)
diff --git a/zarr/meta.py b/zarr/meta.py
index 48791ddf17..f23889f3ea 100644
--- a/zarr/meta.py
+++ b/zarr/meta.py
@@ -89,7 +89,6 @@ class Metadata2:
@classmethod
def parse_metadata(cls, s: Union[MappingType, bytes, str]) -> MappingType[str, Any]:
-
# Here we allow that a store may return an already-parsed metadata object,
# or a string of JSON that we will parse here. We allow for an already-parsed
# object to accommodate a consolidated metadata store, where all the metadata for
diff --git a/zarr/n5.py b/zarr/n5.py
index 7e73905527..44b44e69e2 100644
--- a/zarr/n5.py
+++ b/zarr/n5.py
@@ -72,21 +72,18 @@ class N5Store(NestedDirectoryStore):
def __getitem__(self, key: str) -> bytes:
if key.endswith(zarr_group_meta_key):
-
key_new = key.replace(zarr_group_meta_key, n5_attrs_key)
value = group_metadata_to_zarr(self._load_n5_attrs(key_new))
return json_dumps(value)
elif key.endswith(zarr_array_meta_key):
-
key_new = key.replace(zarr_array_meta_key, n5_attrs_key)
top_level = key == zarr_array_meta_key
value = array_metadata_to_zarr(self._load_n5_attrs(key_new), top_level=top_level)
return json_dumps(value)
elif key.endswith(zarr_attrs_key):
-
key_new = key.replace(zarr_attrs_key, n5_attrs_key)
value = attrs_to_zarr(self._load_n5_attrs(key_new))
@@ -104,9 +101,7 @@ def __getitem__(self, key: str) -> bytes:
return super().__getitem__(key_new)
def __setitem__(self, key: str, value: Any):
-
if key.endswith(zarr_group_meta_key):
-
key_new = key.replace(zarr_group_meta_key, n5_attrs_key)
n5_attrs = self._load_n5_attrs(key_new)
@@ -115,7 +110,6 @@ def __setitem__(self, key: str, value: Any):
value = json_dumps(n5_attrs)
elif key.endswith(zarr_array_meta_key):
-
key_new = key.replace(zarr_array_meta_key, n5_attrs_key)
top_level = key == zarr_array_meta_key
n5_attrs = self._load_n5_attrs(key_new)
@@ -123,7 +117,6 @@ def __setitem__(self, key: str, value: Any):
value = json_dumps(n5_attrs)
elif key.endswith(zarr_attrs_key):
-
key_new = key.replace(zarr_attrs_key, n5_attrs_key)
n5_attrs = self._load_n5_attrs(key_new)
@@ -166,9 +159,7 @@ def __delitem__(self, key: str):
super().__delitem__(key_new)
def __contains__(self, key):
-
if key.endswith(zarr_group_meta_key):
-
key_new = key.replace(zarr_group_meta_key, n5_attrs_key)
if key_new not in self:
return False
@@ -176,18 +167,15 @@ def __contains__(self, key):
return "dimensions" not in self._load_n5_attrs(key_new)
elif key.endswith(zarr_array_meta_key):
-
key_new = key.replace(zarr_array_meta_key, n5_attrs_key)
# array if attributes contain 'dimensions'
return "dimensions" in self._load_n5_attrs(key_new)
elif key.endswith(zarr_attrs_key):
-
key_new = key.replace(zarr_attrs_key, n5_attrs_key)
return self._contains_attrs(key_new)
elif is_chunk_key(key):
-
key_new = invert_chunk_coords(key)
else:
key_new = key
@@ -198,7 +186,6 @@ def __eq__(self, other):
return isinstance(other, N5Store) and self.path == other.path
def listdir(self, path: Optional[str] = None):
-
if path is not None:
path = invert_chunk_coords(path)
path = cast(str, path)
@@ -208,7 +195,6 @@ def listdir(self, path: Optional[str] = None):
children = super().listdir(path=path)
if self._is_array(path):
-
# replace n5 attribute file with respective zarr attribute files
children.remove(n5_attrs_key)
children.append(zarr_array_meta_key)
@@ -234,7 +220,6 @@ def listdir(self, path: Optional[str] = None):
return sorted(new_children)
elif self._is_group(path):
-
# replace n5 attribute file with respective zarr attribute files
children.remove(n5_attrs_key)
children.append(zarr_group_meta_key)
@@ -244,7 +229,6 @@ def listdir(self, path: Optional[str] = None):
return sorted(children)
else:
-
return children
def _load_n5_attrs(self, path: str) -> Dict[str, Any]:
@@ -255,7 +239,6 @@ def _load_n5_attrs(self, path: str) -> Dict[str, Any]:
return {}
def _is_group(self, path: str):
-
if path is None:
attrs_key = n5_attrs_key
else:
@@ -265,7 +248,6 @@ def _is_group(self, path: str):
return len(n5_attrs) > 0 and "dimensions" not in n5_attrs
def _is_array(self, path: str):
-
if path is None:
attrs_key = n5_attrs_key
else:
@@ -274,7 +256,6 @@ def _is_array(self, path: str):
return "dimensions" in self._load_n5_attrs(attrs_key)
def _contains_attrs(self, path: str):
-
if path is None:
attrs_key = n5_attrs_key
else:
@@ -376,21 +357,18 @@ def _normalize_key(self, key: str):
def __getitem__(self, key: str) -> bytes:
if key.endswith(zarr_group_meta_key):
-
key_new = key.replace(zarr_group_meta_key, self._group_meta_key)
value = group_metadata_to_zarr(self._load_n5_attrs(key_new))
return json_dumps(value)
elif key.endswith(zarr_array_meta_key):
-
key_new = key.replace(zarr_array_meta_key, self._array_meta_key)
top_level = key == zarr_array_meta_key
value = array_metadata_to_zarr(self._load_n5_attrs(key_new), top_level=top_level)
return json_dumps(value)
elif key.endswith(zarr_attrs_key):
-
key_new = key.replace(zarr_attrs_key, self._attrs_key)
value = attrs_to_zarr(self._load_n5_attrs(key_new))
@@ -409,7 +387,6 @@ def __getitem__(self, key: str) -> bytes:
def __setitem__(self, key: str, value: Any):
if key.endswith(zarr_group_meta_key):
-
key_new = key.replace(zarr_group_meta_key, self._group_meta_key)
n5_attrs = self._load_n5_attrs(key_new)
@@ -418,7 +395,6 @@ def __setitem__(self, key: str, value: Any):
value = json_dumps(n5_attrs)
elif key.endswith(zarr_array_meta_key):
-
key_new = key.replace(zarr_array_meta_key, self._array_meta_key)
top_level = key == zarr_array_meta_key
n5_attrs = self._load_n5_attrs(key_new)
@@ -427,7 +403,6 @@ def __setitem__(self, key: str, value: Any):
value = json_dumps(n5_attrs)
elif key.endswith(zarr_attrs_key):
-
key_new = key.replace(zarr_attrs_key, self._attrs_key)
n5_attrs = self._load_n5_attrs(key_new)
@@ -456,7 +431,6 @@ def __setitem__(self, key: str, value: Any):
super().__setitem__(key_new, value)
def __delitem__(self, key: str):
-
if key.endswith(zarr_group_meta_key):
key_new = key.replace(zarr_group_meta_key, self._group_meta_key)
elif key.endswith(zarr_array_meta_key):
@@ -471,7 +445,6 @@ def __delitem__(self, key: str):
def __contains__(self, key: Any):
if key.endswith(zarr_group_meta_key):
-
key_new = key.replace(zarr_group_meta_key, self._group_meta_key)
if key_new not in self:
return False
@@ -479,13 +452,11 @@ def __contains__(self, key: Any):
return "dimensions" not in self._load_n5_attrs(key_new)
elif key.endswith(zarr_array_meta_key):
-
key_new = key.replace(zarr_array_meta_key, self._array_meta_key)
# array if attributes contain 'dimensions'
return "dimensions" in self._load_n5_attrs(key_new)
elif key.endswith(zarr_attrs_key):
-
key_new = key.replace(zarr_attrs_key, self._attrs_key)
return self._contains_attrs(key_new)
@@ -508,7 +479,6 @@ def listdir(self, path: Optional[str] = None):
# doesn't provide.
children = super().listdir(path=path)
if self._is_array(path):
-
# replace n5 attribute file with respective zarr attribute files
children.remove(self._array_meta_key)
children.append(zarr_array_meta_key)
@@ -532,7 +502,6 @@ def listdir(self, path: Optional[str] = None):
return sorted(new_children)
elif self._is_group(path):
-
# replace n5 attribute file with respective zarr attribute files
children.remove(self._group_meta_key)
children.append(zarr_group_meta_key)
@@ -550,7 +519,6 @@ def _load_n5_attrs(self, path: str):
return {}
def _is_group(self, path: Optional[str]):
-
if path is None:
attrs_key = self._attrs_key
else:
@@ -560,7 +528,6 @@ def _is_group(self, path: Optional[str]):
return len(n5_attrs) > 0 and "dimensions" not in n5_attrs
def _is_array(self, path: Optional[str]):
-
if path is None:
attrs_key = self._attrs_key
else:
@@ -569,7 +536,6 @@ def _is_array(self, path: Optional[str]):
return "dimensions" in self._load_n5_attrs(attrs_key)
def _contains_attrs(self, path: Optional[str]):
-
if path is None:
attrs_key = self._attrs_key
else:
@@ -712,7 +678,6 @@ def attrs_to_zarr(attrs: Dict[str, Any]) -> Dict[str, Any]:
def compressor_config_to_n5(compressor_config: Optional[Dict[str, Any]]) -> Dict[str, Any]:
-
if compressor_config is None:
return {"type": "raw"}
else:
@@ -726,19 +691,16 @@ def compressor_config_to_n5(compressor_config: Optional[Dict[str, Any]]) -> Dict
n5_config = {"type": codec_id}
if codec_id == "bz2":
-
n5_config["type"] = "bzip2"
n5_config["blockSize"] = _compressor_config["level"]
elif codec_id == "blosc":
-
n5_config["cname"] = _compressor_config["cname"]
n5_config["clevel"] = _compressor_config["clevel"]
n5_config["shuffle"] = _compressor_config["shuffle"]
n5_config["blocksize"] = _compressor_config["blocksize"]
elif codec_id == "lzma":
-
# Switch to XZ for N5 if we are using the default XZ format.
# Note: 4 is the default, which is lzma.CHECK_CRC64.
if _compressor_config["format"] == 1 and _compressor_config["check"] in [-1, 4]:
@@ -760,50 +722,42 @@ def compressor_config_to_n5(compressor_config: Optional[Dict[str, Any]]) -> Dict
n5_config["preset"] = 6
elif codec_id == "zlib":
-
n5_config["type"] = "gzip"
n5_config["level"] = _compressor_config["level"]
n5_config["useZlib"] = True
elif codec_id == "gzip":
-
n5_config["type"] = "gzip"
n5_config["level"] = _compressor_config["level"]
n5_config["useZlib"] = False
else:
-
n5_config.update({k: v for k, v in _compressor_config.items() if k != "type"})
return n5_config
def compressor_config_to_zarr(compressor_config: Dict[str, Any]) -> Optional[Dict[str, Any]]:
-
codec_id = compressor_config["type"]
zarr_config = {"id": codec_id}
if codec_id == "bzip2":
-
zarr_config["id"] = "bz2"
zarr_config["level"] = compressor_config["blockSize"]
elif codec_id == "blosc":
-
zarr_config["cname"] = compressor_config["cname"]
zarr_config["clevel"] = compressor_config["clevel"]
zarr_config["shuffle"] = compressor_config["shuffle"]
zarr_config["blocksize"] = compressor_config["blocksize"]
elif codec_id == "lzma":
-
zarr_config["format"] = compressor_config["format"]
zarr_config["check"] = compressor_config["check"]
zarr_config["preset"] = compressor_config["preset"]
zarr_config["filters"] = compressor_config["filters"]
elif codec_id == "xz":
-
zarr_config["id"] = "lzma"
zarr_config["format"] = 1 # lzma.FORMAT_XZ
zarr_config["check"] = -1
@@ -811,7 +765,6 @@ def compressor_config_to_zarr(compressor_config: Dict[str, Any]) -> Optional[Dic
zarr_config["filters"] = None
elif codec_id == "gzip":
-
if "useZlib" in compressor_config and compressor_config["useZlib"]:
zarr_config["id"] = "zlib"
zarr_config["level"] = compressor_config["level"]
@@ -820,22 +773,18 @@ def compressor_config_to_zarr(compressor_config: Dict[str, Any]) -> Optional[Dic
zarr_config["level"] = compressor_config["level"]
elif codec_id == "raw":
-
return None
else:
-
zarr_config.update({k: v for k, v in compressor_config.items() if k != "type"})
return zarr_config
class N5ChunkWrapper(Codec):
-
codec_id = "n5_wrapper"
def __init__(self, dtype, chunk_shape, compressor_config=None, compressor=None):
-
self.dtype = np.dtype(dtype)
self.chunk_shape = tuple(chunk_shape)
# is the dtype a little endian format?
@@ -860,7 +809,6 @@ def get_config(self):
return config
def encode(self, chunk):
-
assert chunk.flags.c_contiguous
header = self._create_header(chunk)
@@ -872,12 +820,10 @@ def encode(self, chunk):
return header + chunk.tobytes(order="A")
def decode(self, chunk, out=None) -> bytes:
-
len_header, chunk_shape = self._read_header(chunk)
chunk = chunk[len_header:]
if out is not None:
-
# out should only be used if we read a complete chunk
assert chunk_shape == self.chunk_shape, "Expected chunk of shape {}, found {}".format(
self.chunk_shape, chunk_shape
@@ -895,7 +841,6 @@ def decode(self, chunk, out=None) -> bytes:
return out
else:
-
if self._compressor:
chunk = self._compressor.decode(chunk)
@@ -915,7 +860,6 @@ def decode(self, chunk, out=None) -> bytes:
@staticmethod
def _create_header(chunk):
-
mode = struct.pack(">H", 0)
num_dims = struct.pack(">H", len(chunk.shape))
shape = b"".join(struct.pack(">I", d) for d in chunk.shape[::-1])
@@ -924,7 +868,6 @@ def _create_header(chunk):
@staticmethod
def _read_header(chunk):
-
num_dims = struct.unpack(">H", chunk[2:4])[0]
shape = tuple(
struct.unpack(">I", chunk[i : i + 4])[0] for i in range(4, num_dims * 4 + 4, 4)
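For context, a minimal sketch of the N5 chunk header handled by the two static methods above: a big-endian ``uint16`` mode, a ``uint16`` dimension count, then one ``uint32`` per dimension in reversed order::

    import struct

    def create_header(shape, mode=0):
        header = struct.pack(">H", mode)
        header += struct.pack(">H", len(shape))
        header += b"".join(struct.pack(">I", d) for d in shape[::-1])
        return header

    def read_header(buf):
        num_dims = struct.unpack(">H", buf[2:4])[0]
        shape = tuple(
            struct.unpack(">I", buf[i : i + 4])[0]
            for i in range(4, num_dims * 4 + 4, 4)
        )[::-1]
        return 4 + num_dims * 4, shape  # (header length, chunk shape)

    header = create_header((10, 20))
    assert read_header(header) == (12, (10, 20))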
diff --git a/zarr/storage.py b/zarr/storage.py
index b36f804ebd..585417f59c 100644
--- a/zarr/storage.py
+++ b/zarr/storage.py
@@ -28,6 +28,7 @@
import zipfile
from collections import OrderedDict
from collections.abc import MutableMapping
+from functools import lru_cache
from os import scandir
from pickle import PicklingError
from threading import Lock, RLock
@@ -482,7 +483,6 @@ def _init_array_metadata(
dimension_separator=None,
storage_transformers=(),
):
-
store_version = getattr(store, "_store_version", 2)
path = normalize_storage_path(path)
@@ -687,7 +687,6 @@ def _init_group_metadata(
path: Optional[str] = None,
chunk_store: Optional[StoreLike] = None,
):
-
store_version = getattr(store, "_store_version", 2)
path = normalize_storage_path(path)
@@ -1055,7 +1054,6 @@ class DirectoryStore(Store):
"""
def __init__(self, path, normalize_keys=False, dimension_separator=None):
-
# guard conditions
path = os.path.abspath(path)
if os.path.exists(path) and not os.path.isdir(path):
@@ -1415,7 +1413,6 @@ def _normalize_key(self, key):
def getitems(
self, keys: Sequence[str], *, contexts: Mapping[str, Context]
) -> Mapping[str, Any]:
-
keys_transformed = [self._normalize_key(key) for key in keys]
results = self.map.getitems(keys_transformed, on_error="omit")
# The function calling this method may not recognize the transformed keys
@@ -1540,6 +1537,7 @@ def clear(self):
self.map.clear()
@classmethod
+ @lru_cache(maxsize=None)
def _fsspec_installed(cls):
"""Returns true if fsspec is installed"""
import importlib.util
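The cached check is cheap to reproduce in isolation; a minimal sketch of the pattern, using only the standard library::

    from functools import lru_cache
    import importlib.util

    @lru_cache(maxsize=None)
    def fsspec_installed() -> bool:
        # the import machinery is queried once; later calls return the cached bool
        return importlib.util.find_spec("fsspec") is not None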
@@ -1768,7 +1766,6 @@ def __init__(
mode="a",
dimension_separator=None,
):
-
# store properties
path = os.path.abspath(path)
self.path = path
diff --git a/zarr/tests/test_attrs.py b/zarr/tests/test_attrs.py
index 7dd5b340a2..2d9553971b 100644
--- a/zarr/tests/test_attrs.py
+++ b/zarr/tests/test_attrs.py
@@ -30,7 +30,6 @@ def init_attributes(self, store, read_only=False, cache=True, zarr_version=2):
return Attributes(store, key=root + "attrs", read_only=read_only, cache=cache)
def test_storage(self, zarr_version):
-
store = _init_store(zarr_version)
root = ".z" if zarr_version == 2 else meta_root
attrs_key = root + "attrs"
@@ -50,7 +49,6 @@ def test_storage(self, zarr_version):
assert dict(foo="bar", baz=42) == d
def test_utf8_encoding(self, zarr_version):
-
project_root = pathlib.Path(zarr.__file__).resolve().parent.parent
fixdir = project_root / "fixture"
testdir = fixdir / "utf8attrs"
@@ -67,7 +65,6 @@ def test_utf8_encoding(self, zarr_version):
assert fixture["utf8attrs"].attrs.asdict() == dict(foo="た")
def test_get_set_del_contains(self, zarr_version):
-
store = _init_store(zarr_version)
a = self.init_attributes(store, zarr_version=zarr_version)
assert "foo" not in a
@@ -84,7 +81,6 @@ def test_get_set_del_contains(self, zarr_version):
a["foo"]
def test_update_put(self, zarr_version):
-
store = _init_store(zarr_version)
a = self.init_attributes(store, zarr_version=zarr_version)
assert "foo" not in a
@@ -102,7 +98,6 @@ def test_update_put(self, zarr_version):
assert "baz" not in a
def test_iterators(self, zarr_version):
-
store = _init_store(zarr_version)
a = self.init_attributes(store, zarr_version=zarr_version)
assert 0 == len(a)
@@ -232,7 +227,6 @@ def test_caching_on(self, zarr_version):
assert get_cnt == store.counter["__getitem__", attrs_key]
def test_caching_off(self, zarr_version):
-
# setup store
store = CountingDict() if zarr_version == 2 else CountingDictV3()
attrs_key = ".zattrs" if zarr_version == 2 else "meta/root/attrs"
diff --git a/zarr/tests/test_convenience.py b/zarr/tests/test_convenience.py
index 389ce90a9d..7d190adc2c 100644
--- a/zarr/tests/test_convenience.py
+++ b/zarr/tests/test_convenience.py
@@ -57,7 +57,6 @@ def _init_creation_kwargs(zarr_version):
@pytest.mark.parametrize("zarr_version", _VERSIONS)
def test_open_array(path_type, zarr_version):
-
store = tempfile.mkdtemp()
atexit.register(atexit_rmtree, store)
store = path_type(store)
@@ -86,7 +85,6 @@ def test_open_array(path_type, zarr_version):
@pytest.mark.parametrize("zarr_version", _VERSIONS)
def test_open_group(path_type, zarr_version):
-
store = tempfile.mkdtemp()
atexit.register(atexit_rmtree, store)
store = path_type(store)
@@ -210,7 +208,6 @@ def test_tree(zarr_version):
def test_consolidate_metadata(
with_chunk_store, zarr_version, listable, monkeypatch, stores_from_path
):
-
# setup initial data
if stores_from_path:
store = tempfile.mkdtemp()
@@ -399,7 +396,6 @@ def test_save_array_separator(tmpdir, options):
class TestCopyStore(unittest.TestCase):
-
_version = 2
def setUp(self):
@@ -536,7 +532,6 @@ def test_if_exists(self):
@pytest.mark.skipif(not v3_api_available, reason="V3 is disabled")
class TestCopyStoreV3(TestCopyStore):
-
_version = 3
def setUp(self):
@@ -557,7 +552,6 @@ def test_mismatched_store_versions(self):
def check_copied_array(original, copied, without_attrs=False, expect_props=None):
-
# setup
source_h5py = original.__module__.startswith("h5py.")
dest_h5py = copied.__module__.startswith("h5py.")
@@ -621,7 +615,6 @@ def check_copied_array(original, copied, without_attrs=False, expect_props=None)
def check_copied_group(original, copied, without_attrs=False, expect_props=None, shallow=False):
-
# setup
if expect_props is None:
expect_props = dict()
diff --git a/zarr/tests/test_creation.py b/zarr/tests/test_creation.py
index b44c6379fd..8e586abfff 100644
--- a/zarr/tests/test_creation.py
+++ b/zarr/tests/test_creation.py
@@ -74,7 +74,6 @@ def _init_creation_kwargs(zarr_version, at_root=True):
@pytest.mark.parametrize("zarr_version", _VERSIONS)
@pytest.mark.parametrize("at_root", [False, True])
def test_array(zarr_version, at_root):
-
expected_zarr_version = DEFAULT_ZARR_VERSION if zarr_version is None else zarr_version
kwargs = _init_creation_kwargs(zarr_version, at_root)
@@ -213,7 +212,6 @@ def test_full_additional_dtypes(zarr_version):
@pytest.mark.parametrize("zarr_version", _VERSIONS)
@pytest.mark.parametrize("at_root", [False, True])
def test_open_array(zarr_version, at_root, dimension_separator):
-
store = "data/array.zarr"
kwargs = _init_creation_kwargs(zarr_version, at_root)
@@ -329,7 +327,6 @@ def test_open_array(zarr_version, at_root, dimension_separator):
def test_open_array_none():
-
# open with both store and zarr_version = None
z = open_array(mode="w", shape=100, chunks=10)
assert isinstance(z, Array)
@@ -339,7 +336,6 @@ def test_open_array_none():
@pytest.mark.parametrize("dimension_separator", [".", "/", None])
@pytest.mark.parametrize("zarr_version", _VERSIONS2)
def test_open_array_infer_separator_from_store(zarr_version, dimension_separator):
-
if zarr_version == 3:
StoreClass = DirectoryStoreV3
path = "data"
@@ -370,7 +366,6 @@ def test_open_array_infer_separator_from_store(zarr_version, dimension_separator
# TODO: N5 support for v3
@pytest.mark.parametrize("zarr_version", [None, 2])
def test_open_array_n5(zarr_version):
-
store = "data/array.zarr"
kwargs = _init_creation_kwargs(zarr_version)
@@ -409,7 +404,6 @@ def test_open_array_n5(zarr_version):
@pytest.mark.parametrize("zarr_version", _VERSIONS)
@pytest.mark.parametrize("at_root", [False, True])
def test_open_array_dict_store(zarr_version, at_root):
-
# dict will become a KVStore
store = dict()
kwargs = _init_creation_kwargs(zarr_version, at_root)
@@ -503,7 +497,6 @@ def test_empty_like(zarr_version, at_root):
@pytest.mark.parametrize("zarr_version", _VERSIONS)
@pytest.mark.parametrize("at_root", [False, True])
def test_zeros_like(zarr_version, at_root):
-
kwargs = _init_creation_kwargs(zarr_version, at_root)
expected_zarr_version = DEFAULT_ZARR_VERSION if zarr_version is None else zarr_version
@@ -529,7 +522,6 @@ def test_zeros_like(zarr_version, at_root):
@pytest.mark.parametrize("zarr_version", _VERSIONS)
@pytest.mark.parametrize("at_root", [False, True])
def test_ones_like(zarr_version, at_root):
-
kwargs = _init_creation_kwargs(zarr_version, at_root)
expected_zarr_version = DEFAULT_ZARR_VERSION if zarr_version is None else zarr_version
@@ -556,7 +548,6 @@ def test_ones_like(zarr_version, at_root):
@pytest.mark.parametrize("zarr_version", _VERSIONS)
@pytest.mark.parametrize("at_root", [False, True])
def test_full_like(zarr_version, at_root):
-
kwargs = _init_creation_kwargs(zarr_version, at_root)
expected_zarr_version = DEFAULT_ZARR_VERSION if zarr_version is None else zarr_version
diff --git a/zarr/tests/test_dim_separator.py b/zarr/tests/test_dim_separator.py
index 987852dfd0..0a5814e65f 100644
--- a/zarr/tests/test_dim_separator.py
+++ b/zarr/tests/test_dim_separator.py
@@ -46,7 +46,6 @@ def dataset(tmpdir, request):
static = project_root / "fixture" / suffix
if not static.exists(): # pragma: no cover
-
if "nested" in which:
# No way to reproduce the nested_legacy file via code
generator = NestedDirectoryStore
diff --git a/zarr/tests/test_filters.py b/zarr/tests/test_filters.py
index d55be9145f..fc63cdca8d 100644
--- a/zarr/tests/test_filters.py
+++ b/zarr/tests/test_filters.py
@@ -30,7 +30,6 @@
def test_array_with_delta_filter():
-
# setup
astype = "u1"
dtype = "i8"
@@ -38,7 +37,6 @@ def test_array_with_delta_filter():
data = np.arange(100, dtype=dtype)
for compressor in compressors:
-
a = array(data, chunks=10, compressor=compressor, filters=filters)
# check round-trip
@@ -57,7 +55,6 @@ def test_array_with_delta_filter():
def test_array_with_astype_filter():
-
# setup
encode_dtype = "i1"
decode_dtype = "i8"
@@ -68,7 +65,6 @@ def test_array_with_astype_filter():
data = np.arange(shape, dtype=decode_dtype)
for compressor in compressors:
-
a = array(data, chunks=chunks, compressor=compressor, filters=filters)
# check round-trip
@@ -88,7 +84,6 @@ def test_array_with_astype_filter():
def test_array_with_scaleoffset_filter():
-
# setup
astype = "u1"
dtype = "f8"
@@ -97,7 +92,6 @@ def test_array_with_scaleoffset_filter():
data = np.linspace(1000, 1001, 34, dtype="f8")
for compressor in compressors:
-
a = array(data, chunks=5, compressor=compressor, filters=filters)
# check round-trip
@@ -116,7 +110,6 @@ def test_array_with_scaleoffset_filter():
def test_array_with_quantize_filter():
-
# setup
dtype = "f8"
digits = 3
@@ -125,7 +118,6 @@ def test_array_with_quantize_filter():
data = np.linspace(0, 1, 34, dtype=dtype)
for compressor in compressors:
-
a = array(data, chunks=5, compressor=compressor, filters=filters)
# check round-trip
@@ -144,14 +136,12 @@ def test_array_with_quantize_filter():
def test_array_with_packbits_filter():
-
# setup
flt = PackBits()
filters = [flt]
data = np.random.randint(0, 2, size=100, dtype=bool)
for compressor in compressors:
-
a = array(data, chunks=5, compressor=compressor, filters=filters)
# check round-trip
@@ -170,14 +160,12 @@ def test_array_with_packbits_filter():
def test_array_with_categorize_filter():
-
# setup
data = np.random.choice(["foo", "bar", "baz"], size=100)
flt = Categorize(dtype=data.dtype, labels=["foo", "bar", "baz"])
filters = [flt]
for compressor in compressors:
-
a = array(data, chunks=5, compressor=compressor, filters=filters)
# check round-trip
diff --git a/zarr/tests/test_hierarchy.py b/zarr/tests/test_hierarchy.py
index cbf59c55c3..6c08d7b88a 100644
--- a/zarr/tests/test_hierarchy.py
+++ b/zarr/tests/test_hierarchy.py
@@ -1085,7 +1085,6 @@ def test_paths(self):
g1.store.close()
def test_pickle(self):
-
# setup group
g = self.create_group()
d = g.create_dataset("foo/bar", shape=100, chunks=10)
@@ -1113,7 +1112,6 @@ def test_pickle(self):
g2.store.close()
def test_context_manager(self):
-
with self.create_group() as g:
d = g.create_dataset("foo/bar", shape=100, chunks=10)
d[:] = np.arange(100)
@@ -1375,7 +1373,6 @@ def create_store():
return store, None
def test_context_manager(self):
-
with self.create_group() as g:
store = g.store
d = g.create_dataset("foo/bar", shape=100, chunks=10)
diff --git a/zarr/tests/test_indexing.py b/zarr/tests/test_indexing.py
index 8a34c1e715..f10360e8b7 100644
--- a/zarr/tests/test_indexing.py
+++ b/zarr/tests/test_indexing.py
@@ -17,7 +17,6 @@
def test_normalize_integer_selection():
-
assert 1 == normalize_integer_selection(1, 100)
assert 99 == normalize_integer_selection(-1, 100)
with pytest.raises(IndexError):
@@ -29,7 +28,6 @@ def test_normalize_integer_selection():
def test_replace_ellipsis():
-
# 1D, single item
assert (0,) == replace_ellipsis(0, (100,))
@@ -68,7 +66,6 @@ def test_replace_ellipsis():
def test_get_basic_selection_0d():
-
# setup
a = np.array(42)
z = zarr.create(shape=a.shape, dtype=a.dtype, fill_value=None)
@@ -191,7 +188,6 @@ def _test_get_basic_selection(a, z, selection):
# noinspection PyStatementEffect
def test_get_basic_selection_1d():
-
# setup
a = np.arange(1050, dtype=int)
z = zarr.create(shape=a.shape, chunks=100, dtype=a.dtype)
@@ -264,7 +260,6 @@ def test_get_basic_selection_1d():
# noinspection PyStatementEffect
def test_get_basic_selection_2d():
-
# setup
a = np.arange(10000, dtype=int).reshape(1000, 10)
z = zarr.create(shape=a.shape, chunks=(300, 3), dtype=a.dtype)
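
(Editorial aside: a sketch of the basic-selection round trip these hunks touch, using the same setup as the test.)

    import numpy as np
    import zarr

    a = np.arange(10000, dtype=int).reshape(1000, 10)
    z = zarr.create(shape=a.shape, chunks=(300, 3), dtype=a.dtype)
    z[:] = a
    # get_basic_selection mirrors numpy slicing; z[...] is the operator form
    assert np.array_equal(a[5:15, 2:5], z.get_basic_selection((slice(5, 15), slice(2, 5))))
    assert np.array_equal(a[5:15, 2:5], z[5:15, 2:5])
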
@@ -423,7 +418,6 @@ def test_fancy_indexing_doesnt_mix_with_implicit_slicing():
def test_set_basic_selection_0d():
-
# setup
v = np.array(42)
a = np.zeros_like(v)
@@ -479,7 +473,6 @@ def _test_get_orthogonal_selection(a, z, selection):
# noinspection PyStatementEffect
def test_get_orthogonal_selection_1d_bool():
-
# setup
a = np.arange(1050, dtype=int)
z = zarr.create(shape=a.shape, chunks=100, dtype=a.dtype)
@@ -502,7 +495,6 @@ def test_get_orthogonal_selection_1d_bool():
# noinspection PyStatementEffect
def test_get_orthogonal_selection_1d_int():
-
# setup
a = np.arange(1050, dtype=int)
z = zarr.create(shape=a.shape, chunks=100, dtype=a.dtype)
@@ -561,7 +553,6 @@ def _test_get_orthogonal_selection_2d(a, z, ix0, ix1):
# noinspection PyStatementEffect
def test_get_orthogonal_selection_2d():
-
# setup
a = np.arange(10000, dtype=int).reshape(1000, 10)
z = zarr.create(shape=a.shape, chunks=(300, 3), dtype=a.dtype)
@@ -570,7 +561,6 @@ def test_get_orthogonal_selection_2d():
np.random.seed(42)
# test with different degrees of sparseness
for p in 0.5, 0.1, 0.01:
-
# boolean arrays
ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool)
ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool)
@@ -641,7 +631,6 @@ def _test_get_orthogonal_selection_3d(a, z, ix0, ix1, ix2):
def test_get_orthogonal_selection_3d():
-
# setup
a = np.arange(100000, dtype=int).reshape(200, 50, 10)
z = zarr.create(shape=a.shape, chunks=(60, 20, 3), dtype=a.dtype)
@@ -650,7 +639,6 @@ def test_get_orthogonal_selection_3d():
np.random.seed(42)
# test with different degrees of sparseness
for p in 0.5, 0.1, 0.01:
-
# boolean arrays
ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool)
ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool)
@@ -673,7 +661,6 @@ def test_get_orthogonal_selection_3d():
def test_orthogonal_indexing_edge_cases():
-
a = np.arange(6).reshape(1, 2, 3)
z = zarr.create(shape=a.shape, chunks=(1, 2, 3), dtype=a.dtype)
z[:] = a
@@ -706,7 +693,6 @@ def _test_set_orthogonal_selection(v, a, z, selection):
def test_set_orthogonal_selection_1d():
-
# setup
v = np.arange(1050, dtype=int)
a = np.empty(v.shape, dtype=int)
@@ -715,7 +701,6 @@ def test_set_orthogonal_selection_1d():
# test with different degrees of sparseness
np.random.seed(42)
for p in 0.5, 0.1, 0.01:
-
# boolean arrays
ix = np.random.binomial(1, p, size=a.shape[0]).astype(bool)
_test_set_orthogonal_selection(v, a, z, ix)
@@ -734,7 +719,6 @@ def test_set_orthogonal_selection_1d():
def _test_set_orthogonal_selection_2d(v, a, z, ix0, ix1):
-
selections = [
# index both axes with array
(ix0, ix1),
@@ -749,7 +733,6 @@ def _test_set_orthogonal_selection_2d(v, a, z, ix0, ix1):
def test_set_orthogonal_selection_2d():
-
# setup
v = np.arange(10000, dtype=int).reshape(1000, 10)
a = np.empty_like(v)
@@ -758,7 +741,6 @@ def test_set_orthogonal_selection_2d():
np.random.seed(42)
# test with different degrees of sparseness
for p in 0.5, 0.1, 0.01:
-
# boolean arrays
ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool)
ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool)
@@ -780,7 +762,6 @@ def test_set_orthogonal_selection_2d():
def _test_set_orthogonal_selection_3d(v, a, z, ix0, ix1, ix2):
-
selections = (
# single value
(84, 42, 4),
@@ -807,7 +788,6 @@ def _test_set_orthogonal_selection_3d(v, a, z, ix0, ix1, ix2):
def test_set_orthogonal_selection_3d():
-
# setup
v = np.arange(100000, dtype=int).reshape(200, 50, 10)
a = np.empty_like(v)
@@ -816,7 +796,6 @@ def test_set_orthogonal_selection_3d():
np.random.seed(42)
# test with different degrees of sparseness
for p in 0.5, 0.1, 0.01:
-
# boolean arrays
ix0 = np.random.binomial(1, p, size=a.shape[0]).astype(bool)
ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool)
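
(Editorial aside: the orthogonal-selection tests above all reduce to this outer-indexing round trip; a sketch using the test's own setup.)

    import numpy as np
    import zarr

    a = np.arange(10000, dtype=int).reshape(1000, 10)
    z = zarr.create(shape=a.shape, chunks=(300, 3), dtype=a.dtype)
    z[:] = a
    np.random.seed(42)
    ix0 = np.random.binomial(1, 0.5, size=a.shape[0]).astype(bool)
    ix1 = np.random.binomial(1, 0.5, size=a.shape[1]).astype(bool)
    # outer product of the per-dimension index arrays, like numpy's np.ix_
    assert np.array_equal(a[np.ix_(ix0, ix1)], z.get_orthogonal_selection((ix0, ix1)))
    assert np.array_equal(a[np.ix_(ix0, ix1)], z.oindex[ix0, ix1])  # operator form
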
@@ -888,7 +867,6 @@ def _test_get_coordinate_selection(a, z, selection):
# noinspection PyStatementEffect
def test_get_coordinate_selection_1d():
-
# setup
a = np.arange(1050, dtype=int)
z = zarr.create(shape=a.shape, chunks=100, dtype=a.dtype)
@@ -932,7 +910,6 @@ def test_get_coordinate_selection_1d():
def test_get_coordinate_selection_2d():
-
# setup
a = np.arange(10000, dtype=int).reshape(1000, 10)
z = zarr.create(shape=a.shape, chunks=(300, 3), dtype=a.dtype)
@@ -1027,7 +1004,6 @@ def test_set_coordinate_selection_1d():
def test_set_coordinate_selection_2d():
-
# setup
v = np.arange(10000, dtype=int).reshape(1000, 10)
a = np.empty_like(v)
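
(Editorial aside: coordinate selection, by contrast, is pointwise; a sketch of the round trip these hunks cover.)

    import numpy as np
    import zarr

    a = np.arange(10000, dtype=int).reshape(1000, 10)
    z = zarr.create(shape=a.shape, chunks=(300, 3), dtype=a.dtype)
    z[:] = a
    ix0 = np.array([1, 42, 999])  # row of each point
    ix1 = np.array([0, 5, 9])     # column of each point
    # parallel integer arrays, like numpy fancy indexing
    assert np.array_equal(a[ix0, ix1], z.get_coordinate_selection((ix0, ix1)))
    assert np.array_equal(a[ix0, ix1], z.vindex[ix0, ix1])  # operator form
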
@@ -1258,7 +1234,6 @@ def _test_get_mask_selection(a, z, selection):
# noinspection PyStatementEffect
def test_get_mask_selection_1d():
-
# setup
a = np.arange(1050, dtype=int)
z = zarr.create(shape=a.shape, chunks=100, dtype=a.dtype)
@@ -1285,7 +1260,6 @@ def test_get_mask_selection_1d():
# noinspection PyStatementEffect
def test_get_mask_selection_2d():
-
# setup
a = np.arange(10000, dtype=int).reshape(1000, 10)
z = zarr.create(shape=a.shape, chunks=(300, 3), dtype=a.dtype)
@@ -1318,7 +1292,6 @@ def _test_set_mask_selection(v, a, z, selection):
def test_set_mask_selection_1d():
-
# setup
v = np.arange(1050, dtype=int)
a = np.empty_like(v)
@@ -1338,7 +1311,6 @@ def test_set_mask_selection_1d():
def test_set_mask_selection_2d():
-
# setup
v = np.arange(10000, dtype=int).reshape(1000, 10)
a = np.empty_like(v)
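
(Editorial aside: mask selection takes a boolean array of the full array shape; a sketch of the get/set round trip.)

    import numpy as np
    import zarr

    a = np.arange(1050, dtype=int)
    z = zarr.create(shape=a.shape, chunks=100, dtype=a.dtype)
    z[:] = a
    np.random.seed(42)
    ix = np.random.binomial(1, 0.5, size=a.shape[0]).astype(bool)
    assert np.array_equal(a[ix], z.get_mask_selection(ix))
    z.set_mask_selection(ix, a[ix] * 2)  # writes only where the mask is True
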
@@ -1352,7 +1324,6 @@ def test_set_mask_selection_2d():
def test_get_selection_out():
-
# basic selections
a = np.arange(1050)
z = zarr.create(shape=1050, chunks=100, dtype=a.dtype)
@@ -1426,7 +1397,6 @@ def test_get_selection_out():
def test_get_selections_with_fields():
-
a = [("aaa", 1, 4.2), ("bbb", 2, 8.4), ("ccc", 3, 12.6)]
a = np.array(a, dtype=[("foo", "S3"), ("bar", "i4"), ("baz", "f8")])
z = zarr.create(shape=a.shape, chunks=2, dtype=a.dtype, fill_value=None)
@@ -1444,7 +1414,6 @@ def test_get_selections_with_fields():
]
for fields in fields_fixture:
-
# total selection
expect = a[fields]
actual = z.get_basic_selection(Ellipsis, fields=fields)
@@ -1534,7 +1503,6 @@ def test_get_selections_with_fields():
def test_set_selections_with_fields():
-
v = [("aaa", 1, 4.2), ("bbb", 2, 8.4), ("ccc", 3, 12.6)]
v = np.array(v, dtype=[("foo", "S3"), ("bar", "i4"), ("baz", "f8")])
a = np.empty_like(v)
@@ -1553,7 +1521,6 @@ def test_set_selections_with_fields():
]
for fields in fields_fixture:
-
# currently multi-field assignment is not supported in numpy, so we won't support
# it either
if isinstance(fields, list) and len(fields) > 1:
@@ -1567,7 +1534,6 @@ def test_set_selections_with_fields():
z.set_mask_selection([True, False, True], v, fields=fields)
else:
-
if isinstance(fields, list) and len(fields) == 1:
# work around numpy does not support multi-field assignment even if there
# is only one field
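
(Editorial aside: the fields tests pair a selection with named fields of a structured dtype; a sketch mirroring the test's fixture.)

    import numpy as np
    import zarr

    a = np.array(
        [("aaa", 1, 4.2), ("bbb", 2, 8.4), ("ccc", 3, 12.6)],
        dtype=[("foo", "S3"), ("bar", "i4"), ("baz", "f8")],
    )
    z = zarr.create(shape=a.shape, chunks=2, dtype=a.dtype, fill_value=None)
    z[:] = a
    # fields restricts the result to the named field(s) of the structured dtype
    assert np.array_equal(a["bar"], z.get_basic_selection(Ellipsis, fields="bar"))
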
@@ -1752,7 +1718,6 @@ def test_accessed_chunks(shape, chunks, ops):
z = zarr.create(shape=shape, chunks=chunks, store=store)
for ii, (optype, slices) in enumerate(ops):
-
# Resolve the slices into the accessed chunks for each dimension
chunks_per_dim = []
for N, C, sl in zip(shape, chunks, slices):
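
(Editorial aside: the chunk bookkeeping in that loop comes down to integer division of slice bounds by the chunk size; a simplified sketch, not the test's actual helper.)

    # which chunk indices a slice touches along each dimension
    shape, chunks = (100, 100), (10, 10)
    slices = (slice(5, 25), slice(0, 10))
    chunks_per_dim = []
    for N, C, sl in zip(shape, chunks, slices):
        start, stop, _ = sl.indices(N)  # resolve bounds against the dim length
        chunks_per_dim.append(range(start // C, (stop - 1) // C + 1))
    print([list(r) for r in chunks_per_dim])  # [[0, 1, 2], [0]]
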
diff --git a/zarr/tests/test_info.py b/zarr/tests/test_info.py
index 7fb6feb11b..96eae999f4 100644
--- a/zarr/tests/test_info.py
+++ b/zarr/tests/test_info.py
@@ -7,7 +7,6 @@
@pytest.mark.parametrize("array_size", [10, 15000])
def test_info(array_size):
-
# setup
g = zarr.group(store=dict(), chunk_store=dict(), synchronizer=zarr.ThreadSynchronizer())
g.create_group("foo")
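
(Editorial aside: the info report being parametrized here is easy to inspect interactively; a sketch using the same group setup as the test.)

    import zarr

    g = zarr.group(store=dict(), chunk_store=dict(), synchronizer=zarr.ThreadSynchronizer())
    g.create_group("foo")
    z = g.zeros("bar", shape=10, chunks=5)
    print(g.info)  # group summary: store/chunk-store types, members
    print(z.info)  # array summary: shape, chunks, compressor, storage ratio
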
diff --git a/zarr/tests/test_meta.py b/zarr/tests/test_meta.py
index db50560c8e..3e1e0f9d63 100644
--- a/zarr/tests/test_meta.py
+++ b/zarr/tests/test_meta.py
@@ -34,7 +34,6 @@ def assert_json_equal(expect, actual):
def test_encode_decode_array_1():
-
meta = dict(
shape=(100,),
chunks=(10,),
@@ -76,7 +75,6 @@ def test_encode_decode_array_1():
def test_encode_decode_array_2():
-
# some variations
df = Delta(astype=" Tupl
def normalize_dtype(dtype: Union[str, np.dtype], object_codec) -> Tuple[np.dtype, Any]:
-
# convenience API for object arrays
if inspect.isclass(dtype):
dtype = dtype.__name__ # type: ignore
@@ -245,7 +244,6 @@ def is_total_slice(item, shape: Tuple[int]) -> bool:
def normalize_resize_args(old_shape, *args):
-
# normalize new shape argument
if len(args) == 1:
new_shape = args[0]
@@ -294,7 +292,6 @@ def normalize_dimension_separator(sep: Optional[str]) -> Optional[str]:
def normalize_fill_value(fill_value, dtype: np.dtype):
-
if fill_value is None or dtype.hasobject:
# no fill value
pass
@@ -332,7 +329,6 @@ def normalize_fill_value(fill_value, dtype: np.dtype):
def normalize_storage_path(path: Union[str, bytes, None]) -> str:
-
# handle bytes
if isinstance(path, bytes):
path = str(path, "ascii")
@@ -342,7 +338,6 @@ def normalize_storage_path(path: Union[str, bytes, None]) -> str:
path = str(path)
if path:
-
# convert backslash to forward slash
path = path.replace("\\", "/")
@@ -506,7 +501,6 @@ def tree_widget(group, expand, level):
class TreeViewer:
def __init__(self, group, expand=False, level=None):
-
self.group = group
self.expand = expand
self.level = level
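
(Editorial aside: the zarr.util helpers touched above are internal but importable in zarr v2; a minimal sketch of the behavior described by the context lines, assuming that API.)

    from zarr.util import normalize_resize_args, normalize_storage_path

    # normalize_storage_path: decode bytes, flip backslashes, strip slashes
    assert normalize_storage_path(b"foo/bar") == "foo/bar"
    assert normalize_storage_path("\\foo\\bar\\") == "foo/bar"
    # normalize_resize_args: accept a tuple or separate ints for the new shape
    assert normalize_resize_args((10, 10), 20, 20) == (20, 20)
    assert normalize_resize_args((10, 10), (20, 20)) == (20, 20)
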