From 724daac357e326962b2445f33c7e1e138073b6a3 Mon Sep 17 00:00:00 2001
From: Jonathan Daniel
Date: Sun, 24 May 2020 19:04:59 +0300
Subject: [PATCH 1/9] MNT: Fix configparser + python 2 deprecations

---
 versioneer.py | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/versioneer.py b/versioneer.py
index 7c8333493e..eec2ab0f07 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -276,11 +276,7 @@
 """

-from __future__ import print_function
-try:
-    import configparser
-except ImportError:
-    import ConfigParser as configparser
+import configparser
 import errno
 import json
 import os
@@ -340,9 +336,9 @@ def get_config_from_root(root):
     # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
     # the top of versioneer.py for instructions on writing your setup.cfg .
     setup_cfg = os.path.join(root, "setup.cfg")
-    parser = configparser.SafeConfigParser()
+    parser = configparser.ConfigParser()
     with open(setup_cfg, "r") as f:
-        parser.readfp(f)
+        parser.read_file(f)
     VCS = parser.get("versioneer", "VCS")  # mandatory

     def get(parser, name):

From f3d8f75ed03bebefc4393537f92ed59e5b28246c Mon Sep 17 00:00:00 2001
From: Jonathan Daniel
Date: Sun, 24 May 2020 19:30:10 +0300
Subject: [PATCH 2/9] RF: Use np.flip instead of flip_axis

The package requires numpy >= 1.13; np.flip was added in 1.12.

Add a deprecation warning and remove the dedicated test.
---
 nibabel/orientations.py            | 29 +++++++----------------------
 nibabel/tests/test_orientations.py | 24 +-----------------------
 nibabel/tests/test_processing.py   |  4 ++--
 nibabel/tests/test_scripts.py      | 10 +++++-----
 4 files changed, 15 insertions(+), 52 deletions(-)

diff --git a/nibabel/orientations.py b/nibabel/orientations.py
index ddea3159d0..132d795d76 100644
--- a/nibabel/orientations.py
+++ b/nibabel/orientations.py
@@ -165,7 +165,7 @@ def apply_orientation(arr, ornt):
     # apply ornt transformations
     for ax, flip in enumerate(ornt[:, 1]):
         if flip == -1:
-            t_arr = flip_axis(t_arr, axis=ax)
+            t_arr = np.flip(t_arr, axis=ax)
     full_transpose = np.arange(t_arr.ndim)
     # ornt indicates the transpose that has occurred - we reverse it
     full_transpose[:n] = np.argsort(ornt[:, 0])
@@ -237,13 +237,14 @@ def orientation_affine(ornt, shape):
     return inv_ornt_aff(ornt, shape)


+@deprecate_with_version('flip_axis is deprecated. '
+                        'Please use numpy.flip instead.',
+                        '3.2',
+                        '4.0')
 def flip_axis(arr, axis=0):
     ''' Flip contents of `axis` in array `arr`

-    ``flip_axis`` is the same transform as ``np.flipud``, but for any
-    axis. For example ``flip_axis(arr, axis=0)`` is the same transform
-    as ``np.flipud(arr)``, and ``flip_axis(arr, axis=1)`` is the same
-    transform as ``np.fliplr(arr)``
+    Equivalent to ``np.flip(arr, axis)``.

Parameters ---------- @@ -255,24 +256,8 @@ def flip_axis(arr, axis=0): ------- farr : array Array with axis `axis` flipped - - Examples - -------- - >>> a = np.arange(6).reshape((2,3)) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> flip_axis(a, axis=0) - array([[3, 4, 5], - [0, 1, 2]]) - >>> flip_axis(a, axis=1) - array([[2, 1, 0], - [5, 4, 3]]) ''' - arr = np.asanyarray(arr) - arr = arr.swapaxes(0, axis) - arr = np.flipud(arr) - return arr.swapaxes(axis, 0) + return np.flip(arr, axis) def ornt2axcodes(ornt, labels=None): diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index a3ad215488..2322d570be 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -16,7 +16,7 @@ from numpy.testing import assert_array_equal from ..orientations import (io_orientation, ornt_transform, inv_ornt_aff, - flip_axis, apply_orientation, OrientationError, + apply_orientation, OrientationError, ornt2axcodes, axcodes2ornt, aff2axcodes, orientation_affine) @@ -140,28 +140,6 @@ def test_apply(): assert_array_equal(a.shape, np.array(t_arr.shape)[np.array(ornt)[:, 0]]) -def test_flip_axis(): - a = np.arange(24).reshape((2, 3, 4)) - assert_array_equal( - flip_axis(a), - np.flipud(a)) - assert_array_equal( - flip_axis(a, axis=0), - np.flipud(a)) - assert_array_equal( - flip_axis(a, axis=1), - np.fliplr(a)) - # check accepts array-like - assert_array_equal( - flip_axis(a.tolist(), axis=0), - np.flipud(a)) - # third dimension - b = a.transpose() - b = np.flipud(b) - b = b.transpose() - assert_array_equal(flip_axis(a, axis=2), b) - - def test_io_orientation(): for shape in ((2, 3, 4), (20, 15, 7)): for in_arr, out_ornt in zip(IN_ARRS, OUT_ORNTS): diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py index 64139bab43..5582a49128 100644 --- a/nibabel/tests/test_processing.py +++ b/nibabel/tests/test_processing.py @@ -23,7 +23,7 @@ conform) from nibabel.nifti1 import Nifti1Image from nibabel.nifti2 import Nifti2Image -from nibabel.orientations import aff2axcodes, flip_axis, inv_ornt_aff +from nibabel.orientations import aff2axcodes, inv_ornt_aff from nibabel.affines import (AffineError, from_matvec, to_matvec, apply_affine, voxel_sizes) from nibabel.eulerangles import euler2mat @@ -110,7 +110,7 @@ def test_resample_from_to(): ax_flip_ornt = flip_ornt.copy() ax_flip_ornt[axis, 1] = -1 aff_flip_i = inv_ornt_aff(ax_flip_ornt, (2, 3, 4)) - flipped_img = Nifti1Image(flip_axis(data, axis), + flipped_img = Nifti1Image(np.flip(data, axis), np.dot(affine, aff_flip_i)) out = resample_from_to(flipped_img, ((2, 3, 4), affine)) assert_almost_equal(img.dataobj, out.dataobj) diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index d15403a881..87d28d8245 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -18,7 +18,7 @@ import nibabel as nib from ..tmpdirs import InTemporaryDirectory from ..loadsave import load -from ..orientations import flip_axis, aff2axcodes, inv_ornt_aff +from ..orientations import aff2axcodes, inv_ornt_aff import unittest import pytest @@ -273,7 +273,7 @@ def test_parrec2nii(): assert code == 1 # Default scaling is dv pr_img = load(fname) - flipped_data = flip_axis(pr_img.get_fdata(), 1) + flipped_data = np.flip(pr_img.get_fdata(), 1) base_cmd = ['parrec2nii', '--overwrite', fname] check_conversion(base_cmd, flipped_data, out_froot) check_conversion(base_cmd + ['--scaling=dv'], @@ -281,12 +281,12 @@ def test_parrec2nii(): out_froot) # fp pr_img = load(fname, 
scaling='fp') - flipped_data = flip_axis(pr_img.get_fdata(), 1) + flipped_data = np.flip(pr_img.get_fdata(), 1) check_conversion(base_cmd + ['--scaling=fp'], flipped_data, out_froot) # no scaling - unscaled_flipped = flip_axis(pr_img.dataobj.get_unscaled(), 1) + unscaled_flipped = np.flip(pr_img.dataobj.get_unscaled(), 1) check_conversion(base_cmd + ['--scaling=off'], unscaled_flipped, out_froot) @@ -335,7 +335,7 @@ def test_parrec2nii_with_data(): assert np.all(np.abs(aff_off / vox_sizes) <= 0.501) # The data is very close, unless it's the fieldmap if par_root != 'fieldmap': - conved_data_lps = flip_axis(conved_img.dataobj, 1) + conved_data_lps = np.flip(conved_img.dataobj, 1) assert np.allclose(conved_data_lps, philips_img.dataobj) with InTemporaryDirectory(): # Test some options From 4025edf359bb931c50b7bec89389103f2b23143c Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Sun, 24 May 2020 19:39:22 +0300 Subject: [PATCH 3/9] RF: Removed more py2 compatibility code --- doc/source/devel/register_me.py | 12 +++++------- nisext/sexts.py | 17 +++++------------ 2 files changed, 10 insertions(+), 19 deletions(-) diff --git a/doc/source/devel/register_me.py b/doc/source/devel/register_me.py index b377a2d1f7..d2f571f04f 100644 --- a/doc/source/devel/register_me.py +++ b/doc/source/devel/register_me.py @@ -1,10 +1,7 @@ from os.path import join as pjoin, expanduser, abspath, dirname import sys -# Python 3 compatibility -try: - import configparser as cfp -except ImportError: - import ConfigParser as cfp +import configparser as cfp + if sys.platform == 'win32': HOME_INI = pjoin(expanduser('~'), '_dpkg', 'local.dsource') @@ -15,6 +12,7 @@ OUR_META = pjoin(OUR_PATH, 'meta.ini') DISCOVER_INIS = {'user': HOME_INI, 'system': SYS_INI} + def main(): # Get ini file to which to write try: @@ -23,7 +21,7 @@ def main(): reg_to = 'user' if reg_to in ('user', 'system'): ini_fname = DISCOVER_INIS[reg_to] - else: # it is an ini file name + else: # it is an ini file name ini_fname = reg_to # Read parameters for our distribution @@ -42,7 +40,7 @@ def main(): dsource.set(name, version, OUR_PATH) dsource.write(file(ini_fname, 'wt')) - print 'Registered package %s, %s to %s' % (name, version, ini_fname) + print('Registered package %s, %s to %s' % (name, version, ini_fname)) if __name__ == '__main__': diff --git a/nisext/sexts.py b/nisext/sexts.py index b0d34348e5..c8090f5eac 100644 --- a/nisext/sexts.py +++ b/nisext/sexts.py @@ -2,16 +2,8 @@ import os from os.path import join as pjoin, split as psplit, splitext -import sys -PY3 = sys.version_info[0] >= 3 -if PY3: - string_types = str, -else: - string_types = basestring, -try: - from ConfigParser import ConfigParser -except ImportError: - from configparser import ConfigParser + +from configparser import ConfigParser from distutils.version import LooseVersion from distutils.command.build_py import build_py @@ -19,6 +11,7 @@ from distutils import log + def get_comrec_build(pkg_dir, build_cmd=build_py): """ Return extended build command class for recording commit @@ -82,7 +75,7 @@ def _add_append_key(in_dict, key, value): # Append value to in_dict[key] list if key not in in_dict: in_dict[key] = [] - elif isinstance(in_dict[key], string_types): + elif isinstance(in_dict[key], str): in_dict[key] = [in_dict[key]] in_dict[key].append(value) @@ -179,7 +172,7 @@ def version_getter(pkg_name): + msgs['opt suffix']) return # setuptools mode - if optional_tf and not isinstance(optional, string_types): + if optional_tf and not isinstance(optional, str): raise 
RuntimeError('Not-False optional arg should be string') dependency = pkg_name if version: From 6fae16b77118a2c42afeb12f0043e1717c493f6e Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Sun, 24 May 2020 19:50:57 +0300 Subject: [PATCH 4/9] RF: Use triple double-quoted string in docstrings ''' -> """ --- bin/nib-nifti-dx | 2 +- doc/source/dicom/derivations/dicom_mosaic.py | 2 +- .../dicom/derivations/spm_dicom_orient.py | 4 +- doc/tools/apigen.py | 40 +++--- nibabel/analyze.py | 92 ++++++------- nibabel/batteryrunners.py | 40 +++--- nibabel/cifti2/cifti2.py | 54 ++++---- nibabel/cifti2/parse_cifti2.py | 4 +- nibabel/cmdline/nifti_dx.py | 2 +- nibabel/data.py | 42 +++--- nibabel/dataobj_images.py | 14 +- nibabel/ecat.py | 42 +++--- nibabel/environment.py | 8 +- nibabel/eulerangles.py | 28 ++-- nibabel/externals/tests/test_netcdf.py | 2 +- nibabel/filebasedimages.py | 48 +++---- nibabel/fileholders.py | 18 +-- nibabel/filename_parser.py | 14 +- nibabel/freesurfer/mghformat.py | 118 ++++++++-------- nibabel/freesurfer/tests/test_mghformat.py | 6 +- nibabel/funcs.py | 20 +-- nibabel/imageclasses.py | 2 +- nibabel/loadsave.py | 8 +- nibabel/minc1.py | 16 +-- nibabel/minc2.py | 12 +- nibabel/nicom/csareader.py | 20 +-- nibabel/nicom/dicomreaders.py | 14 +- nibabel/nicom/dicomwrappers.py | 2 +- nibabel/nicom/dwiparams.py | 12 +- nibabel/nicom/structreader.py | 18 +-- nibabel/nicom/tests/data_pkgs.py | 2 +- nibabel/nifti1.py | 128 +++++++++--------- nibabel/nifti2.py | 18 +-- nibabel/orientations.py | 22 +-- nibabel/pkg_info.py | 8 +- nibabel/quaternions.py | 56 ++++---- nibabel/spatialimages.py | 48 +++---- nibabel/spm2analyze.py | 10 +- nibabel/spm99analyze.py | 36 ++--- nibabel/testing/__init__.py | 2 +- nibabel/tests/test_analyze.py | 4 +- nibabel/tests/test_batteryrunners.py | 4 +- nibabel/tests/test_data.py | 2 +- nibabel/tests/test_endiancodes.py | 2 +- nibabel/tests/test_euler.py | 6 +- nibabel/tests/test_filename_parser.py | 2 +- nibabel/tests/test_funcs.py | 2 +- nibabel/tests/test_image_load_save.py | 2 +- nibabel/tests/test_image_types.py | 2 +- nibabel/tests/test_nifti1.py | 2 +- nibabel/tests/test_nifti2.py | 2 +- nibabel/tests/test_openers.py | 2 +- nibabel/tests/test_orientations.py | 2 +- nibabel/tests/test_quaternions.py | 4 +- nibabel/tests/test_recoder.py | 2 +- nibabel/tests/test_scaling.py | 2 +- nibabel/tests/test_spm2analyze.py | 2 +- nibabel/tests/test_trackvis.py | 2 +- nibabel/tests/test_volumeutils.py | 8 +- nibabel/tests/test_wrapstruct.py | 16 +-- nibabel/tmpdirs.py | 8 +- nibabel/trackvis.py | 26 ++-- nibabel/tripwire.py | 2 +- nibabel/volumeutils.py | 66 ++++----- nibabel/wrapstruct.py | 76 +++++------ nisext/sexts.py | 8 +- nisext/testers.py | 20 +-- tools/gitwash_dumper.py | 10 +- 68 files changed, 660 insertions(+), 660 deletions(-) diff --git a/bin/nib-nifti-dx b/bin/nib-nifti-dx index d317585286..b395ee1d9a 100755 --- a/bin/nib-nifti-dx +++ b/bin/nib-nifti-dx @@ -7,7 +7,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Print nifti diagnostics for header files ''' +""" Print nifti diagnostics for header files """ from nibabel.cmdline.nifti_dx import main diff --git a/doc/source/dicom/derivations/dicom_mosaic.py b/doc/source/dicom/derivations/dicom_mosaic.py index 5c749f1372..21601a4073 100644 --- a/doc/source/dicom/derivations/dicom_mosaic.py +++ b/doc/source/dicom/derivations/dicom_mosaic.py @@ -1,4 +1,4 @@ -''' Just showing the mosaic simplification ''' +""" Just showing the mosaic simplification """ import sympy from sympy import Matrix, Symbol, symbols, zeros, ones, eye diff --git a/doc/source/dicom/derivations/spm_dicom_orient.py b/doc/source/dicom/derivations/spm_dicom_orient.py index 498d39f05c..d500cfbdf9 100644 --- a/doc/source/dicom/derivations/spm_dicom_orient.py +++ b/doc/source/dicom/derivations/spm_dicom_orient.py @@ -1,11 +1,11 @@ -''' Symbolic versions of the DICOM orientation mathemeatics. +""" Symbolic versions of the DICOM orientation mathemeatics. Notes on the SPM orientation machinery. There are symbolic versions of the code in ``spm_dicom_convert``, ``write_volume`` subfunction, around line 509 in the version I have (SPM8, late 2009 vintage). -''' +""" import numpy as np diff --git a/doc/tools/apigen.py b/doc/tools/apigen.py index 4be721733d..05498c69a9 100644 --- a/doc/tools/apigen.py +++ b/doc/tools/apigen.py @@ -29,8 +29,8 @@ class ApiDocWriter(object): - ''' Class for automatic detection and parsing of API docs - to Sphinx-parsable reST format''' + """ Class for automatic detection and parsing of API docs + to Sphinx-parsable reST format""" # only separating first two levels rst_section_levels = ['*', '=', '-', '~', '^'] @@ -42,7 +42,7 @@ def __init__(self, module_skip_patterns=None, other_defines=True ): - ''' Initialize package for parsing + """ Initialize package for parsing Parameters ---------- @@ -70,7 +70,7 @@ def __init__(self, other_defines : {True, False}, optional Whether to include classes and functions that are imported in a particular module but not defined there. - ''' + """ if package_skip_patterns is None: package_skip_patterns = ['\\.tests$'] if module_skip_patterns is None: @@ -85,7 +85,7 @@ def get_package_name(self): return self._package_name def set_package_name(self, package_name): - ''' Set package_name + """ Set package_name >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx @@ -95,7 +95,7 @@ def set_package_name(self, package_name): >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True - ''' + """ # It's also possible to imagine caching the module parsing here self._package_name = package_name root_module = self._import(package_name) @@ -106,7 +106,7 @@ def set_package_name(self, package_name): 'get/set package_name') def _import(self, name): - ''' Import namespace package ''' + """ Import namespace package """ mod = __import__(name) components = name.split('.') for comp in components[1:]: @@ -114,7 +114,7 @@ def _import(self, name): return mod def _get_object_name(self, line): - ''' Get second token in line + """ Get second token in line >>> docwriter = ApiDocWriter('sphinx') >>> docwriter._get_object_name(" def func(): ") 'func' @@ -122,14 +122,14 @@ def _get_object_name(self, line): 'Klass' >>> docwriter._get_object_name(" class Klass: ") 'Klass' - ''' + """ name = line.split()[1].split('(')[0].strip() # in case we have classes which are not derived from object # ie. 
old style classes return name.rstrip(':') def _uri2path(self, uri): - ''' Convert uri to absolute filepath + """ Convert uri to absolute filepath Parameters ---------- @@ -155,7 +155,7 @@ def _uri2path(self, uri): True >>> docwriter._uri2path('sphinx.does_not_exist') - ''' + """ if uri == self.package_name: return os.path.join(self.root_path, '__init__.py') path = uri.replace(self.package_name + '.', '') @@ -171,7 +171,7 @@ def _uri2path(self, uri): return path def _path2uri(self, dirpath): - ''' Convert directory path to uri ''' + """ Convert directory path to uri """ package_dir = self.package_name.replace('.', os.path.sep) relpath = dirpath.replace(self.root_path, package_dir) if relpath.startswith(os.path.sep): @@ -179,7 +179,7 @@ def _path2uri(self, dirpath): return relpath.replace(os.path.sep, '.') def _parse_module(self, uri): - ''' Parse module defined in *uri* ''' + """ Parse module defined in *uri* """ filename = self._uri2path(uri) if filename is None: print(filename, 'erk') @@ -233,7 +233,7 @@ def _parse_module_with_import(self, uri): return functions, classes def _parse_lines(self, linesource): - ''' Parse lines of text for functions and classes ''' + """ Parse lines of text for functions and classes """ functions = [] classes = [] for line in linesource: @@ -254,7 +254,7 @@ def _parse_lines(self, linesource): return functions, classes def generate_api_doc(self, uri): - '''Make autodoc documentation template string for a module + """Make autodoc documentation template string for a module Parameters ---------- @@ -267,7 +267,7 @@ def generate_api_doc(self, uri): Module name, table of contents. body : string Function and class docstrings. - ''' + """ # get the names of all classes and functions functions, classes = self._parse_module_with_import(uri) if not len(functions) and not len(classes) and DEBUG: @@ -317,7 +317,7 @@ def generate_api_doc(self, uri): return head, body def _survives_exclude(self, matchstr, match_type): - ''' Returns True if *matchstr* does not match patterns + """ Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present @@ -336,7 +336,7 @@ def _survives_exclude(self, matchstr, match_type): >>> dw.module_skip_patterns.append('^\\.badmod$') >>> dw._survives_exclude('sphinx.badmod', 'module') False - ''' + """ if match_type == 'module': patterns = self.module_skip_patterns elif match_type == 'package': @@ -359,7 +359,7 @@ def _survives_exclude(self, matchstr, match_type): return True def discover_modules(self): - ''' Return module sequence discovered from ``self.package_name`` + """ Return module sequence discovered from ``self.package_name`` Parameters @@ -381,7 +381,7 @@ def discover_modules(self): >>> 'sphinx.util' in dw.discover_modules() False >>> - ''' + """ modules = [self.package_name] # raw directory parsing for dirpath, dirnames, filenames in os.walk(self.root_path): diff --git a/nibabel/analyze.py b/nibabel/analyze.py index 219aa42993..53e01db64c 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Read / write access to the basic Mayo Analyze format +""" Read / write access to the basic Mayo Analyze format =========================== The Analyze header format @@ -80,7 +80,7 @@ zooms, in particular, negative X zooms. We did not do this because the image can be loaded with and without a default flip, so the saved zoom will not constrain the affine. 
-''' +""" import numpy as np @@ -171,11 +171,11 @@ class AnalyzeHeader(LabeledWrapStruct): - ''' Class for basic analyze header + """ Class for basic analyze header Implements zoom-only setting of affine transform, and no image scaling - ''' + """ # Copies of module-level definitions template_dtype = header_dtype _data_type_codes = data_type_codes @@ -194,7 +194,7 @@ def __init__(self, binaryblock=None, endianness=None, check=True): - ''' Initialize header from binary data block + """ Initialize header from binary data block Parameters ---------- @@ -247,12 +247,12 @@ def __init__(self, >>> hdr4 = AnalyzeHeader(binblock3) >>> hdr4.endianness == swapped_code True - ''' + """ super(AnalyzeHeader, self).__init__(binaryblock, endianness, check) @classmethod def guessed_endian(klass, hdr): - ''' Guess intended endianness from mapping-like ``hdr`` + """ Guess intended endianness from mapping-like ``hdr`` Parameters ---------- @@ -323,7 +323,7 @@ def guessed_endian(klass, hdr): >>> hdr_data['dim'][0] = 1 >>> AnalyzeHeader.guessed_endian(hdr_data) == native_code True - ''' + """ dim0 = int(hdr['dim'][0]) if dim0 == 0: if hdr['sizeof_hdr'].byteswap() == klass.sizeof_hdr: @@ -335,8 +335,8 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): - ''' Return header data for empty header with given endianness - ''' + """ Return header data for empty header with given endianness + """ hdr_data = super(AnalyzeHeader, klass).default_structarr(endianness) hdr_data['sizeof_hdr'] = klass.sizeof_hdr hdr_data['dim'] = 1 @@ -348,7 +348,7 @@ def default_structarr(klass, endianness=None): @classmethod def from_header(klass, header=None, check=True): - ''' Class method to create header from another header + """ Class method to create header from another header Parameters ---------- @@ -362,7 +362,7 @@ def from_header(klass, header=None, check=True): ------- hdr : header instance fresh header instance of our own class - ''' + """ # own type, return copy if type(header) == klass: obj = header.copy() @@ -407,7 +407,7 @@ def from_header(klass, header=None, check=True): return obj def _clean_after_mapping(self): - ''' Set format-specific stuff after converting header from mapping + """ Set format-specific stuff after converting header from mapping This routine cleans up Analyze-type headers that have had their fields set from an Analyze map returned by the ``as_analyze_map`` method. @@ -422,13 +422,13 @@ def _clean_after_mapping(self): magic to "n+1", when it should be "ni1" for the pair header. This method is for that kind of case - so the specific header can set fields like magic correctly, even though the mapping has given a wrong value. - ''' + """ # All current Nifti etc fields that are present in the Analyze header # have the same meaning as they do for Analyze. pass def raw_data_from_fileobj(self, fileobj): - ''' Read unscaled data array from `fileobj` + """ Read unscaled data array from `fileobj` Parameters ---------- @@ -439,14 +439,14 @@ def raw_data_from_fileobj(self, fileobj): ------- arr : ndarray unscaled data array - ''' + """ dtype = self.get_data_dtype() shape = self.get_data_shape() offset = self.get_data_offset() return array_from_file(shape, dtype, fileobj, offset) def data_from_fileobj(self, fileobj): - ''' Read scaled data array from `fileobj` + """ Read scaled data array from `fileobj` Use this routine to get the scaled image data from an image file `fileobj`, given a header `self`. 
"Scaled" means, with any header @@ -469,7 +469,7 @@ def data_from_fileobj(self, fileobj): data. Raw Analyze files don't have scale factors or intercepts, but this routine also works with formats based on Analyze, that do have scaling, such as SPM analyze formats and NIfTI. - ''' + """ # read unscaled data data = self.raw_data_from_fileobj(fileobj) # get scalings from header. Value of None means not present in header @@ -480,7 +480,7 @@ def data_from_fileobj(self, fileobj): return apply_read_scaling(data, slope, inter) def data_to_fileobj(self, data, fileobj, rescale=True): - ''' Write `data` to `fileobj`, maybe rescaling data, modifying `self` + """ Write `data` to `fileobj`, maybe rescaling data, modifying `self` In writing the data, we match the header to the written data, by setting the header scaling factors, iff `rescale` is True. Thus we @@ -510,7 +510,7 @@ def data_to_fileobj(self, data, fileobj, rescale=True): >>> hdr.data_to_fileobj(data, str_io) >>> data.astype(np.float64).tobytes('F') == str_io.getvalue() True - ''' + """ data = np.asanyarray(data) shape = self.get_data_shape() if data.shape != shape: @@ -532,16 +532,16 @@ def data_to_fileobj(self, data, fileobj, rescale=True): self.set_slope_inter(*get_slope_inter(arr_writer)) def get_data_dtype(self): - ''' Get numpy dtype for data + """ Get numpy dtype for data For examples see ``set_data_dtype`` - ''' + """ code = int(self._structarr['datatype']) dtype = self._data_type_codes.dtype[code] return dtype.newbyteorder(self.endianness) def set_data_dtype(self, datatype): - ''' Set numpy dtype for data from code or dtype or type + """ Set numpy dtype for data from code or dtype or type Examples -------- @@ -564,7 +564,7 @@ def set_data_dtype(self, datatype): Traceback (most recent call last): ... HeaderDataError: data dtype "" known but not supported - ''' + """ dt = datatype if dt not in self._data_type_codes: try: @@ -585,7 +585,7 @@ def set_data_dtype(self, datatype): self._structarr['bitpix'] = dtype.itemsize * 8 def get_data_shape(self): - ''' Get shape of data + """ Get shape of data Examples -------- @@ -600,7 +600,7 @@ def get_data_shape(self): >>> hdr.get_zooms() (1.0, 1.0, 1.0) - ''' + """ dims = self._structarr['dim'] ndims = dims[0] if ndims == 0: @@ -608,7 +608,7 @@ def get_data_shape(self): return tuple(int(d) for d in dims[1:ndims + 1]) def set_data_shape(self, shape): - ''' Set shape of data + """ Set shape of data If ``ndims == len(shape)`` then we set zooms for dimensions higher than ``ndims`` to 1.0 @@ -617,7 +617,7 @@ def set_data_shape(self, shape): ---------- shape : sequence sequence of integers specifying data array shape - ''' + """ dims = self._structarr['dim'] ndims = len(shape) dims[:] = 1 @@ -637,7 +637,7 @@ def set_data_shape(self, shape): self._structarr['pixdim'][ndims + 1:] = 1.0 def get_base_affine(self): - ''' Get affine from basic (shared) header fields + """ Get affine from basic (shared) header fields Note that we get the translations from the center of the image. 
@@ -654,7 +654,7 @@ def get_base_affine(self): [ 0., 2., 0., -4.], [ 0., 0., 1., -3.], [ 0., 0., 0., 1.]]) - ''' + """ hdr = self._structarr dims = hdr['dim'] ndim = dims[0] @@ -665,7 +665,7 @@ def get_base_affine(self): get_best_affine = get_base_affine def get_zooms(self): - ''' Get zooms from header + """ Get zooms from header Returns ------- @@ -683,7 +683,7 @@ def get_zooms(self): >>> hdr.set_zooms((3, 4)) >>> hdr.get_zooms() (3.0, 4.0) - ''' + """ hdr = self._structarr dims = hdr['dim'] ndim = dims[0] @@ -693,10 +693,10 @@ def get_zooms(self): return tuple(pixdims[1:ndim + 1]) def set_zooms(self, zooms): - ''' Set zooms into header fields + """ Set zooms into header fields See docstring for ``get_zooms`` for examples - ''' + """ hdr = self._structarr dims = hdr['dim'] ndim = dims[0] @@ -754,7 +754,7 @@ def set_data_offset(self, offset): self._structarr['vox_offset'] = offset def get_data_offset(self): - ''' Return offset into data file to read data + """ Return offset into data file to read data Examples -------- @@ -764,18 +764,18 @@ def get_data_offset(self): >>> hdr['vox_offset'] = 12 >>> hdr.get_data_offset() 12 - ''' + """ return int(self._structarr['vox_offset']) def get_slope_inter(self): - ''' Get scalefactor and intercept + """ Get scalefactor and intercept These are not implemented for basic Analyze - ''' + """ return None, None def set_slope_inter(self, slope, inter=None): - ''' Set slope and / or intercept into header + """ Set slope and / or intercept into header Set slope and intercept for image data, such that, if the image data is ``arr``, then the scaled image data will be ``(arr * @@ -791,7 +791,7 @@ def set_slope_inter(self, slope, inter=None): If float, value must be NaN or 1.0 or we raise a ``HeaderTypeError`` inter : None or float, optional If float, value must be 0.0 or we raise a ``HeaderTypeError`` - ''' + """ if ((slope in (None, 1) or np.isnan(slope)) and (inter in (None, 0) or np.isnan(inter))): return @@ -800,13 +800,13 @@ def set_slope_inter(self, slope, inter=None): @classmethod def _get_checks(klass): - ''' Return sequence of check functions for this class ''' + """ Return sequence of check functions for this class """ return (klass._chk_sizeof_hdr, klass._chk_datatype, klass._chk_bitpix, klass._chk_pixdims) - ''' Check functions in format expected by BatteryRunner class ''' + """ Check functions in format expected by BatteryRunner class """ @classmethod def _chk_sizeof_hdr(klass, hdr, fix=False): @@ -933,7 +933,7 @@ def set_data_dtype(self, dtype): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - ''' Class method to create image from mapping in ``file_map`` + """ Class method to create image from mapping in ``file_map`` .. deprecated:: 2.4.1 ``keep_file_open='auto'`` is redundant with `False` and has @@ -965,7 +965,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): Returns ------- img : AnalyzeImage instance - ''' + """ if mmap not in (True, False, 'c', 'r'): raise ValueError("mmap should be one of {True, False, 'c', 'r'}") hdr_fh, img_fh = klass._get_fileholders(file_map) @@ -997,14 +997,14 @@ def _get_fileholders(file_map): return file_map['header'], file_map['image'] def to_file_map(self, file_map=None): - ''' Write image to `file_map` or contained ``self.file_map`` + """ Write image to `file_map` or contained ``self.file_map`` Parameters ---------- file_map : None or mapping, optional files mapping. 
If None (default) use object's ``file_map`` attribute instead - ''' + """ if file_map is None: file_map = self.file_map data = np.asanyarray(self.dataobj) diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index b77c8b8858..78a887fb56 100644 --- a/nibabel/batteryrunners.py +++ b/nibabel/batteryrunners.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Battery runner classes and Report classes +""" Battery runner classes and Report classes These classes / objects are for generic checking / fixing batteries @@ -105,14 +105,14 @@ def chk_pixdims(hdr, fix=True): rep.fix_msg = 'setting to abs of pixdim values' return hdr, rep -''' +""" class BatteryRunner(object): - ''' Class to run set of checks ''' + """ Class to run set of checks """ def __init__(self, checks): - ''' Initialize instance from sequence of `checks` + """ Initialize instance from sequence of `checks` Parameters ---------- @@ -126,11 +126,11 @@ def __init__(self, checks): >>> def chk(obj, fix=False): # minimal check ... return obj, Report() >>> btrun = BatteryRunner((chk,)) - ''' + """ self._checks = checks def check_only(self, obj): - ''' Run checks on `obj` returning reports + """ Run checks on `obj` returning reports Parameters ---------- @@ -142,7 +142,7 @@ def check_only(self, obj): reports : sequence sequence of report objects reporting on result of running checks (without fixes) on `obj` - ''' + """ reports = [] for check in self._checks: obj, rep = check(obj, False) @@ -150,7 +150,7 @@ def check_only(self, obj): return reports def check_fix(self, obj): - ''' Run checks, with fixes, on `obj` returning `obj`, reports + """ Run checks, with fixes, on `obj` returning `obj`, reports Parameters ---------- @@ -163,7 +163,7 @@ def check_fix(self, obj): possibly modified or replaced `obj`, after fixes reports : sequence sequence of reports on checks, fixes - ''' + """ reports = [] for check in self._checks: obj, report = check(obj, True) @@ -181,7 +181,7 @@ def __init__(self, problem_level=0, problem_msg='', fix_msg=''): - ''' Initialize report with values + """ Initialize report with values Parameters ---------- @@ -207,7 +207,7 @@ def __init__(self, >>> rep = Report(TypeError, 10) >>> rep.problem_level 10 - ''' + """ self.error = error self.problem_level = problem_level self.problem_msg = problem_msg @@ -223,7 +223,7 @@ def __getstate__(self): return self.error, self.problem_level, self.problem_msg, self.fix_msg def __eq__(self, other): - ''' are two BatteryRunner-like objects equal? + """ are two BatteryRunner-like objects equal? 
Parameters ---------- @@ -239,7 +239,7 @@ def __eq__(self, other): >>> rep3 = Report(problem_level=20) >>> rep == rep3 False - ''' + """ return self.__getstate__() == other.__getstate__() def __ne__(self, other): @@ -250,19 +250,19 @@ def __ne__(self, other): return not self == other def __str__(self): - ''' Printable string for object ''' + """ Printable string for object """ return self.__dict__.__str__() @property def message(self): - ''' formatted message string, including fix message if present - ''' + """ formatted message string, including fix message if present + """ if self.fix_msg: return '; '.join((self.problem_msg, self.fix_msg)) return self.problem_msg def log_raise(self, logger, error_level=40): - ''' Log problem, raise error if problem >= `error_level` + """ Log problem, raise error if problem >= `error_level` Parameters ---------- @@ -270,14 +270,14 @@ def log_raise(self, logger, error_level=40): log object, implementing ``log`` method error_level : int, optional If ``self.problem_level`` >= `error_level`, raise error - ''' + """ logger.log(self.problem_level, self.message) if self.problem_level and self.problem_level >= error_level: if self.error: raise self.error(self.problem_msg) def write_raise(self, stream, error_level=40, log_level=30): - ''' Write report to `stream` + """ Write report to `stream` Parameters ---------- @@ -289,7 +289,7 @@ def write_raise(self, stream, error_level=40, log_level=30): log_level : int, optional Such that if `log_level` is >= ``self.problem_level`` we write the report to `stream`, otherwise we write nothing. - ''' + """ if self.problem_level >= log_level: stream.write('Level %s: %s\n' % (self.problem_level, self.message)) diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index c83c991573..bd86ebfaa7 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Read / write access to CIFTI-2 image format +""" Read / write access to CIFTI-2 image format Format of the NIFTI2 container format described here: @@ -15,7 +15,7 @@ Definition of the CIFTI-2 header format and file extensions can be found at: http://www.nitrc.org/projects/cifti -''' +""" import re from collections.abc import MutableSequence, MutableMapping, Iterable from collections import OrderedDict @@ -786,7 +786,7 @@ def _to_xml_element(self): class Cifti2BrainModel(xml.XmlSerializable): - ''' Element representing a mapping of the dimension to vertex or voxels. + """ Element representing a mapping of the dimension to vertex or voxels. Mapping to vertices of voxels must be specified. 
@@ -840,7 +840,7 @@ class Cifti2BrainModel(xml.XmlSerializable): Indices on the image towards where the array indices are mapped vertex_indices : Cifti2VertexIndices, optional Indices of the vertices towards where the array indices are mapped - ''' + """ def __init__(self, index_offset=None, index_count=None, model_type=None, brain_structure=None, n_surface_vertices=None, @@ -1126,9 +1126,9 @@ def _get_indices_from_mim(self, mim): @property def mapped_indices(self): - ''' + """ List of matrix indices that are mapped - ''' + """ mapped_indices = [] for v in self: a2md = self._get_indices_from_mim(v) @@ -1136,7 +1136,7 @@ def mapped_indices(self): return mapped_indices def get_index_map(self, index): - ''' + """ Cifti2 Mapping class for a given index Parameters @@ -1150,7 +1150,7 @@ def get_index_map(self, index): cifti2_map : Cifti2MatrixIndicesMap Returns the Cifti2MatrixIndicesMap corresponding to the given index. - ''' + """ for v in self: a2md = self._get_indices_from_mim(v) @@ -1205,7 +1205,7 @@ def _to_xml_element(self): return mat def get_axis(self, index): - ''' + """ Generates the Cifti2 axis for a given dimension Parameters @@ -1216,7 +1216,7 @@ def get_axis(self, index): Returns ------- axis : :class:`.cifti2_axes.Axis` - ''' + """ from . import cifti2_axes return cifti2_axes.from_index_mapping(self.get_index_map(index)) @@ -1238,7 +1238,7 @@ def get_data_shape(self): class Cifti2Header(FileBasedHeader, xml.XmlSerializable): - ''' Class for CIFTI-2 header extension ''' + """ Class for CIFTI-2 header extension """ def __init__(self, matrix=None, version="2.0"): FileBasedHeader.__init__(self) @@ -1263,20 +1263,20 @@ def may_contain_header(klass, binaryblock): @property def number_of_mapped_indices(self): - ''' + """ Number of mapped indices - ''' + """ return len(self.matrix) @property def mapped_indices(self): - ''' + """ List of matrix indices that are mapped - ''' + """ return self.matrix.mapped_indices def get_index_map(self, index): - ''' + """ Cifti2 Mapping class for a given index Parameters @@ -1290,11 +1290,11 @@ def get_index_map(self, index): cifti2_map : Cifti2MatrixIndicesMap Returns the Cifti2MatrixIndicesMap corresponding to the given index. - ''' + """ return self.matrix.get_index_map(index) def get_axis(self, index): - ''' + """ Generates the Cifti2 axis for a given dimension Parameters @@ -1305,12 +1305,12 @@ def get_axis(self, index): Returns ------- axis : :class:`.cifti2_axes.Axis` - ''' + """ return self.matrix.get_axis(index) @classmethod def from_axes(cls, axes): - ''' + """ Creates a new Cifti2 header based on the Cifti2 axes Parameters @@ -1322,7 +1322,7 @@ def from_axes(cls, axes): ------- header : Cifti2Header new header describing the rows/columns in a format consistent with Cifti2 - ''' + """ from . import cifti2_axes return cifti2_axes.to_header(axes) @@ -1342,7 +1342,7 @@ def __init__(self, nifti_header=None, extra=None, file_map=None): - ''' Initialize image + """ Initialize image The image is a combination of (dataobj, header), with optional metadata in `nifti_header` (a NIfTI2 header). There may be more metadata in the @@ -1365,7 +1365,7 @@ def __init__(self, Extra metadata not captured by `header` or `nifti_header`. file_map : mapping, optional Mapping giving file information for this image format. 
- ''' + """ if not isinstance(header, Cifti2Header) and header: header = Cifti2Header.from_axes(header) super(Cifti2Image, self).__init__(dataobj, header=header, @@ -1423,7 +1423,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): @classmethod def from_image(klass, img): - ''' Class method to create new instance of own class from `img` + """ Class method to create new instance of own class from `img` Parameters ---------- @@ -1434,7 +1434,7 @@ def from_image(klass, img): ------- cimg : instance Image, of our own class - ''' + """ if isinstance(img, klass): return img raise NotImplementedError @@ -1474,7 +1474,7 @@ def to_file_map(self, file_map=None): img.to_file_map(file_map or self.file_map) def update_headers(self): - ''' Harmonize NIfTI headers with image data + """ Harmonize NIfTI headers with image data >>> import numpy as np >>> data = np.zeros((2,3,4)) @@ -1484,7 +1484,7 @@ def update_headers(self): >>> img.update_headers() >>> img.nifti_header.get_data_shape() == (2, 3, 4) True - ''' + """ self._nifti_header.set_data_shape(self._dataobj.shape) def get_data_dtype(self): diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index 8c3d40cd56..50d2c3a5c0 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -76,7 +76,7 @@ def _mangle(self, value): class _Cifti2AsNiftiHeader(Nifti2Header): - ''' Class for Cifti2 header extension ''' + """ Class for Cifti2 header extension """ @classmethod def _valid_intent_code(klass, intent_code): @@ -126,7 +126,7 @@ class _Cifti2AsNiftiImage(Nifti2Image): class Cifti2Parser(xml.XmlParser): - '''Class to parse an XML string into a CIFTI-2 header object''' + """Class to parse an XML string into a CIFTI-2 header object""" def __init__(self, encoding=None, buffer_size=3500000, verbose=0): super(Cifti2Parser, self).__init__(encoding=encoding, buffer_size=buffer_size, diff --git a/nibabel/cmdline/nifti_dx.py b/nibabel/cmdline/nifti_dx.py index e478b5a5c2..259c24d97d 100644 --- a/nibabel/cmdline/nifti_dx.py +++ b/nibabel/cmdline/nifti_dx.py @@ -7,7 +7,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Print nifti diagnostics for header files ''' +""" Print nifti diagnostics for header files """ import sys from optparse import OptionParser diff --git a/nibabel/data.py b/nibabel/data.py index 6208ebe7d5..999270b2b4 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -32,10 +32,10 @@ class BomberError(DataError, AttributeError): class Datasource(object): - ''' Simple class to add base path to relative path ''' + """ Simple class to add base path to relative path """ def __init__(self, base_path): - ''' Initialize datasource + """ Initialize datasource Parameters ---------- @@ -49,11 +49,11 @@ def __init__(self, base_path): >>> fname = repo.get_filename('somedir', 'afile.txt') >>> fname == pjoin('a', 'path', 'somedir', 'afile.txt') True - ''' + """ self.base_path = base_path def get_filename(self, *path_parts): - ''' Prepend base path to `*path_parts` + """ Prepend base path to `*path_parts` We make no check whether the returned path exists. @@ -67,11 +67,11 @@ def get_filename(self, *path_parts): result of ``os.path.join(*path_parts), with ``self.base_path`` prepended - ''' + """ return pjoin(self.base_path, *path_parts) def list_files(self, relative=True): - ''' Recursively list the files in the data source directory. + """ Recursively list the files in the data source directory. 
Parameters ---------- @@ -84,7 +84,7 @@ def list_files(self, relative=True): file_list: list of strings List of the paths of all the files in the data source. - ''' + """ out_list = list() for base, dirs, files in os.walk(self.base_path): if relative: @@ -95,12 +95,12 @@ def list_files(self, relative=True): class VersionedDatasource(Datasource): - ''' Datasource with version information in config file + """ Datasource with version information in config file - ''' + """ def __init__(self, base_path, config_filename=None): - ''' Initialize versioned datasource + """ Initialize versioned datasource We assume that there is a configuration file with version information in datasource directory tree. @@ -120,7 +120,7 @@ def __init__(self, base_path, config_filename=None): config_filaname : None or str relative path to configuration file containing version - ''' + """ Datasource.__init__(self, base_path) if config_filename is None: config_filename = 'config.ini' @@ -153,7 +153,7 @@ def _cfg_value(fname, section='DATA', value='path'): def get_data_path(): - ''' Return specified or guessed locations of NIPY data files + """ Return specified or guessed locations of NIPY data files The algorithm is to return paths, extracted from strings, where strings are found in the following order: @@ -193,7 +193,7 @@ def get_data_path(): * https://www.debian.org/doc/packaging-manuals/python-policy/ap-packaging_tools.html#s-distutils * https://www.mail-archive.com/debian-python@lists.debian.org/msg05084.html - ''' + """ paths = [] try: var = os.environ['NIPY_DATA_PATH'] @@ -217,7 +217,7 @@ def get_data_path(): def find_data_dir(root_dirs, *names): - ''' Find relative path given path prefixes to search + """ Find relative path given path prefixes to search We raise a DataError if we can't find the relative path @@ -234,7 +234,7 @@ def find_data_dir(root_dirs, *names): data_dir : str full path (root path added to `*names` above) - ''' + """ ds_relative = pjoin(*names) for path in root_dirs: pth = pjoin(path, ds_relative) @@ -246,7 +246,7 @@ def find_data_dir(root_dirs, *names): def make_datasource(pkg_def, **kwargs): - ''' Return datasource defined by `pkg_def` as found in `data_path` + """ Return datasource defined by `pkg_def` as found in `data_path` `data_path` is the only allowed keyword argument. @@ -280,7 +280,7 @@ def make_datasource(pkg_def, **kwargs): ------- datasource : ``VersionedDatasource`` An initialized ``VersionedDatasource`` instance - ''' + """ if any(key for key in kwargs if key != 'data_path'): raise ValueError('Unexpected keyword argument(s)') data_path = kwargs.get('data_path') @@ -305,14 +305,14 @@ def make_datasource(pkg_def, **kwargs): class Bomber(object): - ''' Class to raise an informative error when used ''' + """ Class to raise an informative error when used """ def __init__(self, name, msg): self.name = name self.msg = msg def __getattr__(self, attr_name): - ''' Raise informative error accessing not-found attributes ''' + """ Raise informative error accessing not-found attributes """ raise BomberError( 'Trying to access attribute "%s" ' 'of non-existent data "%s"\n\n%s\n' % @@ -320,7 +320,7 @@ def __getattr__(self, attr_name): def datasource_or_bomber(pkg_def, **options): - ''' Return a viable datasource or a Bomber + """ Return a viable datasource or a Bomber This is to allow module level creation of datasource objects. 
We create the objects, so that, if the data exist, and are the correct @@ -341,7 +341,7 @@ def datasource_or_bomber(pkg_def, **options): Returns ------- ds : datasource or ``Bomber`` instance - ''' + """ unix_relpath = pkg_def['relpath'] version = pkg_def.get('min version') pkg_hint = pkg_def.get('install hint', DEFAULT_INSTALL_HINT) diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index 4d86810d5d..e0e3d52849 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -15,10 +15,10 @@ class DataobjImage(FileBasedImage): - ''' Template class for images that have dataobj data stores''' + """ Template class for images that have dataobj data stores""" def __init__(self, dataobj, header=None, extra=None, file_map=None): - ''' Initialize dataobj image + """ Initialize dataobj image The datobj image is a combination of (dataobj, header), with optional metadata in `extra`, and filename / file-like objects contained in the @@ -37,7 +37,7 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): metadata of this image type file_map : mapping, optional mapping giving file information for this image format - ''' + """ super(DataobjImage, self).__init__(header=header, extra=extra, file_map=file_map) self._dataobj = dataobj @@ -415,7 +415,7 @@ def get_shape(self): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - ''' Class method to create image from mapping in ``file_map`` + """ Class method to create image from mapping in ``file_map`` .. deprecated:: 2.4.1 ``keep_file_open='auto'`` is redundant with `False` and has @@ -447,12 +447,12 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): Returns ------- img : DataobjImage instance - ''' + """ raise NotImplementedError @classmethod def from_filename(klass, filename, *, mmap=True, keep_file_open=None): - '''Class method to create image from filename `filename` + """Class method to create image from filename `filename` .. deprecated:: 2.4.1 ``keep_file_open='auto'`` is redundant with `False` and has @@ -481,7 +481,7 @@ def from_filename(klass, filename, *, mmap=True, keep_file_open=None): Returns ------- img : DataobjImage instance - ''' + """ if mmap not in (True, False, 'c', 'r'): raise ValueError("mmap should be one of {True, False, 'c', 'r'}") file_map = klass.filespec_to_file_map(filename) diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 8a945bc887..1b986f9c4c 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -291,8 +291,8 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): - ''' Return header data for empty header with given endianness - ''' + """ Return header data for empty header with given endianness + """ hdr_data = super(EcatHeader, klass).default_structarr(endianness) hdr_data['magic_number'] = 'MATRIX72' hdr_data['sw_version'] = 74 @@ -323,7 +323,7 @@ def get_filetype(self): @classmethod def _get_checks(klass): - ''' Return sequence of check functions for this class ''' + """ Return sequence of check functions for this class """ return () @@ -597,7 +597,7 @@ def _get_frame_offset(self, frame=0): return int(self._mlist[frame][1] * BLOCK_SIZE) def _get_oriented_data(self, raw_data, orientation=None): - ''' + """ Get data oriented following ``patient_orientation`` header field. If the ``orientation`` parameter is given, return data according to this orientation. 
@@ -605,7 +605,7 @@ def _get_oriented_data(self, raw_data, orientation=None): :param raw_data: Numpy array containing the raw data :param orientation: None (default), 'neurological' or 'radiological' :rtype: Numpy array containing the oriented data - ''' + """ if orientation is None: orientation = self._header['patient_orientation'] elif orientation == 'neurological': @@ -624,7 +624,7 @@ def _get_oriented_data(self, raw_data, orientation=None): return raw_data def raw_data_from_fileobj(self, frame=0, orientation=None): - ''' + """ Get raw data from file object. :param frame: Time frame index from where to fetch data @@ -632,7 +632,7 @@ def raw_data_from_fileobj(self, frame=0, orientation=None): :rtype: Numpy array containing (possibly oriented) raw data .. seealso:: data_from_fileobj - ''' + """ dtype = self._get_data_dtype(frame) if self._header.endianness is not native_code: dtype = dtype.newbyteorder(self._header.endianness) @@ -644,7 +644,7 @@ def raw_data_from_fileobj(self, frame=0, orientation=None): return raw_data def data_from_fileobj(self, frame=0, orientation=None): - ''' + """ Read scaled data from file for a given frame :param frame: Time frame index from where to fetch data @@ -652,7 +652,7 @@ def data_from_fileobj(self, frame=0, orientation=None): :rtype: Numpy array containing (possibly oriented) raw data .. seealso:: raw_data_from_fileobj - ''' + """ header = self._header subhdr = self.subheaders[frame] raw_data = self.raw_data_from_fileobj(frame, orientation) @@ -663,11 +663,11 @@ def data_from_fileobj(self, frame=0, orientation=None): class EcatImageArrayProxy(object): - ''' Ecat implemention of array proxy protocol + """ Ecat implemention of array proxy protocol The array proxy allows us to freeze the passed fileobj and header such that it returns the expected data array. - ''' + """ def __init__(self, subheader): self._subheader = subheader @@ -689,7 +689,7 @@ def is_proxy(self): return True def __array__(self, dtype=None): - ''' Read of data from file + """ Read of data from file This reads ALL FRAMES into one array, can be memory expensive. @@ -705,7 +705,7 @@ def __array__(self, dtype=None): ------- array Scaled image data with type `dtype`. 
- ''' + """ # dtype=None is interpreted as float64 data = np.empty(self.shape) frame_mapping = get_frame_order(self._subheader._mlist) @@ -837,13 +837,13 @@ def get_frame_affine(self, frame): return self._subheader.get_frame_affine(frame=frame) def get_frame(self, frame, orientation=None): - ''' + """ Get full volume for a time frame :param frame: Time frame index from where to fetch data :param orientation: None (default), 'neurological' or 'radiological' :rtype: Numpy array containing (possibly oriented) raw data - ''' + """ return self._subheader.data_from_fileobj(frame, orientation) def get_data_dtype(self, frame): @@ -915,21 +915,21 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): return img def _get_empty_dir(self): - ''' + """ Get empty directory entry of the form [numAvail, nextDir, previousDir, numUsed] - ''' + """ return np.array([31, 2, 0, 0], dtype=np.int32) def _write_data(self, data, stream, pos, dtype=None, endianness=None): - ''' + """ Write data to ``stream`` using an array_writer :param data: Numpy array containing the dat :param stream: The file-like object to write the data to :param pos: The position in the stream to write the data to :param endianness: Endianness code of the data to write - ''' + """ if dtype is None: dtype = data.dtype @@ -941,7 +941,7 @@ def _write_data(self, data, stream, pos, dtype=None, endianness=None): dtype).to_fileobj(stream) def to_file_map(self, file_map=None): - ''' Write ECAT7 image to `file_map` or contained ``self.file_map`` + """ Write ECAT7 image to `file_map` or contained ``self.file_map`` The format consist of: @@ -950,7 +950,7 @@ def to_file_map(self, file_map=None): - For every frame (3D volume in 4D data) - A subheader (size = frame_offset) - Frame data (3D volume) - ''' + """ if file_map is None: file_map = self.file_map diff --git a/nibabel/environment.py b/nibabel/environment.py index 95cb08700c..768b4de34b 100644 --- a/nibabel/environment.py +++ b/nibabel/environment.py @@ -1,8 +1,8 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -''' +""" Settings from the system environment relevant to NIPY -''' +""" import os from os.path import join as pjoin @@ -66,7 +66,7 @@ def get_nipy_user_dir(): def get_nipy_system_dir(): - r''' Get systemwide NIPY configuration file directory + r""" Get systemwide NIPY configuration file directory On posix systems this will be ``/etc/nipy``. On Windows, the directory is less useful, but by default it will be @@ -87,7 +87,7 @@ def get_nipy_system_dir(): Examples -------- >>> pth = get_nipy_system_dir() - ''' + """ if os.name == 'nt': return r'C:\etc\nipy' if os.name == 'posix': diff --git a/nibabel/eulerangles.py b/nibabel/eulerangles.py index 0928cd39d3..11a10bbe2b 100644 --- a/nibabel/eulerangles.py +++ b/nibabel/eulerangles.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Module implementing Euler angle rotations and their conversions +""" Module implementing Euler angle rotations and their conversions See: @@ -81,7 +81,7 @@ The convention of rotation around ``z``, followed by rotation around ``y``, followed by rotation around ``x``, is known (confusingly) as "xyz", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles. 
-''' +""" import math @@ -94,7 +94,7 @@ def euler2mat(z=0, y=0, x=0): - ''' Return matrix for rotations around z, y and x axes + """ Return matrix for rotations around z, y and x axes Uses the z, then y, then x convention above @@ -165,7 +165,7 @@ def euler2mat(z=0, y=0, x=0): curl your fingers; the direction your fingers curl is the direction of rotation). Therefore, the rotations are counterclockwise if looking along the axis of rotation from positive to negative. - ''' + """ Ms = [] if z: cosz = math.cos(z) @@ -191,7 +191,7 @@ def euler2mat(z=0, y=0, x=0): def mat2euler(M, cy_thresh=None): - ''' Discover Euler angle vector from 3x3 matrix + """ Discover Euler angle vector from 3x3 matrix Uses the conventions above. @@ -241,7 +241,7 @@ def mat2euler(M, cy_thresh=None): The code appears to be licensed (from the website) as "can be used without restrictions". - ''' + """ M = np.asarray(M) if cy_thresh is None: try: @@ -264,7 +264,7 @@ def mat2euler(M, cy_thresh=None): def euler2quat(z=0, y=0, x=0): - ''' Return quaternion corresponding to these Euler angles + """ Return quaternion corresponding to these Euler angles Uses the z, then y, then x convention above @@ -294,7 +294,7 @@ def euler2quat(z=0, y=0, x=0): 3. Apply quaternion multiplication formula - https://en.wikipedia.org/wiki/Quaternions#Hamilton_product - to formulae from 2.) to give formula for combined rotations. - ''' + """ z = z / 2.0 y = y / 2.0 x = x / 2.0 @@ -311,7 +311,7 @@ def euler2quat(z=0, y=0, x=0): def quat2euler(q): - ''' Return Euler angles corresponding to quaternion `q` + """ Return Euler angles corresponding to quaternion `q` Parameters ---------- @@ -333,14 +333,14 @@ def quat2euler(q): combining parts of the ``quat2mat`` and ``mat2euler`` functions, but the reduction in computation is small, and the code repetition is large. - ''' + """ # delayed import to avoid cyclic dependencies from . import quaternions as nq return mat2euler(nq.quat2mat(q)) def euler2angle_axis(z=0, y=0, x=0): - ''' Return angle, axis corresponding to these Euler angles + """ Return angle, axis corresponding to these Euler angles Uses the z, then y, then x convention above @@ -367,14 +367,14 @@ def euler2angle_axis(z=0, y=0, x=0): 1.5 >>> np.allclose(vec, [0, 1, 0]) True - ''' + """ # delayed import to avoid cyclic dependencies from . import quaternions as nq return nq.quat2angle_axis(euler2quat(z, y, x)) def angle_axis2euler(theta, vector, is_normalized=False): - ''' Convert angle, axis pair to Euler angles + """ Convert angle, axis pair to Euler angles Parameters ---------- @@ -405,7 +405,7 @@ def angle_axis2euler(theta, vector, is_normalized=False): combining parts of the ``angle_axis2mat`` and ``mat2euler`` functions, but the reduction in computation is small, and the code repetition is large. - ''' + """ # delayed import to avoid cyclic dependencies from . import quaternions as nq M = nq.angle_axis2mat(theta, vector, is_normalized) diff --git a/nibabel/externals/tests/test_netcdf.py b/nibabel/externals/tests/test_netcdf.py index f85393be4e..08a336d26f 100644 --- a/nibabel/externals/tests/test_netcdf.py +++ b/nibabel/externals/tests/test_netcdf.py @@ -1,4 +1,4 @@ -''' Tests for netcdf ''' +""" Tests for netcdf """ import os from os.path import join as pjoin, dirname diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 90bbd8e652..fdc8a00e7f 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Common interface for any image format--volume or surface, binary or xml.''' +""" Common interface for any image format--volume or surface, binary or xml.""" import io from copy import deepcopy @@ -22,7 +22,7 @@ class ImageFileError(Exception): class FileBasedHeader(object): - ''' Template class to implement header protocol ''' + """ Template class to implement header protocol """ @classmethod def from_header(klass, header=None): @@ -51,16 +51,16 @@ def __ne__(self, other): return not self == other def copy(self): - ''' Copy object to independent representation + """ Copy object to independent representation The copy should not be affected by any changes to the original object. - ''' + """ return deepcopy(self) class FileBasedImage(object): - ''' + """ Abstract image class with interface for loading/saving images from disk. The class doesn't define any image properties. @@ -162,7 +162,7 @@ class FileBasedImage(object): carry the position at which a write (with ``to_files``) should place the data. The ``file_map`` contents should therefore be such, that this will work: - ''' + """ header_class = FileBasedHeader _meta_sniff_len = 0 files_types = (('image', None),) @@ -173,7 +173,7 @@ class FileBasedImage(object): rw = True # Used in test code def __init__(self, header=None, extra=None, file_map=None): - ''' Initialize image + """ Initialize image The image is a combination of (header), with optional metadata in `extra`, and filename / file-like objects @@ -188,7 +188,7 @@ def __init__(self, header=None, extra=None, file_map=None): metadata of this image type file_map : mapping, optional mapping giving file information for this image format - ''' + """ self._header = self.header_class.from_header(header) if extra is None: extra = {} @@ -203,8 +203,8 @@ def header(self): return self._header def __getitem__(self): - ''' No slicing or dictionary interface for images - ''' + """ No slicing or dictionary interface for images + """ raise TypeError("Cannot slice image objects.") @deprecate_with_version('get_header method is deprecated.\n' @@ -217,7 +217,7 @@ def get_header(self): return self.header def get_filename(self): - ''' Fetch the image filename + """ Fetch the image filename Parameters ---------- @@ -230,7 +230,7 @@ def get_filename(self): If an image may have several filenames associated with it (e.g. Analyze ``.img, .hdr`` pair) then we return the more characteristic filename (the ``.img`` filename in the case of Analyze') - ''' + """ # which filename is returned depends on the ordering of the # 'files_types' class attribute - we return the name # corresponding to the first in that tuple @@ -238,7 +238,7 @@ def get_filename(self): return self.file_map[characteristic_type].filename def set_filename(self, filename): - ''' Sets the files in the object from a given filename + """ Sets the files in the object from a given filename The different image formats may check whether the filename has an extension characteristic of the format, and raise an error if @@ -251,7 +251,7 @@ def set_filename(self, filename): this will be the only filename set into the image ``.file_map`` attribute. Otherwise, the image instance will try and guess the other filenames from this given filename. 
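A short sketch of the filename guessing described here, using the Analyze-style ``.img``/``.hdr`` pair as the example. ``Nifti1Pair`` is used as a concrete subclass; ``set_filename`` only fills ``file_map``, nothing is written to disk.

>>> import numpy as np
>>> from nibabel import Nifti1Pair
>>> img = Nifti1Pair(np.zeros((2, 3, 4)), np.eye(4))
>>> img.set_filename('scan.img')
>>> img.get_filename()
'scan.img'
>>> sorted(img.file_map)
['header', 'image']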
- ''' + """ self.file_map = self.__class__.filespec_to_file_map(filename) @classmethod @@ -317,7 +317,7 @@ def filespec_to_files(klass, filespec): return klass.filespec_to_file_map(filespec) def to_filename(self, filename): - ''' Write image to files implied by filename string + """ Write image to files implied by filename string Parameters ---------- @@ -329,7 +329,7 @@ def to_filename(self, filename): Returns ------- None - ''' + """ self.file_map = self.filespec_to_file_map(filename) self.to_file_map() @@ -350,7 +350,7 @@ def to_files(self, file_map=None): @classmethod def make_file_map(klass, mapping=None): - ''' Class method to make files holder for this image type + """ Class method to make files holder for this image type Parameters ---------- @@ -366,7 +366,7 @@ def make_file_map(klass, mapping=None): sequence klass.files_types, and values of type FileHolder, where FileHolder objects have default values, other than those given by `mapping` - ''' + """ if mapping is None: mapping = {} file_map = {} @@ -383,7 +383,7 @@ def make_file_map(klass, mapping=None): @classmethod def instance_to_filename(klass, img, filename): - ''' Save `img` in our own format, to name implied by `filename` + """ Save `img` in our own format, to name implied by `filename` This is a class method @@ -393,13 +393,13 @@ def instance_to_filename(klass, img, filename): filename : str Filename, implying name to which to save image. - ''' + """ img = klass.from_image(img) img.to_filename(filename) @classmethod def from_image(klass, img): - ''' Class method to create new instance of own class from `img` + """ Class method to create new instance of own class from `img` Parameters ---------- @@ -410,7 +410,7 @@ def from_image(klass, img): ------- cimg : ``spatialimage`` instance Image, of our own class - ''' + """ raise NotImplementedError() @classmethod @@ -514,7 +514,7 @@ def path_maybe_image(klass, filename, sniff=None, sniff_max=1024): class SerializableImage(FileBasedImage): - ''' + """ Abstract image class for (de)serializing images to/from byte strings. The class doesn't define any image properties. @@ -562,7 +562,7 @@ class SerializableImage(FileBasedImage): images) currently do not support this interface. For multi-file images, ``to_bytes()`` and ``from_bytes()`` must be overridden, and any encoding details should be documented. - ''' + """ @classmethod def from_bytes(klass, bytestring): diff --git a/nibabel/fileholders.py b/nibabel/fileholders.py index 35cfd3c348..c996725991 100644 --- a/nibabel/fileholders.py +++ b/nibabel/fileholders.py @@ -6,7 +6,7 @@ # copyright and license terms. 
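A sketch of the byte-string interface on a single-file format; ``Nifti1Image`` implements ``SerializableImage``, so the pair ``to_bytes`` / ``from_bytes`` round-trips in memory.

>>> import numpy as np
>>> from nibabel import Nifti1Image
>>> img = Nifti1Image(np.zeros((2, 3, 4)), np.eye(4))
>>> img2 = Nifti1Image.from_bytes(img.to_bytes())
>>> img2.shape
(2, 3, 4)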
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Fileholder class ''' +""" Fileholder class """ from copy import copy @@ -18,14 +18,14 @@ class FileHolderError(Exception): class FileHolder(object): - ''' class to contain filename, fileobj and file position - ''' + """ class to contain filename, fileobj and file position + """ def __init__(self, filename=None, fileobj=None, pos=0): - ''' Initialize FileHolder instance + """ Initialize FileHolder instance Parameters ---------- @@ -37,13 +37,13 @@ def __init__(self, pos : int, optional position in filename or fileobject at which to start reading or writing data; defaults to 0 - ''' + """ self.filename = filename self.fileobj = fileobj self.pos = pos def get_prepare_fileobj(self, *args, **kwargs): - ''' Return fileobj if present, or return fileobj from filename + """ Return fileobj if present, or return fileobj from filename Set position to that given in self.pos @@ -62,7 +62,7 @@ def get_prepare_fileobj(self, *args, **kwargs): fileobj : file-like object object has position set (via ``fileobj.seek()``) to ``self.pos`` - ''' + """ if self.fileobj is not None: obj = ImageOpener(self.fileobj) # for context manager obj.seek(self.pos) @@ -99,7 +99,7 @@ def file_like(self): def copy_file_map(file_map): - r''' Copy mapping of fileholders given by `file_map` + r""" Copy mapping of fileholders given by `file_map` Parameters ---------- @@ -111,7 +111,7 @@ def copy_file_map(file_map): fm_copy : dict Copy of `file_map`, using shallow copy of ``FileHolder``\s - ''' + """ fm_copy = {} for key, fh in file_map.items(): fm_copy[key] = copy(fh) diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index ed04610fdd..5d84a9d6dc 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Create filename pairs, triplets etc, with expected extensions ''' +""" Create filename pairs, triplets etc, with expected extensions """ import os import pathlib @@ -50,7 +50,7 @@ def types_filenames(template_fname, types_exts, trailing_suffixes=('.gz', '.bz2'), enforce_extensions=True, match_case=False): - ''' Return filenames with standard extensions from template name + """ Return filenames with standard extensions from template name The typical case is returning image and header filenames for an Analyze image, that expects an 'image' file type with extension ``.img``, @@ -108,7 +108,7 @@ def types_filenames(template_fname, types_exts, ... enforce_extensions=False) >>> tfns == {'t1': '/path/test.funny', 't2': '/path/test.ext2'} True - ''' + """ template_fname = _stringify_path(template_fname) if not isinstance(template_fname, str): raise TypesFilenamesError('Need file name as input ' @@ -175,7 +175,7 @@ def parse_filename(filename, types_exts, trailing_suffixes, match_case=False): - '''Split filename into fileroot, extension, trailing suffix; guess type. + """Split filename into fileroot, extension, trailing suffix; guess type. 
Parameters ---------- @@ -217,7 +217,7 @@ def parse_filename(filename, ('/path/fname', 'ext2', None, 't2') >>> parse_filename('/path/fnameext2.gz', types_exts, ('.gz',)) ('/path/fname', 'ext2', '.gz', 't2') - ''' + """ filename = _stringify_path(filename) ignored = None @@ -256,7 +256,7 @@ def _iendswith(whole, end): def splitext_addext(filename, addexts=('.gz', '.bz2'), match_case=False): - ''' Split ``/pth/fname.ext.gz`` into ``/pth/fname, .ext, .gz`` + """ Split ``/pth/fname.ext.gz`` into ``/pth/fname, .ext, .gz`` where ``.gz`` may be any of passed `addext` trailing suffixes. @@ -286,7 +286,7 @@ def splitext_addext(filename, ('fname', '.ext', '') >>> splitext_addext('fname.ext.foo', ('.foo', '.bar')) ('fname', '.ext', '.foo') - ''' + """ filename = _stringify_path(filename) if match_case: diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 3d85ad4ea4..72a754efe8 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -6,10 +6,10 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Header and image reading / writing functions for MGH image format +""" Header and image reading / writing functions for MGH image format Author: Krish Subramaniam -''' +""" from os.path import splitext import numpy as np @@ -81,11 +81,11 @@ class MGHError(Exception): class MGHHeader(LabeledWrapStruct): - ''' Class for MGH format header + """ Class for MGH format header The header also consists of the footer data which MGH places after the data chunk. - ''' + """ # Copies of module-level definitions template_dtype = hf_dtype _hdrdtype = header_dtype @@ -95,7 +95,7 @@ class MGHHeader(LabeledWrapStruct): def __init__(self, binaryblock=None, check=True): - ''' Initialize header from binary data block + """ Initialize header from binary data block Parameters ---------- @@ -105,7 +105,7 @@ def __init__(self, check : bool, optional Whether to check content of header in initialization. Default is True. - ''' + """ min_size = self._hdrdtype.itemsize full_size = self.template_dtype.itemsize if binaryblock is not None and len(binaryblock) >= min_size: @@ -138,8 +138,8 @@ def _get_checks(klass): @classmethod def from_header(klass, header=None, check=True): - ''' Class method to create MGH header from another MGH header - ''' + """ Class method to create MGH header from another MGH header + """ # own type, return copy if type(header) == klass: obj = header.copy() @@ -152,9 +152,9 @@ def from_header(klass, header=None, check=True): @classmethod def from_fileobj(klass, fileobj, check=True): - ''' + """ classmethod for loading a MGH fileobject - ''' + """ # We need the following hack because MGH data stores header information # after the data chunk too. We read the header initially, deduce the # dimensions from the header, skip over and then read the footer @@ -172,12 +172,12 @@ def from_fileobj(klass, fileobj, check=True): return klass(hdr_str + ftr_str, check=check) def get_affine(self): - ''' Get the affine transform from the header information. + """ Get the affine transform from the header information. MGH format doesn't store the transform directly. Instead it's gleaned from the zooms ( delta ), direction cosines ( Mdc ), RAS centers ( Pxyz_c ) and the dimensions. 
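In symbols: with ``Mdc`` the direction cosines, ``delta`` the zooms, ``dims`` the spatial shape and ``Pxyz_c`` the RAS center, the rotation/zoom block is ``Mdc.T * delta`` and the translation places the volume center on ``Pxyz_c``. A plain-numpy sketch of that arithmetic, mirroring the method body rather than adding behavior:

>>> import numpy as np
>>> Mdc = np.eye(3)                 # direction cosines, identity for simplicity
>>> delta = np.array([1., 2., 3.])  # voxel sizes
>>> dims = np.array([4, 5, 6])      # spatial dimensions
>>> Pxyz_c = np.zeros(3)            # RAS coordinate of the volume center
>>> MdcD = Mdc.T * delta
>>> affine = np.eye(4)
>>> affine[:3, :3] = MdcD
>>> affine[:3, 3] = Pxyz_c - MdcD.dot(dims / 2.0)
>>> np.allclose(affine[:3, 3], [-2., -5., -9.])
True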
- ''' + """ hdr = self._structarr MdcD = hdr['Mdc'].T * hdr['delta'] vol_center = MdcD.dot(hdr['dims'][:3]) / 2 @@ -187,14 +187,14 @@ def get_affine(self): get_best_affine = get_affine def get_vox2ras(self): - '''return the get_affine() - ''' + """return the get_affine() + """ return self.get_affine() def get_vox2ras_tkr(self): - ''' Get the vox2ras-tkr transform. See "Torig" here: + """ Get the vox2ras-tkr transform. See "Torig" here: https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems - ''' + """ ds = self._structarr['delta'] ns = self._structarr['dims'][:3] * ds / 2.0 v2rtkr = np.array([[-ds[0], 0, 0, ns[0]], @@ -204,22 +204,22 @@ def get_vox2ras_tkr(self): return v2rtkr def get_ras2vox(self): - '''return the inverse get_affine() - ''' + """return the inverse get_affine() + """ return np.linalg.inv(self.get_affine()) def get_data_dtype(self): - ''' Get numpy dtype for MGH data + """ Get numpy dtype for MGH data For examples see ``set_data_dtype`` - ''' + """ code = int(self._structarr['type']) dtype = self._data_type_codes.numpy_dtype[code] return dtype def set_data_dtype(self, datatype): - ''' Set numpy dtype for data from code or dtype or type - ''' + """ Set numpy dtype for data from code or dtype or type + """ try: code = self._data_type_codes[datatype] except KeyError: @@ -227,7 +227,7 @@ def set_data_dtype(self, datatype): self._structarr['type'] = code def _ndims(self): - ''' Get dimensionality of data + """ Get dimensionality of data MGH does not encode dimensionality explicitly, so an image where the fourth dimension is 1 is treated as three-dimensional. @@ -235,11 +235,11 @@ def _ndims(self): Returns ------- ndims : 3 or 4 - ''' + """ return 3 + (self._structarr['dims'][3] > 1) def get_zooms(self): - ''' Get zooms from header + """ Get zooms from header Returns the spacing of voxels in the x, y, and z dimensions. For four-dimensional files, a fourth zoom is included, equal to the @@ -254,13 +254,13 @@ def get_zooms(self): tuple of header zoom values .. _mghformat: https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat#line-82 - ''' + """ # Do not return time zoom (TR) if 3D image tzoom = (self['tr'],) if self._ndims() > 3 else () return tuple(self._structarr['delta']) + tzoom def set_zooms(self, zooms): - ''' Set zooms into header fields + """ Set zooms into header fields Sets the spacing of voxels in the x, y, and z dimensions. 
For four-dimensional files, a temporal zoom (repetition time, or TR, in @@ -271,7 +271,7 @@ def set_zooms(self, zooms): zooms : sequence sequence of floats specifying spatial and (optionally) temporal zooms - ''' + """ hdr = self._structarr zooms = np.asarray(zooms) ndims = self._ndims() @@ -289,8 +289,8 @@ def set_zooms(self, zooms): hdr['tr'] = zooms[3] def get_data_shape(self): - ''' Get shape of data - ''' + """ Get shape of data + """ shape = tuple(self._structarr['dims']) # If last dimension (nframes) is 1, remove it because # we want to maintain 3D and it's redundant @@ -299,13 +299,13 @@ def get_data_shape(self): return shape def set_data_shape(self, shape): - ''' Set shape of data + """ Set shape of data Parameters ---------- shape : sequence sequence of integers specifying data array shape - ''' + """ shape = tuple(shape) if len(shape) > 4: raise ValueError("Shape may be at most 4 dimensional") @@ -313,29 +313,29 @@ def set_data_shape(self, shape): self._structarr['delta'] = 1 def get_data_bytespervox(self): - ''' Get the number of bytes per voxel of the data - ''' + """ Get the number of bytes per voxel of the data + """ return int(self._data_type_codes.bytespervox[ int(self._structarr['type'])]) def get_data_size(self): - ''' Get the number of bytes the data chunk occupies. - ''' + """ Get the number of bytes the data chunk occupies. + """ return self.get_data_bytespervox() * np.prod(self._structarr['dims']) def get_data_offset(self): - ''' Return offset into data file to read data - ''' + """ Return offset into data file to read data + """ return DATA_OFFSET def get_footer_offset(self): - ''' Return offset where the footer resides. + """ Return offset where the footer resides. Occurs immediately after the data chunk. - ''' + """ return self.get_data_offset() + self.get_data_size() def data_from_fileobj(self, fileobj): - ''' Read data array from `fileobj` + """ Read data array from `fileobj` Parameters ---------- @@ -346,7 +346,7 @@ def data_from_fileobj(self, fileobj): ------- arr : ndarray data array - ''' + """ dtype = self.get_data_dtype() shape = self.get_data_shape() offset = self.get_data_offset() @@ -364,10 +364,10 @@ def guessed_endian(klass, mapping): @classmethod def default_structarr(klass, endianness=None): - ''' Return header data for empty header + """ Return header data for empty header Ignores byte order; always big endian - ''' + """ if endianness is not None and endian_codes[endianness] != '>': raise ValueError('MGHHeader must always be big endian') structarr = super(MGHHeader, @@ -381,15 +381,15 @@ def default_structarr(klass, endianness=None): return structarr def _set_affine_default(self): - ''' If goodRASFlag is 0, set the default affine - ''' + """ If goodRASFlag is 0, set the default affine + """ self._structarr['goodRASFlag'] = 1 self._structarr['delta'] = 1 self._structarr['Mdc'] = [[-1, 0, 0], [0, 0, 1], [0, -1, 0]] self._structarr['Pxyz_c'] = 0 def writehdr_to(self, fileobj): - ''' Write header to fileobj + """ Write header to fileobj Write starts at the beginning. @@ -401,7 +401,7 @@ def writehdr_to(self, fileobj): Returns ------- None - ''' + """ hdr_nofooter = np.ndarray((), dtype=self._hdrdtype, buffer=self.binaryblock) # goto the very beginning of the file-like obj @@ -409,7 +409,7 @@ def writehdr_to(self, fileobj): fileobj.write(hdr_nofooter.tobytes()) def writeftr_to(self, fileobj): - ''' Write footer to fileobj + """ Write footer to fileobj Footer data is located after the data chunk. So move there and write. 
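Since the footer location is pure arithmetic on header fields, a quick doctest-style check (a sketch using only the accessors defined above; assumes ``set_data_shape`` pads the fourth dimension with 1):

>>> import numpy as np
>>> from nibabel.freesurfer.mghformat import MGHHeader
>>> hdr = MGHHeader()
>>> hdr.set_data_shape((4, 5, 6))
>>> hdr.set_data_dtype(np.uint8)
>>> hdr.get_data_size() == 4 * 5 * 6 * hdr.get_data_bytespervox()
True
>>> hdr.get_footer_offset() == hdr.get_data_offset() + hdr.get_data_size()
True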
@@ -421,7 +421,7 @@ def writeftr_to(self, fileobj): Returns ------- None - ''' + """ ftr_loc_in_hdr = len(self.binaryblock) - self._ftrdtype.itemsize ftr_nd = np.ndarray((), dtype=self._ftrdtype, buffer=self.binaryblock, offset=ftr_loc_in_hdr) @@ -429,11 +429,11 @@ def writeftr_to(self, fileobj): fileobj.write(ftr_nd.tobytes()) def copy(self): - ''' Return copy of structure ''' + """ Return copy of structure """ return self.__class__(self.binaryblock, check=False) def as_byteswapped(self, endianness=None): - ''' Return new object with given ``endianness`` + """ Return new object with given ``endianness`` If big endian, returns a copy of the object. Otherwise raises ValueError. @@ -448,7 +448,7 @@ def as_byteswapped(self, endianness=None): wstr : ``MGHHeader`` ``MGHHeader`` object - ''' + """ if endianness is None or endian_codes[endianness] != '>': raise ValueError('Cannot byteswap MGHHeader - ' 'must always be big endian') @@ -537,7 +537,7 @@ def filespec_to_file_map(klass, filespec): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - ''' Class method to create image from mapping in ``file_map`` + """ Class method to create image from mapping in ``file_map`` .. deprecated:: 2.4.1 ``keep_file_open='auto'`` is redundant with `False` and has @@ -569,7 +569,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): Returns ------- img : MGHImage instance - ''' + """ if mmap not in (True, False, 'c', 'r'): raise ValueError("mmap should be one of {True, False, 'c', 'r'}") img_fh = file_map['image'] @@ -584,14 +584,14 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): return img def to_file_map(self, file_map=None): - ''' Write image to `file_map` or contained ``self.file_map`` + """ Write image to `file_map` or contained ``self.file_map`` Parameters ---------- file_map : None or mapping, optional files mapping. If None (default) use object's ``file_map`` attribute instead - ''' + """ if file_map is None: file_map = self.file_map data = np.asanyarray(self.dataobj) @@ -605,7 +605,7 @@ def to_file_map(self, file_map=None): self.file_map = file_map def _write_data(self, mghfile, data, header): - ''' Utility routine to write image + """ Utility routine to write image Parameters ---------- @@ -616,7 +616,7 @@ def _write_data(self, mghfile, data, header): array to write header : analyze-type header object header - ''' + """ shape = header.get_data_shape() if data.shape != shape: raise HeaderDataError('Data should be shape (%s)' % diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index e1cfc56b18..f05d85f905 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -'''Tests for mghformat reading writing''' +"""Tests for mghformat reading writing""" import os import io @@ -162,9 +162,9 @@ def test_set_zooms(): def bad_dtype_mgh(): - ''' This function raises an MGHError exception because + """ This function raises an MGHError exception because uint16 is not a valid MGH datatype. - ''' + """ # try to write an unsigned short and make sure it # raises MGHError v = np.ones((7, 13, 3, 22)).astype(np.uint16) diff --git a/nibabel/funcs.py b/nibabel/funcs.py index 2f293c4434..21246d8ec6 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -7,7 +7,7 @@ # copyright and license terms. 
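As the test above exercises, MGH accepts only its own datatype codes, and an unsupported type such as ``np.uint16`` raises ``MGHError``. A small sketch of the accepted path (comparing via ``.type`` since MGH stores data big-endian):

>>> import numpy as np
>>> from nibabel.freesurfer.mghformat import MGHHeader
>>> hdr = MGHHeader()
>>> hdr.set_data_dtype(np.float32)
>>> hdr.get_data_dtype().type is np.float32
True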
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Processor functions for images ''' +""" Processor functions for images """ import numpy as np from .orientations import io_orientation, OrientationError @@ -15,7 +15,7 @@ def squeeze_image(img): - ''' Return image, remove axes length 1 at end of image shape + """ Return image, remove axes length 1 at end of image shape For example, an image may have shape (10,20,30,1,1). In this case squeeze will result in an image with shape (10,20,30). See doctests @@ -65,7 +65,7 @@ def squeeze_image(img): >>> img2 = squeeze_image(img) >>> img2.shape == (1, 1, 5, 1, 2) True - ''' + """ klass = img.__class__ shape = img.shape slen = len(shape) @@ -87,7 +87,7 @@ def squeeze_image(img): def concat_images(images, check_affines=True, axis=None): - r''' Concatenate images in list to single image, along specified dimension + r""" Concatenate images in list to single image, along specified dimension Parameters ---------- @@ -107,7 +107,7 @@ def concat_images(images, check_affines=True, axis=None): concat_img : ``SpatialImage`` New image resulting from concatenating `images` across last dimension - ''' + """ images = [load(img) if not hasattr(img, 'get_data') else img for img in images] n_imgs = len(images) @@ -155,7 +155,7 @@ def concat_images(images, check_affines=True, axis=None): def four_to_three(img): - ''' Create 3D images from 4D image by slicing over last axis + """ Create 3D images from 4D image by slicing over last axis Parameters ---------- @@ -168,7 +168,7 @@ def four_to_three(img): ------- imgs : list list of 3D images - ''' + """ arr = np.asanyarray(img.dataobj) header = img.header affine = img.affine @@ -184,7 +184,7 @@ def four_to_three(img): def as_closest_canonical(img, enforce_diag=False): - ''' Return `img` with data reordered to be closest to canonical + """ Return `img` with data reordered to be closest to canonical Canonical order is the ordering of the output axes. @@ -204,7 +204,7 @@ def as_closest_canonical(img, enforce_diag=False): orientation. We modify the affine accordingly. If `img` is already has the correct data ordering, we just return `img` unmodified. - ''' + """ # Get the image class to transform the data for us img = img.as_reoriented(io_orientation(img.affine)) @@ -216,6 +216,6 @@ def as_closest_canonical(img, enforce_diag=False): def _aff_is_diag(aff): - ''' Utility function returning True if affine is nearly diagonal ''' + """ Utility function returning True if affine is nearly diagonal """ rzs_aff = aff[:3, :3] return np.allclose(rzs_aff, np.diag(np.diag(rzs_aff))) diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index c1a0b7133a..1d33db8ed1 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -6,7 +6,7 @@ # copyright and license terms. 
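A doctest-style sketch tying the processor functions above together: ``four_to_three`` slices a 4D image into 3D volumes, and ``concat_images`` with the default ``axis=None`` stacks them back along a new last axis.

>>> import numpy as np
>>> from nibabel import Nifti1Image
>>> from nibabel.funcs import four_to_three, concat_images
>>> img = Nifti1Image(np.zeros((2, 3, 4, 5)), np.eye(4))
>>> vols = four_to_three(img)
>>> len(vols), vols[0].shape
(5, (2, 3, 4))
>>> concat_images(vols).shape
(2, 3, 4, 5)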
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Define supported image classes and names ''' +""" Define supported image classes and names """ from .analyze import AnalyzeImage from .brikhead import AFNIImage diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index f8c3e3be0b..85713aa24b 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -21,7 +21,7 @@ def load(filename, **kwargs): - r''' Load file given filename, guessing at file type + r""" Load file given filename, guessing at file type Parameters ---------- @@ -34,7 +34,7 @@ def load(filename, **kwargs): ------- img : ``SpatialImage`` Image of guessed type - ''' + """ filename = _stringify_path(filename) # Check file exists and is not empty @@ -83,7 +83,7 @@ def guessed_image_type(filename): def save(img, filename): - ''' Save an image to file adapting format to `filename` + """ Save an image to file adapting format to `filename` Parameters ---------- @@ -95,7 +95,7 @@ def save(img, filename): Returns ------- None - ''' + """ filename = _stringify_path(filename) # Save the type as expected diff --git a/nibabel/minc1.py b/nibabel/minc1.py index 6dfe7dde67..ebf883d7b8 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -42,12 +42,12 @@ class MincError(Exception): class Minc1File(object): - ''' Class to wrap MINC1 format opened netcdf object + """ Class to wrap MINC1 format opened netcdf object Although it has some of the same methods as a ``Header``, we use this only when reading a MINC file, to pull out useful header information, and for the method of reading the data out - ''' + """ def __init__(self, mincfile): self._mincfile = mincfile @@ -118,12 +118,12 @@ def get_affine(self): return aff def _get_valid_range(self): - ''' Return valid range for image data + """ Return valid range for image data The valid range can come from the image 'valid_range' or image 'valid_min' and 'valid_max', or, failing that, from the data type range - ''' + """ ddt = self.get_data_dtype() info = np.iinfo(ddt.type) try: @@ -238,11 +238,11 @@ def get_scaled_data(self, sliceobj=()): class MincImageArrayProxy(object): - ''' MINC implementation of array proxy protocol + """ MINC implementation of array proxy protocol The array proxy allows us to freeze the passed fileobj and header such that it returns the expected data array. - ''' + """ def __init__(self, minc_file): self.minc_file = minc_file @@ -308,12 +308,12 @@ def may_contain_header(klass, binaryblock): class Minc1Image(SpatialImage): - ''' Class for MINC1 format images + """ Class for MINC1 format images The MINC1 image class uses the default header type, rather than a specific MINC header type - and reads the relevant information from the MINC file on load. 
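The ``load`` / ``save`` entry points above form a simple round trip. A sketch; the filename is illustrative and the file is written into the current directory.

>>> import numpy as np
>>> import nibabel as nib
>>> img = nib.Nifti1Image(np.zeros((2, 3, 4)), np.eye(4))
>>> nib.save(img, 'example.nii.gz')
>>> nib.load('example.nii.gz').shape
(2, 3, 4)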
- ''' + """ header_class = Minc1Header _meta_sniff_len = 4 valid_exts = ('.mnc',) diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 90b039d8da..ba5d91d47a 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -40,12 +40,12 @@ def __init__(self, var): class Minc2File(Minc1File): - ''' Class to wrap MINC2 format file + """ Class to wrap MINC2 format file Although it has some of the same methods as a ``Header``, we use this only when reading a MINC2 file, to pull out useful header information, and for the method of reading the data out - ''' + """ def __init__(self, mincfile): self._mincfile = mincfile @@ -86,11 +86,11 @@ def get_data_shape(self): return self._image.shape def _get_valid_range(self): - ''' Return valid range for image data + """ Return valid range for image data The valid range can come from the image 'valid_range' or failing that, from the data type range - ''' + """ ddt = self.get_data_dtype() info = np.iinfo(ddt.type) try: @@ -144,12 +144,12 @@ def may_contain_header(klass, binaryblock): class Minc2Image(Minc1Image): - ''' Class for MINC2 images + """ Class for MINC2 images The MINC2 image class uses the default header type, rather than a specific MINC header type - and reads the relevant information from the MINC file on load. - ''' + """ # MINC2 does not do compressed whole files _compressed_suffixes = () header_class = Minc2Header diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 1764e2878c..6ef089e301 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -1,6 +1,6 @@ -''' CSA header reader from SPM spec +""" CSA header reader from SPM spec -''' +""" import numpy as np from .structreader import Unpacker @@ -30,7 +30,7 @@ class CSAReadError(CSAError): def get_csa_header(dcm_data, csa_type='image'): - ''' Get CSA header information from DICOM header + """ Get CSA header information from DICOM header Return None if the header does not contain CSA information of the specified `csa_type` @@ -49,7 +49,7 @@ def get_csa_header(dcm_data, csa_type='image'): csa_info : None or dict Parsed CSA field of `csa_type` or None, if we cannot find the CSA information. - ''' + """ csa_type = csa_type.lower() if csa_type == 'image': element_offset = 0x10 @@ -72,7 +72,7 @@ def get_csa_header(dcm_data, csa_type='image'): def read(csa_str): - ''' Read CSA header from string `csa_str` + """ Read CSA header from string `csa_str` Parameters ---------- @@ -85,7 +85,7 @@ def read(csa_str): header information as dict, where `header` has fields (at least) ``type, n_tags, tags``. ``header['tags']`` is also a dictionary with one key, value pair for each tag in the header. 
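A hedged usage sketch of the CSA helpers above: ``pydicom`` is assumed, the DICOM filename is hypothetical, and ``is_mosaic`` returns True only for Siemens mosaic acquisitions.

>>> import pydicom
>>> from nibabel.nicom import csareader
>>> dcm = pydicom.dcmread('siemens_mosaic.dcm')  # hypothetical file
>>> csa = csareader.get_csa_header(dcm, 'image')
>>> csareader.is_mosaic(csa)
True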
- ''' + """ csa_len = len(csa_str) csa_dict = {'tags': {}} hdr_id = csa_str[:4] @@ -185,7 +185,7 @@ def get_vector(csa_dict, tag_name, n): def is_mosaic(csa_dict): - ''' Return True if the data is of Mosaic type + """ Return True if the data is of Mosaic type Parameters ---------- @@ -197,7 +197,7 @@ def is_mosaic(csa_dict): tf : bool True if the `dcm_data` appears to be of Siemens mosaic type, False otherwise - ''' + """ if csa_dict is None: return False if get_acq_mat_txt(csa_dict) is None: @@ -244,7 +244,7 @@ def get_ice_dims(csa_dict): def nt_str(s): - ''' Strip string to first null + """ Strip string to first null Parameters ---------- @@ -254,7 +254,7 @@ def nt_str(s): ------- sdash : str s stripped to first occurence of null (0) - ''' + """ zero_pos = s.find(b'\x00') if zero_pos == -1: return s diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index ad8d9c6b64..dee8b507d5 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -16,7 +16,7 @@ class DicomReadError(Exception): def mosaic_to_nii(dcm_data): - ''' Get Nifti file from Siemens + """ Get Nifti file from Siemens Parameters ---------- @@ -27,7 +27,7 @@ def mosaic_to_nii(dcm_data): ------- img : ``Nifti1Image`` Nifti image object - ''' + """ dcm_w = wrapper_from_data(dcm_data) if not dcm_w.is_mosaic: raise DicomReadError('data does not appear to be in mosaic format') @@ -45,7 +45,7 @@ def read_mosaic_dwi_dir(dicom_path, globber='*.dcm', dicom_kwargs=None): def read_mosaic_dir(dicom_path, globber='*.dcm', check_is_dwi=False, dicom_kwargs=None): - ''' Read all Siemens mosaic DICOMs in directory, return arrays, params + """ Read all Siemens mosaic DICOMs in directory, return arrays, params Parameters ---------- @@ -74,7 +74,7 @@ def read_mosaic_dir(dicom_path, unit_gradients : (N, 3) array gradient directions of unit length for each acquisition. (nan, nan, nan) if we did not find diffusion information. 
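A sketch of the return convention documented above; the directory name is hypothetical, and the three per-acquisition outputs line up with the last axis of the data.

>>> from nibabel.nicom.dicomreaders import read_mosaic_dir
>>> data, affine, bvals, gradients = read_mosaic_dir('dwi_dir')  # hypothetical directory
>>> data.shape[-1] == len(bvals) == len(gradients)
True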
- ''' + """ if dicom_kwargs is None: dicom_kwargs = {} full_globber = pjoin(dicom_path, globber) @@ -116,7 +116,7 @@ def read_mosaic_dir(dicom_path, def slices_to_series(wrappers): - ''' Sort sequence of slice wrappers into series + """ Sort sequence of slice wrappers into series This follows the SPM model fairly closely @@ -130,7 +130,7 @@ def slices_to_series(wrappers): series : sequence sequence of sequences of wrapper objects, where each sequence is wrapper objects comprising a series, sorted into slice order - ''' + """ # first pass volume_lists = [wrappers[0:1]] for dw in wrappers[1:]: @@ -171,7 +171,7 @@ def _instance_sorter(s): def _third_pass(wrappers): - ''' What we do when there are not unique zs in a slice set ''' + """ What we do when there are not unique zs in a slice set """ inos = [s.instance_number for s in wrappers] msg_fmt = ('Plausibly matching slices, but where some have ' 'the same apparent slice location, and %s; ' diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index f37d0323a8..b718b980aa 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -600,7 +600,7 @@ def image_orient_patient(self): @one_time def voxel_sizes(self): - ''' Get i, j, k voxel sizes ''' + """ Get i, j, k voxel sizes """ try: pix_measures = self.shared.PixelMeasuresSequence[0] except AttributeError: diff --git a/nibabel/nicom/dwiparams.py b/nibabel/nicom/dwiparams.py index e9d05c0d57..6146897377 100644 --- a/nibabel/nicom/dwiparams.py +++ b/nibabel/nicom/dwiparams.py @@ -1,4 +1,4 @@ -''' Process diffusion imaging parameters +""" Process diffusion imaging parameters * ``q`` is a vector in Q space * ``b`` is a b value @@ -18,13 +18,13 @@ B ~ (q_est . q_est.T) / norm(q_est) -''' +""" import numpy as np import numpy.linalg as npl def B2q(B, tol=None): - ''' Estimate q vector from input B matrix `B` + """ Estimate q vector from input B matrix `B` We require that the input `B` is symmetric positive definite. @@ -47,7 +47,7 @@ def B2q(B, tol=None): ------- q : (3,) vector Estimated q vector from B matrix `B` - ''' + """ B = np.asarray(B) if not np.allclose(B - B.T, 0): raise ValueError('B matrix is not symmetric enough') @@ -68,7 +68,7 @@ def B2q(B, tol=None): def nearest_pos_semi_def(B): - ''' Least squares positive semi-definite tensor estimation + """ Least squares positive semi-definite tensor estimation Reference: Niethammer M, San Jose Estepar R, Bouix S, Shenton M, Westin CF. On diffusion tensor estimation. Conf Proc IEEE Eng Med @@ -92,7 +92,7 @@ def nearest_pos_semi_def(B): array([[ 0.75, 0. , 0. ], [ 0. , 0.75, 0. ], [ 0. , 0. , 0. ]]) - ''' + """ B = np.asarray(B) vals, vecs = npl.eigh(B) # indices of eigenvalues in descending order diff --git a/nibabel/nicom/structreader.py b/nibabel/nicom/structreader.py index c40975b168..644f50d345 100644 --- a/nibabel/nicom/structreader.py +++ b/nibabel/nicom/structreader.py @@ -1,4 +1,4 @@ -''' Stream-like reader for packed data ''' +""" Stream-like reader for packed data """ from struct import Struct @@ -6,7 +6,7 @@ class Unpacker(object): - ''' Class to unpack values from buffer object + """ Class to unpack values from buffer object The buffer object is usually a string. 
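For the ``B2q`` estimation in ``dwiparams`` above: the relation ``B ~ (q . q.T) / norm(q)`` means a b value of 1000 along unit gradient ``g`` gives ``B = 1000 * g g^T``. A sketch; the sign of the recovered eigenvector is arbitrary, hence the ``np.abs``.

>>> import numpy as np
>>> from nibabel.nicom.dwiparams import B2q
>>> g = np.array([1., 0., 0.])
>>> B = 1000 * np.outer(g, g)
>>> np.allclose(np.abs(B2q(B)), [1000., 0., 0.])
True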
Caches compiled :mod:`struct` format strings so that repeated unpacking with the same format @@ -26,10 +26,10 @@ class Unpacker(object): True >>> upk.ptr 7 - ''' + """ def __init__(self, buf, ptr=0, endian=None): - ''' Initialize unpacker + """ Initialize unpacker Parameters ---------- @@ -43,14 +43,14 @@ def __init__(self, buf, ptr=0, endian=None): behavior of ``struct`` - assuming system endian unless you specify the byte order specifically in the format string passed to ``unpack`` - ''' + """ self.buf = buf self.ptr = ptr self.endian = endian self._cache = {} def unpack(self, fmt): - ''' Unpack values from contained buffer + """ Unpack values from contained buffer Unpacks values from ``self.buf`` and updates ``self.ptr`` to the position after the read data. @@ -64,7 +64,7 @@ def unpack(self, fmt): ------- values : tuple values as unpacked from ``self.buf`` according to `fmt` - ''' + """ # try and get a struct corresponding to the format string from # the cache pkst = self._cache.get(fmt) @@ -89,7 +89,7 @@ def unpack(self, fmt): return values def read(self, n_bytes=-1): - ''' Return byte string of length `n_bytes` at current position + """ Return byte string of length `n_bytes` at current position Returns sub-string from ``self.buf`` and updates ``self.ptr`` to the position after the read data. @@ -103,7 +103,7 @@ def read(self, n_bytes=-1): Returns ------- s : byte string - ''' + """ start = self.ptr if n_bytes == -1: end = len(self.buf) diff --git a/nibabel/nicom/tests/data_pkgs.py b/nibabel/nicom/tests/data_pkgs.py index 56c135fd5b..2424666a72 100644 --- a/nibabel/nicom/tests/data_pkgs.py +++ b/nibabel/nicom/tests/data_pkgs.py @@ -1,4 +1,4 @@ -''' Data packages for DICOM testing ''' +""" Data packages for DICOM testing """ from ... import data as nibd diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index dbaf4a97b6..202decd8e0 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -6,10 +6,10 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Read / write access to NIfTI1 image format +""" Read / write access to NIfTI1 image format NIfTI1 format defined at http://nifti.nimh.nih.gov/nifti-1/ -''' +""" import warnings from io import BytesIO @@ -355,7 +355,7 @@ def __ne__(self, other): return not self == other def write_to(self, fileobj, byteswap): - ''' Write header extensions to fileobj + """ Write header extensions to fileobj Write starts at fileobj current file position. @@ -369,7 +369,7 @@ def write_to(self, fileobj, byteswap): Returns ------- None - ''' + """ extstart = fileobj.tell() rawsize = self.get_sizeondisk() # write esize and ecode first @@ -526,7 +526,7 @@ def __cmp__(self, other): return cmp(list(self), list(other)) def write_to(self, fileobj, byteswap): - ''' Write header extensions to fileobj + """ Write header extensions to fileobj Write starts at fileobj current file position. @@ -540,13 +540,13 @@ def write_to(self, fileobj, byteswap): Returns ------- None - ''' + """ for e in self: e.write_to(fileobj, byteswap) @classmethod def from_fileobj(klass, fileobj, size, byteswap): - '''Read header extensions from a fileobj + """Read header extensions from a fileobj Parameters ---------- @@ -562,7 +562,7 @@ def from_fileobj(klass, fileobj, size, byteswap): ------- An extension list. This list might be empty in case not extensions were present in fileobj. - ''' + """ # make empty extension list extensions = klass() # assume the file pointer is at the beginning of any extensions. 
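A minimal sketch of the extension list these ``write_to`` / ``from_fileobj`` methods serialize, using the ``Nifti1Extension`` API from this module (the ``'comment'`` code is one of the registered extension codes):

>>> from nibabel.nifti1 import Nifti1Header, Nifti1Extension
>>> hdr = Nifti1Header()
>>> hdr.extensions.append(Nifti1Extension('comment', b'process log'))
>>> len(hdr.extensions)
1
>>> hdr.extensions.count('comment')
1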
@@ -611,7 +611,7 @@ def from_fileobj(klass, fileobj, size, byteswap): class Nifti1Header(SpmAnalyzeHeader): - ''' Class for NIfTI1 header + """ Class for NIfTI1 header The NIfTI1 header has many more coded fields than the simpler Analyze variants. NIfTI1 headers also have extensions. @@ -622,7 +622,7 @@ class Nifti1Header(SpmAnalyzeHeader): data, extension reading, and writing the correct magic string. This class handles the header-preceding-data case. - ''' + """ # Copies of module level definitions template_dtype = header_dtype _data_type_codes = data_type_codes @@ -661,18 +661,18 @@ def __init__(self, endianness=None, check=True, extensions=()): - ''' Initialize header from binary data block and extensions - ''' + """ Initialize header from binary data block and extensions + """ super(Nifti1Header, self).__init__(binaryblock, endianness, check) self.extensions = self.exts_klass(extensions) def copy(self): - ''' Return copy of header + """ Return copy of header Take reference to extensions as well as copy of header contents - ''' + """ return self.__class__( self.binaryblock, self.endianness, @@ -726,7 +726,7 @@ def write_to(self, fileobj): self.extensions.write_to(fileobj, byteswap) def get_best_affine(self): - ''' Select best of available transforms ''' + """ Select best of available transforms """ hdr = self._structarr if hdr['sform_code'] != 0: return self.get_sform() @@ -736,7 +736,7 @@ def get_best_affine(self): @classmethod def default_structarr(klass, endianness=None): - ''' Create empty header binary block with given endianness ''' + """ Create empty header binary block with given endianness """ hdr_data = super(Nifti1Header, klass).default_structarr(endianness) if klass.is_single: hdr_data['magic'] = klass.single_magic @@ -746,7 +746,7 @@ def default_structarr(klass, endianness=None): @classmethod def from_header(klass, header=None, check=True): - ''' Class method to create header from another header + """ Class method to create header from another header Extend Analyze header copy by copying extensions from other Nifti types. @@ -763,14 +763,14 @@ def from_header(klass, header=None, check=True): ------- hdr : header instance fresh header instance of our own class - ''' + """ new_hdr = super(Nifti1Header, klass).from_header(header, check) if isinstance(header, Nifti1Header): new_hdr.extensions[:] = header.extensions[:] return new_hdr def get_data_shape(self): - ''' Get shape of data + """ Get shape of data Examples -------- @@ -793,7 +793,7 @@ def get_data_shape(self): Allows for freesurfer hack for 7th order icosahedron surface described in `issue 309`_, load_nifti.m_, and `save_nifti.m `_. - ''' + """ shape = super(Nifti1Header, self).get_data_shape() # Apply freesurfer hack for large vectors if shape[:3] == (-1, 1, 1): @@ -809,7 +809,7 @@ def get_data_shape(self): return shape def set_data_shape(self, shape): - ''' Set shape of data # noqa + """ Set shape of data # noqa If ``ndims == len(shape)`` then we set zooms for dimensions higher than ``ndims`` to 1.0 @@ -858,7 +858,7 @@ def set_data_shape(self, shape): .. _load_nifti.m: https://github.com/fieldtrip/fieldtrip/blob/428798b/external/freesurfer/load_nifti.m#L86-L89 .. 
_standard header: http://nifti.nimh.nih.gov/pub/dist/src/niftilib/nifti1.h - ''' + """ hdr = self._structarr shape = tuple(shape) @@ -883,10 +883,10 @@ def set_data_shape(self, shape): super(Nifti1Header, self).set_data_shape(shape) def get_qform_quaternion(self): - ''' Compute quaternion from b, c, d of quaternion + """ Compute quaternion from b, c, d of quaternion Fills a value by assuming this is a unit quaternion - ''' + """ hdr = self._structarr bcd = [hdr['quatern_b'], hdr['quatern_c'], hdr['quatern_d']] # Adjust threshold to precision of stored values in header @@ -934,7 +934,7 @@ def get_qform(self, coded=False): return out def set_qform(self, affine, code=None, strip_shears=True): - ''' Set qform header values from 4x4 affine + """ Set qform header values from 4x4 affine Parameters ---------- @@ -989,7 +989,7 @@ def set_qform(self, affine, code=None, strip_shears=True): >>> hdr.set_qform(None) >>> int(hdr['qform_code']) 0 - ''' + """ hdr = self._structarr old_code = hdr['qform_code'] if code is None: @@ -1067,7 +1067,7 @@ def get_sform(self, coded=False): return out def set_sform(self, affine, code=None): - ''' Set sform transform from 4x4 affine + """ Set sform transform from 4x4 affine Parameters ---------- @@ -1108,7 +1108,7 @@ def set_sform(self, affine, code=None): >>> hdr.set_sform(None) >>> int(hdr['sform_code']) 0 - ''' + """ hdr = self._structarr old_code = hdr['sform_code'] if code is None: @@ -1129,7 +1129,7 @@ def set_sform(self, affine, code=None): hdr['srow_z'][:] = affine[2, :] def get_slope_inter(self): - ''' Get data scaling (slope) and DC offset (intercept) from header data + """ Get data scaling (slope) and DC offset (intercept) from header data Returns ------- @@ -1160,7 +1160,7 @@ def get_slope_inter(self): Traceback (most recent call last): ... HeaderDataError: Valid slope but invalid intercept inf - ''' + """ # Note that we are returning float (float64) scalefactors and # intercepts, although they are stored as in nifti1 as float32. slope = float(self['scl_slope']) @@ -1173,7 +1173,7 @@ def get_slope_inter(self): return slope, inter def set_slope_inter(self, slope, inter=None): - ''' Set slope and / or intercept into header + """ Set slope and / or intercept into header Set slope and intercept for image data, such that, if the image data is ``arr``, then the scaled image data will be ``(arr * @@ -1192,7 +1192,7 @@ def set_slope_inter(self, slope, inter=None): Intercept. If None, implies `inter` of NaN. If `slope` is None or NaN then `inter` should be None or NaN. 
Values of Inf or -Inf raise HeaderDataError - ''' + """ if slope is None: slope = np.nan if inter is None: @@ -1207,7 +1207,7 @@ def set_slope_inter(self, slope, inter=None): self._structarr['scl_inter'] = inter def get_dim_info(self): - ''' Gets NIfTI MRI slice etc dimension information + """ Gets NIfTI MRI slice etc dimension information Returns ------- @@ -1231,7 +1231,7 @@ def get_dim_info(self): -------- See set_dim_info function - ''' + """ hdr = self._structarr info = int(hdr['dim_info']) freq = info & 3 @@ -1242,7 +1242,7 @@ def get_dim_info(self): slice - 1 if slice else None) def set_dim_info(self, freq=None, phase=None, slice=None): - ''' Sets nifti MRI slice etc dimension information + """ Sets nifti MRI slice etc dimension information Parameters ---------- @@ -1274,7 +1274,7 @@ def set_dim_info(self, freq=None, phase=None, slice=None): Notes ----- This is stored in one byte in the header - ''' + """ for inp in (freq, phase, slice): # Don't use == on None to avoid a FutureWarning in python3 if inp is not None and inp not in (0, 1, 2): @@ -1289,7 +1289,7 @@ def set_dim_info(self, freq=None, phase=None, slice=None): self._structarr['dim_info'] = info def get_intent(self, code_repr='label'): - ''' Get intent code, parameters and name + """ Get intent code, parameters and name Parameters ---------- @@ -1314,7 +1314,7 @@ def get_intent(self, code_repr='label'): ('t test', (10.0,), 'some score') >>> hdr.get_intent('code') (3, (10.0,), 'some score') - ''' + """ hdr = self._structarr recoder = self._field_recoders['intent_code'] code = int(hdr['intent_code']) @@ -1334,7 +1334,7 @@ def get_intent(self, code_repr='label'): return label, tuple(params), name def set_intent(self, code, params=(), name='', allow_unknown=False): - ''' Set the intent code, parameters and name + """ Set the intent code, parameters and name If parameters are not specified, assumed to be all zero. Each intent code has a set number of parameters associated. If you @@ -1382,7 +1382,7 @@ def set_intent(self, code, params=(), name='', allow_unknown=False): >>> hdr.set_intent(9999, allow_unknown=True) # unknown code >>> hdr.get_intent() ('unknown code 9999', (), '') - ''' + """ hdr = self._structarr known_intent = code in intent_codes if not known_intent: @@ -1407,7 +1407,7 @@ def set_intent(self, code, params=(), name='', allow_unknown=False): hdr['intent_p%d' % (i + 1)] = param def get_slice_duration(self): - ''' Get slice duration + """ Get slice duration Returns ------- @@ -1426,7 +1426,7 @@ def get_slice_duration(self): ----- The NIfTI1 spec appears to require the slice dimension to be defined for slice_duration to have meaning. 
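A doctest-style sketch of the requirement just noted: the slice dimension must be set before the duration is meaningful. 0.25 is used because it is exactly representable in the float32 header field.

>>> from nibabel import Nifti1Header
>>> hdr = Nifti1Header()
>>> hdr.set_dim_info(slice=2)
>>> hdr.set_slice_duration(0.25)
>>> hdr.get_slice_duration()
0.25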
- ''' + """ _, _, slice_dim = self.get_dim_info() if slice_dim is None: raise HeaderDataError('Slice dimension must be set ' @@ -1434,7 +1434,7 @@ def get_slice_duration(self): return float(self._structarr['slice_duration']) def set_slice_duration(self, duration): - ''' Set slice duration + """ Set slice duration Parameters ---------- @@ -1444,7 +1444,7 @@ def set_slice_duration(self, duration): Examples -------- See ``get_slice_duration`` - ''' + """ _, _, slice_dim = self.get_dim_info() if slice_dim is None: raise HeaderDataError('Slice dimension must be set ' @@ -1452,8 +1452,8 @@ def set_slice_duration(self, duration): self._structarr['slice_duration'] = duration def get_n_slices(self): - ''' Return the number of slices - ''' + """ Return the number of slices + """ _, _, slice_dim = self.get_dim_info() if slice_dim is None: raise HeaderDataError('Slice dimension not set in header ' @@ -1468,7 +1468,7 @@ def get_n_slices(self): return slice_len def get_slice_times(self): - ''' Get slice times from slice timing information + """ Get slice times from slice timing information Returns ------- @@ -1490,7 +1490,7 @@ def get_slice_times(self): >>> slice_times = hdr.get_slice_times() >>> np.allclose(slice_times, [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]) True - ''' + """ hdr = self._structarr slice_len = self.get_n_slices() duration = self.get_slice_duration() @@ -1514,7 +1514,7 @@ def get_slice_times(self): (None,) * (slice_len - slice_end - 1)) def set_slice_times(self, slice_times): - ''' Set slice times into *hdr* + """ Set slice times into *hdr* Parameters ---------- @@ -1536,7 +1536,7 @@ def set_slice_times(self, slice_times): 1 >>> int(hdr['slice_end']) 5 - ''' + """ # Check if number of slices matches header hdr = self._structarr slice_len = self.get_n_slices() @@ -1597,7 +1597,7 @@ def set_slice_times(self, slice_times): hdr['slice_code'] = slice_order_codes.code[label] def _slice_time_order(self, slabel, n_slices): - ''' Supporting function to give time order of slices from label ''' + """ Supporting function to give time order of slices from label """ if slabel == 'sequential increasing': sp_ind_time_order = list(range(n_slices)) elif slabel == 'sequential decreasing': @@ -1637,18 +1637,18 @@ def set_xyzt_units(self, xyz=None, t=None): self.structarr['xyzt_units'] = xyz_code + t_code def _clean_after_mapping(self): - ''' Set format-specific stuff after converting header from mapping + """ Set format-specific stuff after converting header from mapping Clean up header after it has been initialized from an ``as_analyze_map`` method of another header type See :meth:`nibabel.analyze.AnalyzeHeader._clean_after_mapping` for a more detailed description. - ''' + """ self._structarr['magic'] = (self.single_magic if self.is_single else self.pair_magic) - ''' Checks only below here ''' + """ Checks only below here """ @classmethod def _get_checks(klass): @@ -1751,7 +1751,7 @@ def may_contain_header(klass, binaryblock): class Nifti1PairHeader(Nifti1Header): - ''' Class for NIfTI1 pair header ''' + """ Class for NIfTI1 pair header """ # Signal whether this is single (header + data) file is_single = False @@ -1774,7 +1774,7 @@ def __init__(self, dataobj, affine, header=None, if header is None and affine is not None: self._affine2header() # Copy docstring - __init__.__doc__ = analyze.AnalyzeImage.__init__.__doc__ + ''' + __init__.__doc__ = analyze.AnalyzeImage.__init__.__doc__ + """ Notes ----- @@ -1786,10 +1786,10 @@ def __init__(self, dataobj, affine, header=None, space to which the affine is pointing. 
The :meth:`set_sform` and :meth:`set_qform` methods can be used to update the codes after an image has been created - see those methods, and the :ref:`manual - ` for more details. ''' + ` for more details. """ def update_header(self): - ''' Harmonize header with image data and affine + """ Harmonize header with image data and affine See AnalyzeImage.update_header for more examples @@ -1803,7 +1803,7 @@ def update_header(self): True >>> np.all(hdr.get_sform() == affine) True - ''' + """ super(Nifti1Pair, self).update_header() hdr = self._header hdr['magic'] = hdr.pair_magic @@ -1843,7 +1843,7 @@ def get_qform(self, coded=False): return self._header.get_qform(coded) def set_qform(self, affine, code=None, strip_shears=True, **kwargs): - ''' Set qform header values from 4x4 affine + """ Set qform header values from 4x4 affine Parameters ---------- @@ -1892,7 +1892,7 @@ def set_qform(self, affine, code=None, strip_shears=True, **kwargs): True >>> int(code) 3 - ''' + """ update_affine = kwargs.pop('update_affine', True) if kwargs: raise TypeError('Unexpected keyword argument(s) %s' % kwargs) @@ -1930,7 +1930,7 @@ def get_sform(self, coded=False): return self._header.get_sform(coded) def set_sform(self, affine, code=None, **kwargs): - ''' Set sform transform from 4x4 affine + """ Set sform transform from 4x4 affine Parameters ---------- @@ -1981,7 +1981,7 @@ def set_sform(self, affine, code=None, **kwargs): True >>> int(code) 3 - ''' + """ update_affine = kwargs.pop('update_affine', True) if kwargs: raise TypeError('Unexpected keyword argument(s) %s' % kwargs) @@ -2040,7 +2040,7 @@ def _get_fileholders(file_map): return file_map['image'], file_map['image'] def update_header(self): - ''' Harmonize header with image data and affine ''' + """ Harmonize header with image data and affine """ super(Nifti1Image, self).update_header() hdr = self._header hdr['magic'] = hdr.single_magic diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 45e834b29a..8c58569d96 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -6,12 +6,12 @@ # copyright and license terms. 
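A sketch of the coded-affine behavior that ``update_header`` maintains: constructing an image from an affine fills both transforms, so ``get_sform(coded=True)`` returns the affine together with its code (current nibabel uses the 'aligned' code here; treat the exact code value as an assumption).

>>> import numpy as np
>>> from nibabel import Nifti1Image
>>> img = Nifti1Image(np.zeros((2, 3, 4)), np.diag([2., 2., 2., 1.]))
>>> aff, code = img.get_sform(coded=True)
>>> np.allclose(aff, img.affine)
True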
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Read / write access to NIfTI2 image format +""" Read / write access to NIfTI2 image format Format described here: https://www.nitrc.org/forum/message.php?msg_id=3738 -''' +""" import numpy as np @@ -141,7 +141,7 @@ class Nifti2Header(Nifti1Header): quaternion_threshold = -np.finfo(np.float64).eps * 3 def get_data_shape(self): - ''' Get shape of data + """ Get shape of data Examples -------- @@ -161,11 +161,11 @@ def get_data_shape(self): ----- Does not use Nifti1 freesurfer hack for large vectors described in :meth:`Nifti1Header.set_data_shape` - ''' + """ return AnalyzeHeader.get_data_shape(self) def set_data_shape(self, shape): - ''' Set shape of data + """ Set shape of data If ``ndims == len(shape)`` then we set zooms for dimensions higher than ``ndims`` to 1.0 @@ -179,17 +179,17 @@ def set_data_shape(self, shape): ----- Does not apply nifti1 Freesurfer hack for long vectors (see :meth:`Nifti1Header.set_data_shape`) - ''' + """ AnalyzeHeader.set_data_shape(self, shape) @classmethod def default_structarr(klass, endianness=None): - ''' Create empty header binary block with given endianness ''' + """ Create empty header binary block with given endianness """ hdr_data = super(Nifti2Header, klass).default_structarr(endianness) hdr_data['eol_check'] = (13, 10, 26, 10) return hdr_data - ''' Checks only below here ''' + """ Checks only below here """ @classmethod def _get_checks(klass): @@ -229,7 +229,7 @@ def may_contain_header(klass, binaryblock): class Nifti2PairHeader(Nifti2Header): - ''' Class for NIfTI2 pair header ''' + """ Class for NIfTI2 pair header """ # Signal whether this is single (header + data) file is_single = False diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 132d795d76..01ae7b5866 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Utilities for calculating and applying affine orientations ''' +""" Utilities for calculating and applying affine orientations """ import numpy as np @@ -20,7 +20,7 @@ class OrientationError(Exception): def io_orientation(affine, tol=None): - ''' Orientation of input axes in terms of output axes for `affine` + """ Orientation of input axes in terms of output axes for `affine` Valid for an affine transformation from ``p`` dimensions to ``q`` dimensions (``affine.shape == (q + 1, p + 1)``). @@ -50,7 +50,7 @@ def io_orientation(affine, tol=None): input axis is in the same direction as the corresponding output axis and -1 if it is in the opposite direction. If a row is [np.nan, np.nan], which can happen when p > q, then this row should be considered dropped. - ''' + """ affine = np.asarray(affine) q, p = affine.shape[0] - 1, affine.shape[1] - 1 # extract the underlying rotation, zoom, shear matrix @@ -93,7 +93,7 @@ def io_orientation(affine, tol=None): def ornt_transform(start_ornt, end_ornt): - '''Return the orientation that transforms from `start_ornt` to `end_ornt`. + """Return the orientation that transforms from `start_ornt` to `end_ornt`. Parameters ---------- @@ -107,7 +107,7 @@ def ornt_transform(start_ornt, end_ornt): ------- orientations : (p, 2) ndarray The orientation that will transform the `start_ornt` to the `end_ornt`. 
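Doctest-style sketches of the two helpers above: an axis flip in the affine shows up as -1 in the second column of the orientation array, and with an identity ``start_ornt`` the computed transform is simply ``end_ornt``.

>>> import numpy as np
>>> from nibabel.orientations import io_orientation, ornt_transform
>>> np.allclose(io_orientation(np.diag([1., -2., 3., 1.])),
...             [[0, 1], [1, -1], [2, 1]])
True
>>> np.allclose(ornt_transform([[0, 1], [1, 1], [2, 1]],
...                            [[1, 1], [0, 1], [2, -1]]),
...             [[1, 1], [0, 1], [2, -1]])
True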
- ''' + """ start_ornt = np.asarray(start_ornt) end_ornt = np.asarray(end_ornt) if start_ornt.shape != end_ornt.shape: @@ -132,7 +132,7 @@ def ornt_transform(start_ornt, end_ornt): def apply_orientation(arr, ornt): - ''' Apply transformations implied by `ornt` to the first + """ Apply transformations implied by `ornt` to the first n axes of the array `arr` Parameters @@ -151,7 +151,7 @@ def apply_orientation(arr, ornt): ------- t_arr : ndarray data array `arr` transformed according to ornt - ''' + """ t_arr = np.asarray(arr) ornt = np.asarray(ornt) n = ornt.shape[0] @@ -174,7 +174,7 @@ def apply_orientation(arr, ornt): def inv_ornt_aff(ornt, shape): - ''' Affine transform reversing transforms implied in `ornt` + """ Affine transform reversing transforms implied in `ornt` Imagine you have an array ``arr`` of shape `shape`, and you apply the transforms implied by `ornt` (more below), to get ``tarr``. @@ -209,7 +209,7 @@ def inv_ornt_aff(ornt, shape): influence the output space, and is thus effectively dropped from the output space. In that case one ``tarr`` coordinate maps to many ``arr`` coordinates, we can't invert the transform, and we raise an error - ''' + """ ornt = np.asarray(ornt) if np.any(np.isnan(ornt)): raise OrientationError("We cannot invert orientation transform") @@ -242,7 +242,7 @@ def orientation_affine(ornt, shape): '3.2', '4.0') def flip_axis(arr, axis=0): - ''' Flip contents of `axis` in array `arr` + """ Flip contents of `axis` in array `arr` Equivalent to ``np.flip(arr, axis=0)``. @@ -256,7 +256,7 @@ def flip_axis(arr, axis=0): ------- farr : array Array with axis `axis` flipped - ''' + """ return np.flip(arr, axis) diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 7be15315d2..43b39f4e89 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -63,7 +63,7 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__): def pkg_commit_hash(pkg_path=None): - ''' Get short form of commit hash + """ Get short form of commit hash Versioneer placed a ``_version.py`` file in the package directory. This file gets updated on installation or ``git archive``. @@ -83,7 +83,7 @@ def pkg_commit_hash(pkg_path=None): Where we got the hash from - description hash_str : str short form of hash - ''' + """ versions = _version.get_versions() hash_str = versions['full-revisionid'][:7] if hasattr(_version, 'version_json'): @@ -98,7 +98,7 @@ def pkg_commit_hash(pkg_path=None): def get_pkg_info(pkg_path): - ''' Return dict describing the context of this package + """ Return dict describing the context of this package Parameters ---------- @@ -109,7 +109,7 @@ def get_pkg_info(pkg_path): ------- context : dict with named parameters of interest - ''' + """ src, hsh = pkg_commit_hash() import numpy return dict( diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index adc2367238..cd3646853d 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' +""" Functions to operate on, or return, quaternions. 
The module also includes functions for the closely related angle, axis @@ -23,7 +23,7 @@ >>> M = quat2mat(q) # from this module >>> vec = np.array([1, 2, 3]).reshape((3,1)) # column vector >>> tvec = np.dot(M, vec) -''' +""" import math import numpy as np @@ -33,7 +33,7 @@ def fillpositive(xyz, w2_thresh=None): - ''' Compute unit quaternion from last 3 values + """ Compute unit quaternion from last 3 values Parameters ---------- @@ -80,7 +80,7 @@ def fillpositive(xyz, w2_thresh=None): True >>> np.dot(wxyz, wxyz) 1.0 - ''' + """ # Check inputs (force error if < 3 values) if len(xyz) != 3: raise ValueError('xyz should have length 3') @@ -104,7 +104,7 @@ def fillpositive(xyz, w2_thresh=None): def quat2mat(q): - ''' Calculate rotation matrix corresponding to quaternion + """ Calculate rotation matrix corresponding to quaternion Parameters ---------- @@ -135,7 +135,7 @@ def quat2mat(q): >>> M = quat2mat([0, 1, 0, 0]) # 180 degree rotn around axis 0 >>> np.allclose(M, np.diag([1, -1, -1])) True - ''' + """ w, x, y, z = q Nq = w * w + x * x + y * y + z * z if Nq < FLOAT_EPS: @@ -153,7 +153,7 @@ def quat2mat(q): def mat2quat(M): - ''' Calculate quaternion corresponding to given rotation matrix + """ Calculate quaternion corresponding to given rotation matrix Parameters ---------- @@ -195,7 +195,7 @@ def mat2quat(M): >>> np.allclose(q, [0, 1, 0, 0]) # 180 degree rotn around axis 0 True - ''' + """ # Qyx refers to the contribution of the y input vector component to # the x output vector component. Qyx is therefore the same as # M[0,1]. The notation is from the Wikipedia article. @@ -219,7 +219,7 @@ def mat2quat(M): def mult(q1, q2): - ''' Multiply two quaternions + """ Multiply two quaternions Parameters ---------- @@ -233,7 +233,7 @@ def mult(q1, q2): Notes ----- See : https://en.wikipedia.org/wiki/Quaternions#Hamilton_product - ''' + """ w1, x1, y1, z1 = q1 w2, x2, y2, z2 = q2 w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2 @@ -244,7 +244,7 @@ def mult(q1, q2): def conjugate(q): - ''' Conjugate of quaternion + """ Conjugate of quaternion Parameters ---------- @@ -255,12 +255,12 @@ def conjugate(q): ------- conjq : array shape (4,) w, i, j, k of conjugate of `q` - ''' + """ return np.array(q) * np.array([1.0, -1, -1, -1]) def norm(q): - ''' Return norm of quaternion + """ Return norm of quaternion Parameters ---------- @@ -271,17 +271,17 @@ def norm(q): ------- n : scalar quaternion norm - ''' + """ return np.dot(q, q) def isunit(q): - ''' Return True is this is very nearly a unit quaternion ''' + """ Return True is this is very nearly a unit quaternion """ return np.allclose(norm(q), 1) def inverse(q): - ''' Return multiplicative inverse of quaternion `q` + """ Return multiplicative inverse of quaternion `q` Parameters ---------- @@ -292,17 +292,17 @@ def inverse(q): ------- invq : array shape (4,) w, i, j, k of quaternion inverse - ''' + """ return conjugate(q) / norm(q) def eye(): - ''' Return identity quaternion ''' + """ Return identity quaternion """ return np.array([1.0, 0, 0, 0]) def rotate_vector(v, q): - ''' Apply transformation in quaternion `q` to vector `v` + """ Apply transformation in quaternion `q` to vector `v` Parameters ---------- @@ -321,14 +321,14 @@ def rotate_vector(v, q): See: https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Describing_rotations_with_quaternions - ''' + """ varr = np.zeros((4,)) varr[1:] = v return mult(q, mult(varr, conjugate(q)))[1:] def nearly_equivalent(q1, q2, rtol=1e-5, atol=1e-8): - ''' Returns True if `q1` and `q2` give near equivalent 
transforms + """ Returns True if `q1` and `q2` give near equivalent transforms `q1` may be nearly numerically equal to `q2`, or nearly equal to `q2` * -1 (because a quaternion multiplied by -1 gives the same transform). @@ -354,7 +354,7 @@ def nearly_equivalent(q1, q2, rtol=1e-5, atol=1e-8): True >>> nearly_equivalent(q1, [-1, 0, 0, 0]) True - ''' + """ q1 = np.array(q1) q2 = np.array(q2) if np.allclose(q1, q2, rtol, atol): @@ -363,7 +363,7 @@ def nearly_equivalent(q1, q2, rtol=1e-5, atol=1e-8): def angle_axis2quat(theta, vector, is_normalized=False): - ''' Quaternion for rotation of angle `theta` around `vector` + """ Quaternion for rotation of angle `theta` around `vector` Parameters ---------- @@ -389,7 +389,7 @@ def angle_axis2quat(theta, vector, is_normalized=False): Notes ----- Formula from http://mathworld.wolfram.com/EulerParameters.html - ''' + """ vector = np.array(vector) if not is_normalized: # Cannot divide in-place because input vector may be integer type, @@ -403,7 +403,7 @@ def angle_axis2quat(theta, vector, is_normalized=False): def angle_axis2mat(theta, vector, is_normalized=False): - ''' Rotation matrix of angle `theta` around `vector` + """ Rotation matrix of angle `theta` around `vector` Parameters ---------- @@ -423,7 +423,7 @@ def angle_axis2mat(theta, vector, is_normalized=False): Notes ----- From: https://en.wikipedia.org/wiki/Rotation_matrix#Axis_and_angle - ''' + """ x, y, z = vector if not is_normalized: n = math.sqrt(x * x + y * y + z * z) @@ -441,7 +441,7 @@ def angle_axis2mat(theta, vector, is_normalized=False): def quat2angle_axis(quat, identity_thresh=None): - ''' Convert quaternion to rotation of angle around axis + """ Convert quaternion to rotation of angle around axis Parameters ---------- @@ -479,7 +479,7 @@ def quat2angle_axis(quat, identity_thresh=None): A quaternion for which x, y, z are all equal to 0, is an identity rotation. In this case we return a 0 angle and an arbitrary vector, here [1, 0, 0] - ''' + """ w, x, y, z = quat vec = np.asarray([x, y, z]) if identity_thresh is None: diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 0a140ec589..19dd2a0247 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' A simple spatial image class +""" A simple spatial image class The image class maintains the association between a 3D (or greater) array, and an affine transform that maps voxel coordinates to some world space. @@ -132,7 +132,7 @@ >>> np.all(img3.get_fdata(dtype=np.float32) == data) True -''' +""" import numpy as np @@ -147,15 +147,15 @@ class HeaderDataError(Exception): - ''' Class to indicate error in getting or setting header data ''' + """ Class to indicate error in getting or setting header data """ class HeaderTypeError(Exception): - ''' Class to indicate error in parameters into header functions ''' + """ Class to indicate error in parameters into header functions """ class SpatialHeader(FileBasedHeader): - ''' Template class to implement header protocol ''' + """ Template class to implement header protocol """ default_x_flip = True data_layout = 'F' @@ -202,11 +202,11 @@ def __ne__(self, other): return not self == other def copy(self): - ''' Copy object to independent representation + """ Copy object to independent representation The copy should not be affected by any changes to the original object. 
- ''' + """ return self.__class__(self._dtype, self._shape, self._zooms) def get_data_dtype(self): @@ -252,7 +252,7 @@ def get_base_affine(self): get_best_affine = get_base_affine def data_to_fileobj(self, data, fileobj, rescale=True): - ''' Write array data `data` as binary to `fileobj` + """ Write array data `data` as binary to `fileobj` Parameters ---------- @@ -263,13 +263,13 @@ def data_to_fileobj(self, data, fileobj, rescale=True): rescale : {True, False}, optional Whether to try and rescale data to match output dtype specified by header. For this minimal header, `rescale` has no effect - ''' + """ data = np.asarray(data) dtype = self.get_data_dtype() fileobj.write(data.astype(dtype).tobytes(order=self.data_layout)) def data_from_fileobj(self, fileobj): - ''' Read binary image data from `fileobj` ''' + """ Read binary image data from `fileobj` """ dtype = self.get_data_dtype() shape = self.get_data_shape() data_size = int(np.prod(shape) * dtype.itemsize) @@ -309,7 +309,7 @@ def supported_np_types(obj): class Header(SpatialHeader): - '''Alias for SpatialHeader; kept for backwards compatibility.''' + """Alias for SpatialHeader; kept for backwards compatibility.""" @deprecate_with_version('Header class is deprecated.\n' 'Please use SpatialHeader instead.' @@ -324,10 +324,10 @@ class ImageDataError(Exception): class SpatialFirstSlicer(object): - ''' Slicing interface that returns a new image with an updated affine + """ Slicing interface that returns a new image with an updated affine Checks that an image's first three axes are spatial - ''' + """ def __init__(self, img): # Local import to avoid circular import on module load from .imageclasses import spatial_axes_first @@ -350,7 +350,7 @@ def __getitem__(self, slicer): return self.img.__class__(dataobj.copy(), affine, self.img.header) def check_slicing(self, slicer, return_spatial=False): - ''' Canonicalize slicers and check for scalar indices in spatial dims + """ Canonicalize slicers and check for scalar indices in spatial dims Parameters ---------- @@ -365,7 +365,7 @@ def check_slicing(self, slicer, return_spatial=False): slicer : object Validated slicer object that will slice image's `dataobj` without collapsing spatial dimensions - ''' + """ slicer = canonical_slicers(slicer, self.img.shape) # We can get away with this because we've checked the image's # first three axes are spatial. 
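A quick sketch of this slicing interface on a synthetic image (shape and
affine invented for illustration):

    import numpy as np
    import nibabel as nib

    img = nib.Nifti1Image(np.zeros((10, 20, 30)), np.eye(4))
    cropped = img.slicer[2:8, 5:15, :]
    assert cropped.shape == (6, 10, 30)
    # The affine translation follows the new origin of the crop.
    assert np.allclose(cropped.affine[:3, 3], [2, 5, 0])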
@@ -417,13 +417,13 @@ def slice_affine(self, slicer): class SpatialImage(DataobjImage): - ''' Template class for volumetric (3D/4D) images ''' + """ Template class for volumetric (3D/4D) images """ header_class = SpatialHeader ImageSlicer = SpatialFirstSlicer def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): - ''' Initialize image + """ Initialize image The image is a combination of (array-like, affine matrix, header), with optional metadata in `extra`, and filename / file-like objects @@ -447,7 +447,7 @@ def __init__(self, dataobj, affine, header=None, metadata of this image type file_map : mapping, optional mapping giving file information for this image format - ''' + """ super(SpatialImage, self).__init__(dataobj, header=header, extra=extra, file_map=file_map) if affine is not None: @@ -474,7 +474,7 @@ def affine(self): return self._affine def update_header(self): - ''' Harmonize header with image data and affine + """ Harmonize header with image data and affine >>> data = np.zeros((2,3,4)) >>> affine = np.diag([1.0,2.0,3.0,1.0]) @@ -486,7 +486,7 @@ def update_header(self): True >>> img.header.get_zooms() (1.0, 2.0, 3.0) - ''' + """ hdr = self._header shape = self._dataobj.shape # We need to update the header if the data shape has changed. It's a @@ -539,7 +539,7 @@ def get_affine(self): @classmethod def from_image(klass, img): - ''' Class method to create new instance of own class from `img` + """ Class method to create new instance of own class from `img` Parameters ---------- @@ -551,7 +551,7 @@ def from_image(klass, img): ------- cimg : ``spatialimage`` instance Image, of our own class - ''' + """ return klass(img.dataobj, img.affine, klass.header_class.from_header(img.header), @@ -578,11 +578,11 @@ def slicer(self): return self.ImageSlicer(self) def __getitem__(self, idx): - ''' No slicing or dictionary interface for images + """ No slicing or dictionary interface for images Use the slicer attribute to perform cropping and subsampling at your own risk. - ''' + """ raise TypeError( "Cannot slice image objects; consider using `img.slicer[slice]` " "to generate a sliced image (see documentation for caveats) or " diff --git a/nibabel/spm2analyze.py b/nibabel/spm2analyze.py index 1b53238b44..6786b19a0c 100644 --- a/nibabel/spm2analyze.py +++ b/nibabel/spm2analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Read / write access to SPM2 version of analyze image format ''' +""" Read / write access to SPM2 version of analyze image format """ import numpy as np from . import spm99analyze as spm99 # module import @@ -23,20 +23,20 @@ class Spm2AnalyzeHeader(spm99.Spm99AnalyzeHeader): - ''' Class for SPM2 variant of basic Analyze header + """ Class for SPM2 variant of basic Analyze header SPM2 variant adds the following to basic Analyze format: * voxel origin; * slope scaling of data; * reading - but not writing - intercept of data. 
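A short sketch of that slope handling on a default header (the 0.5 value
is made up):

    from nibabel.spm2analyze import Spm2AnalyzeHeader

    hdr = Spm2AnalyzeHeader()
    hdr['scl_slope'] = 0.5
    # SPM-flavoured Analyze stores a slope but never writes an intercept.
    assert hdr.get_slope_inter() == (0.5, None)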
- ''' + """ # Copies of module level definitions template_dtype = header_dtype def get_slope_inter(self): - ''' Get data scaling (slope) and intercept from header data + """ Get data scaling (slope) and intercept from header data Uses the algorithm from SPM2 spm_vol_ana.m by John Ashburner @@ -95,7 +95,7 @@ def get_slope_inter(self): >>> hdr['glmin'] = 10 >>> np.allclose(hdr.get_slope_inter(), [0.6/100, 0.2-0.6/100*10]) True - ''' + """ # get scaling factor from 'scl_slope' (funused1) slope = float(self['scl_slope']) if np.isfinite(slope) and slope: diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 2ae780ebde..4d14724807 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Read / write access to SPM99 version of analyze image format ''' +""" Read / write access to SPM99 version of analyze image format """ import warnings import numpy as np @@ -19,7 +19,7 @@ from .optpkg import optional_package have_scipy = optional_package('scipy')[1] -''' Support subtle variations of SPM version of Analyze ''' +""" Support subtle variations of SPM version of Analyze """ header_key_dtd = analyze.header_key_dtd # funused1 in dime subfield is scalefactor image_dimension_dtd = analyze.image_dimension_dtd[:] @@ -39,7 +39,7 @@ class SpmAnalyzeHeader(analyze.AnalyzeHeader): - ''' Basic scaling Spm Analyze header ''' + """ Basic scaling Spm Analyze header """ # Copies of module level definitions template_dtype = header_dtype @@ -49,17 +49,17 @@ class SpmAnalyzeHeader(analyze.AnalyzeHeader): @classmethod def default_structarr(klass, endianness=None): - ''' Create empty header binary block with given endianness ''' + """ Create empty header binary block with given endianness """ hdr_data = super(SpmAnalyzeHeader, klass).default_structarr(endianness) hdr_data['scl_slope'] = 1 return hdr_data def get_slope_inter(self): - ''' Get scalefactor and intercept + """ Get scalefactor and intercept If scalefactor is 0.0 return None to indicate no scalefactor. Intercept is always None because SPM99 analyze cannot store intercepts. - ''' + """ slope = self._structarr['scl_slope'] # Return invalid slopes as None if np.isnan(slope) or slope in (0, -np.inf, np.inf): @@ -67,7 +67,7 @@ def get_slope_inter(self): return slope, None def set_slope_inter(self, slope, inter=None): - ''' Set slope and / or intercept into header + """ Set slope and / or intercept into header Set slope and intercept for image data, such that, if the image data is ``arr``, then the scaled image data will be ``(arr * @@ -85,7 +85,7 @@ def set_slope_inter(self, slope, inter=None): inter : None or float, optional intercept. Must be None, NaN or 0, because SPM99 cannot store intercepts. - ''' + """ if slope is None: slope = np.nan if slope in (0, -np.inf, np.inf): @@ -98,16 +98,16 @@ def set_slope_inter(self, slope, inter=None): class Spm99AnalyzeHeader(SpmAnalyzeHeader): - ''' Class for SPM99 variant of basic Analyze header + """ Class for SPM99 variant of basic Analyze header SPM99 variant adds the following to basic Analyze format: * voxel origin; * slope scaling of data. - ''' + """ def get_origin_affine(self): - ''' Get affine from header, using SPM origin field if sensible + """ Get affine from header, using SPM origin field if sensible The default translations are got from the ``origin`` field, if set, or from the center of the image otherwise. 
@@ -137,7 +137,7 @@ def get_origin_affine(self): [ 0., 2., 0., -4.], [ 0., 0., 1., -3.], [ 0., 0., 0., 1.]]) - ''' + """ hdr = self._structarr zooms = hdr['pixdim'][1:4].copy() if self.default_x_flip: @@ -159,7 +159,7 @@ def get_origin_affine(self): get_best_affine = get_origin_affine def set_origin_from_affine(self, affine): - ''' Set SPM origin to header from affine matrix. + """ Set SPM origin to header from affine matrix. The ``origin`` field was read but not written by SPM99 and 2. It was used for storing a central voxel coordinate, that could be used in @@ -201,7 +201,7 @@ def set_origin_from_affine(self, affine): [ 0., 2., 0., -6.], [ 0., 0., 1., -4.], [ 0., 0., 0., 1.]]) - ''' + """ if affine.shape != (4, 4): raise ValueError('Need 4x4 affine to set') hdr = self._structarr @@ -244,7 +244,7 @@ class Spm99AnalyzeImage(analyze.AnalyzeImage): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - ''' Class method to create image from mapping in ``file_map`` + """ Class method to create image from mapping in ``file_map`` .. deprecated:: 2.4.1 ``keep_file_open='auto'`` is redundant with `False` and has @@ -277,7 +277,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): ------- img : Spm99AnalyzeImage instance - ''' + """ ret = super(Spm99AnalyzeImage, klass).from_file_map( file_map, mmap=mmap, keep_file_open=keep_file_open) try: @@ -313,7 +313,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): return ret def to_file_map(self, file_map=None): - ''' Write image to `file_map` or contained ``self.file_map`` + """ Write image to `file_map` or contained ``self.file_map`` Extends Analyze ``to_file_map`` method by writing ``mat`` file @@ -322,7 +322,7 @@ def to_file_map(self, file_map=None): file_map : None or mapping, optional files mapping. If None (default) use object's ``file_map`` attribute instead - ''' + """ if file_map is None: file_map = self.file_map super(Spm99AnalyzeImage, self).to_file_map(file_map) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 52055ebcc3..71f9e84db2 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Utilities for testing ''' +""" Utilities for testing """ import re import os diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index b092a2334c..a71ba3339f 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -6,11 +6,11 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Test Analyze headers +""" Test Analyze headers See test_wrapstruct.py for tests of the wrapped structarr-ness of the Analyze header -''' +""" import os import re diff --git a/nibabel/tests/test_batteryrunners.py b/nibabel/tests/test_batteryrunners.py index 69f18b75ac..586f277150 100644 --- a/nibabel/tests/test_batteryrunners.py +++ b/nibabel/tests/test_batteryrunners.py @@ -6,8 +6,8 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Tests for BatteryRunner and Report objects -''' +""" Tests for BatteryRunner and Report objects +""" from io import StringIO diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index e5d5000438..57d5b36f38 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -1,6 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -''' Tests for data module ''' +""" Tests for data module """ import os from os.path import join as pjoin from os import environ as env diff --git a/nibabel/tests/test_endiancodes.py b/nibabel/tests/test_endiancodes.py index 94c9ea0344..a9af11f052 100644 --- a/nibabel/tests/test_endiancodes.py +++ b/nibabel/tests/test_endiancodes.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Tests for endiancodes module ''' +""" Tests for endiancodes module """ import sys diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index 915e65e552..836444b3ba 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Tests for Euler angles ''' +""" Tests for Euler angles """ import math import numpy as np @@ -20,7 +20,7 @@ FLOAT_EPS = np.finfo(np.float).eps -# Example rotations ''' +# Example rotations """ eg_rots = [] params = np.arange(-pi * 2, pi * 2.5, pi / 2) for x in params: @@ -135,7 +135,7 @@ def sympy_euler2quat(z=0, y=0, x=0): def crude_mat2euler(M): - ''' The simplest possible - ignoring atan2 instability ''' + """ The simplest possible - ignoring atan2 instability """ r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat return math.atan2(-r12, r11), math.asin(r13), math.atan2(-r23, r33) diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index b0abc6d608..e53d6ebd29 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Tests for filename container ''' +""" Tests for filename container """ from ..filename_parser import (types_filenames, TypesFilenamesError, parse_filename, splitext_addext) diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index db196995e0..f6f7b59d34 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Test for image funcs ''' +""" Test for image funcs """ import numpy as np diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 429144108c..b06d95c041 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Tests for loader function ''' +""" Tests for loader function """ from io import BytesIO import shutil diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index 632e23224d..a19289037f 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Tests for is_image / may_contain_header functions ''' +""" Tests for is_image / may_contain_header functions """ import copy from os.path import dirname, basename, join as pjoin diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 0494b8fe98..901c94ccf5 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Tests for nifti reading package ''' +""" Tests for nifti reading package """ import os import warnings import struct diff --git a/nibabel/tests/test_nifti2.py b/nibabel/tests/test_nifti2.py index ca6e7d8125..83fefbc74b 100644 --- a/nibabel/tests/test_nifti2.py +++ b/nibabel/tests/test_nifti2.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Tests for nifti2 reading package ''' +""" Tests for nifti2 reading package """ import os import numpy as np diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 85a8f4a0a7..02744fd866 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Test for openers module ''' +""" Test for openers module """ import os import contextlib from gzip import GzipFile diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 2322d570be..e2786c074e 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Testing for orientations module ''' +""" Testing for orientations module """ import numpy as np import warnings diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index cb24c7d0ce..ac76023584 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Test quaternion calculations ''' +""" Test quaternion calculations """ import numpy as np from numpy import pi @@ -18,7 +18,7 @@ from .. import quaternions as nq from .. import eulerangles as nea -# Example rotations ''' +# Example rotations """ eg_rots = [] params = (-pi, pi, pi / 2) zs = np.arange(*params) diff --git a/nibabel/tests/test_recoder.py b/nibabel/tests/test_recoder.py index d6206df978..713e192707 100644 --- a/nibabel/tests/test_recoder.py +++ b/nibabel/tests/test_recoder.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Tests recoder class ''' +""" Tests recoder class """ import numpy as np diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index f314e6b572..9ef8dd3bad 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Test for scaling / rounding in volumeutils module ''' +""" Test for scaling / rounding in volumeutils module """ import numpy as np diff --git a/nibabel/tests/test_spm2analyze.py b/nibabel/tests/test_spm2analyze.py index a88d3cafd4..582f6b70bd 100644 --- a/nibabel/tests/test_spm2analyze.py +++ b/nibabel/tests/test_spm2analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Tests for SPM2 header stuff ''' +""" Tests for SPM2 header stuff """ import numpy as np diff --git a/nibabel/tests/test_trackvis.py b/nibabel/tests/test_trackvis.py index 562b2a128b..3591878be2 100644 --- a/nibabel/tests/test_trackvis.py +++ b/nibabel/tests/test_trackvis.py @@ -1,4 +1,4 @@ -''' Testing trackvis module ''' +""" Testing trackvis module """ from functools import partial diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 823350a3cd..f84878f55c 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Test for volumeutils module ''' +""" Test for volumeutils module """ import os from os.path import exists @@ -221,7 +221,7 @@ def test_array_from_file_mmap(): def buf_chk(in_arr, out_buf, in_buf, offset): - ''' Write contents of in_arr into fileobj, read back, check same ''' + """ Write contents of in_arr into fileobj, read back, check same """ instr = b' ' * offset + in_arr.tobytes(order='F') out_buf.write(instr) out_buf.flush() @@ -1279,7 +1279,7 @@ def run(self): def _calculate_scale(data, out_dtype, allow_intercept): - ''' Calculate scaling and optional intercept for data + """ Calculate scaling and optional intercept for data Copy of the deprecated volumeutils.calculate_scale, to preserve tests @@ -1303,7 +1303,7 @@ def _calculate_scale(data, out_dtype, allow_intercept): mx : None or float minimum of finite value in data, or None if this will not be used to threshold data - ''' + """ # Code here is a compatibility shell around arraywriters refactor in_dtype = data.dtype out_dtype = np.dtype(out_dtype) diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index a75cf0548b..fc63556edc 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Test binary header objects +""" Test binary header objects This is a root testing class, used in the Analyze and other tests as a framework for all the tests common to the Analyze types @@ -22,7 +22,7 @@ With deprecation warnings _field_recoders -> field_recoders -''' +""" import logging import numpy as np @@ -107,10 +107,10 @@ def log_chk(hdr, level): class _TestWrapStructBase(BaseTestCase): - ''' Class implements base tests for binary headers + """ Class implements base tests for binary headers It serves as a base class for other binary header tests - ''' + """ header_class = None def get_bad_bb(self): @@ -193,12 +193,12 @@ def test_mappingness(self): def test_endianness_ro(self): # endianness is a read only property - ''' Its use in initialization tested in the init tests. + """ Its use in initialization tested in the init tests. Endianness gives endian interpretation of binary data. It is read only because the only common use case is to set the endianness on initialization (or occasionally byteswapping the data) - but this is done via via the as_byteswapped method - ''' + """ hdr = self.header_class() with pytest.raises(AttributeError): hdr.endianness = '<' @@ -369,11 +369,11 @@ def default_structarr(klass, endianness=None): @classmethod def _get_checks(klass): - ''' Return sequence of check functions for this class ''' + """ Return sequence of check functions for this class """ return (klass._chk_integer, klass._chk_string) - ''' Check functions in format expected by BatteryRunner class ''' + """ Check functions in format expected by BatteryRunner class """ @staticmethod def _chk_integer(hdr, fix=False): rep = Report(HeaderDataError) diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 2636d8acb7..5ae4097c29 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -6,8 +6,8 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Contexts for *with* statement providing temporary directories -''' +""" Contexts for *with* statement providing temporary directories +""" import os import shutil from tempfile import template, mkdtemp @@ -49,7 +49,7 @@ def __exit__(self, exc, value, tb): class InTemporaryDirectory(TemporaryDirectory): - ''' Create, return, and change directory to a temporary directory + """ Create, return, and change directory to a temporary directory Examples -------- @@ -63,7 +63,7 @@ class InTemporaryDirectory(TemporaryDirectory): False >>> os.getcwd() == my_cwd True - ''' + """ def __enter__(self): self._pwd = os.getcwd() diff --git a/nibabel/trackvis.py b/nibabel/trackvis.py index 09d7602117..3b46336bd8 100644 --- a/nibabel/trackvis.py +++ b/nibabel/trackvis.py @@ -103,7 +103,7 @@ class DataError(Exception): 'nibabel.streamlines.load, instead.', since='2.5.0', until='4.0.0') def read(fileobj, as_generator=False, points_space=None, strict=True): - ''' Read trackvis file from `fileobj`, return `streamlines`, `header` + """ Read trackvis file from `fileobj`, return `streamlines`, `header` Parameters ---------- @@ -152,7 +152,7 @@ def read(fileobj, as_generator=False, points_space=None, strict=True): coordinates, ``x, y, z``, where ``x`` is the floating point voxel coordinate along the first image axis, multiplied by the voxel size for that axis. 
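As this module is deprecated, the supported replacement reads roughly as
follows (file name invented):

    import nibabel as nib

    trk = nib.streamlines.load('tracks.trk')
    streamlines = trk.streamlines  # points in rasmm space by default
    header = trk.header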
- ''' + """ fileobj = ImageOpener(fileobj) hdr_str = fileobj.read(header_2_dtype.itemsize) # try defaulting to version 2 format @@ -262,7 +262,7 @@ def track_gen(): since='2.5.0', until='4.0.0') def write(fileobj, streamlines, hdr_mapping=None, endianness=None, points_space=None): - ''' Write header and `streamlines` to trackvis file `fileobj` + """ Write header and `streamlines` to trackvis file `fileobj` The parameters from the streamlines override conflicting parameters in the `hdr_mapping` information. In particular, the number of @@ -356,7 +356,7 @@ def write(fileobj, streamlines, hdr_mapping=None, endianness=None, This information comes from some helpful replies on the trackvis forum about `interpreting point coordiantes `_ - ''' + """ stream_iter = iter(streamlines) try: streams0 = next(stream_iter) @@ -511,7 +511,7 @@ def _check_hdr_points_space(hdr, points_space): def _hdr_from_mapping(hdr=None, mapping=None, endianness=native_code): - ''' Fill `hdr` from mapping `mapping`, with given endianness ''' + """ Fill `hdr` from mapping `mapping`, with given endianness """ if hdr is None: # passed a valid mapping as header? Copy and return if isinstance(mapping, np.ndarray): @@ -546,7 +546,7 @@ def _hdr_from_mapping(hdr=None, mapping=None, endianness=native_code): 'nibabel.streamlines.TrkFile.create_empty_header, instead.', since='2.5.0', until='4.0.0') def empty_header(endianness=None, version=2): - ''' Empty trackvis header + """ Empty trackvis header Parameters ---------- @@ -583,7 +583,7 @@ def empty_header(endianness=None, version=2): missing. We make no attempt to fill it with sensible defaults on the basis that, if the information is missing, it is better to be explicit. - ''' + """ if version == 1: dt = header_1_dtype elif version == 2: @@ -603,7 +603,7 @@ def empty_header(endianness=None, version=2): 'nibabel.streamlines.trk.get_affine_trackvis_to_rasmm, instead.', since='2.5.0', until='4.0.0') def aff_from_hdr(trk_hdr, atleast_v2=True): - ''' Return voxel to mm affine from trackvis header + """ Return voxel to mm affine from trackvis header Affine is mapping from voxel space to Nifti (RAS) output coordinate system convention; x: Left -> Right, y: Posterior -> Anterior, z: @@ -636,7 +636,7 @@ def aff_from_hdr(trk_hdr, atleast_v2=True): allow negative voxel sizes (needed for recording axis flips) and sets the origin field to 0. In future, we'll raise an error rather than try and estimate the affine from version 1 fields - ''' + """ if trk_hdr['version'] == 2: aff = trk_hdr['vox_to_ras'] if aff[3, 3] != 0: @@ -683,7 +683,7 @@ def aff_from_hdr(trk_hdr, atleast_v2=True): 'nibabel.streamlines.TrkFile.affine_to_rasmm property, instead.', since='2.5.0', until='4.0.0') def aff_to_hdr(affine, trk_hdr, pos_vox=True, set_order=True): - ''' Set affine `affine` into trackvis header `trk_hdr` + """ Set affine `affine` into trackvis header `trk_hdr` Affine is mapping from voxel space to Nifti RAS) output coordinate system convention; x: Left -> Right, y: Posterior -> Anterior, z: @@ -723,7 +723,7 @@ def aff_to_hdr(affine, trk_hdr, pos_vox=True, set_order=True): reliably. It turns out that negative flips upset trackvis (the application). The application also ignores the origin field, and may not use the 'image_orientation_patient' field. 
- ''' + """ try: version = trk_hdr['version'] except (KeyError, ValueError): # dict or structured array @@ -765,7 +765,7 @@ class TrackvisFileError(Exception): class TrackvisFile(object): - ''' Convenience class to encapsulate trackvis file information + """ Convenience class to encapsulate trackvis file information Parameters ---------- @@ -792,7 +792,7 @@ class TrackvisFile(object): Affine expressing relationship of voxels in an image to mm in RAS mm space. If 'points_space' is not None, you can use this to give the relationship between voxels, rasmm and voxmm space (above). - ''' + """ @deprecate_with_version('TrackvisFile is deprecated; please use ' 'nibabel.streamlines.TrkFile, instead.', diff --git a/nibabel/tripwire.py b/nibabel/tripwire.py index e31cfe7258..abb54268d4 100644 --- a/nibabel/tripwire.py +++ b/nibabel/tripwire.py @@ -47,5 +47,5 @@ def __init__(self, msg): self._msg = msg def __getattr__(self, attr_name): - ''' Raise informative error accessing attributes ''' + """ Raise informative error accessing attributes """ raise TripWireError(self._msg) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index c3a82db418..606e06f52f 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Utility functions for analyze-like formats ''' +""" Utility functions for analyze-like formats """ import sys import warnings @@ -42,7 +42,7 @@ class Recoder(object): - ''' class to return canonical code(s) from code or aliases + """ class to return canonical code(s) from code or aliases The concept is a lot easier to read in the implementation and tests than it is to explain, so... @@ -73,10 +73,10 @@ class Recoder(object): >>> # indexing the object directly >>> recodes[2] 2 - ''' + """ def __init__(self, codes, fields=('code',), map_maker=OrderedDict): - ''' Create recoder object + """ Create recoder object ``codes`` give a sequence of code, alias sequences ``fields`` are names by which the entries in these sequences can be @@ -103,7 +103,7 @@ def __init__(self, codes, fields=('code',), map_maker=OrderedDict): Default is ``dict``. ``map_maker()`` generates an empty mapping. The mapping need only implement ``__getitem__, __setitem__, keys, values``. 
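A compact sketch of the pattern this describes, with an invented code
table:

    from nibabel.volumeutils import Recoder

    codes = ((1, 'one', 'first'),
             (2, 'two', 'second'))
    rc = Recoder(codes, fields=('code', 'label'))
    assert rc['second'] == 2     # any alias recodes to the first field
    assert rc.label[1] == 'one'  # named fields are alias -> value maps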
- ''' + """ self.fields = tuple(fields) self.field1 = {} # a placeholder for the check below for name in fields: @@ -115,7 +115,7 @@ def __init__(self, codes, fields=('code',), map_maker=OrderedDict): self.add_codes(codes) def add_codes(self, code_syn_seqs): - ''' Add codes to object + """ Add codes to object Parameters ---------- @@ -139,7 +139,7 @@ def add_codes(self, code_syn_seqs): True >>> print(rc.value_set()) # set is actually ordered OrderedSet([2, 1, 3]) - ''' + """ for code_syns in code_syn_seqs: # Add all the aliases for alias in code_syns: @@ -149,7 +149,7 @@ def add_codes(self, code_syn_seqs): self.__dict__[field_name][alias] = code_syns[field_ind] def __getitem__(self, key): - ''' Return value from field1 dictionary (first column of values) + """ Return value from field1 dictionary (first column of values) Returns same value as ``obj.field1[key]`` and, with the default initializing ``fields`` argument of fields=('code',), @@ -158,7 +158,7 @@ def __getitem__(self, key): >>> codes = ((1, 'one'), (2, 'two')) >>> Recoder(codes)['two'] 2 - ''' + """ return self.field1[key] def __contains__(self, key): @@ -171,7 +171,7 @@ def __contains__(self, key): return True def keys(self): - ''' Return all available code and alias values + """ Return all available code and alias values Returns same value as ``obj.field1.keys()`` and, with the default initializing ``fields`` argument of fields=('code',), @@ -181,11 +181,11 @@ def keys(self): >>> k = Recoder(codes).keys() >>> set(k) == set([1, 2, 'one', 'repeat value', 'two']) True - ''' + """ return self.field1.keys() def value_set(self, name=None): - ''' Return OrderedSet of possible returned values for column + """ Return OrderedSet of possible returned values for column By default, the column is the first column. @@ -206,7 +206,7 @@ def value_set(self, name=None): >>> rc = Recoder(codes, fields=('code', 'label')) >>> rc.value_set('label') == set(('one', 'two', 'repeat value')) True - ''' + """ if name is None: d = self.field1 else: @@ -274,7 +274,7 @@ def __getitem__(self, key): def pretty_mapping(mapping, getterfunc=None): - ''' Make pretty string from mapping + """ Make pretty string from mapping Adjusts text column to print values on basis of longest key. Probably only sensible if keys are mainly strings. @@ -320,7 +320,7 @@ def pretty_mapping(mapping, getterfunc=None): >>> print(pretty_mapping(C(), getter)) short_field : 0 longer_field : method string - ''' + """ if getterfunc is None: getterfunc = lambda obj, key: obj[key] lens = [len(str(name)) for name in mapping] @@ -334,7 +334,7 @@ def pretty_mapping(mapping, getterfunc=None): def make_dt_codes(codes_seqs): - ''' Create full dt codes Recoder instance from datatype codes + """ Create full dt codes Recoder instance from datatype codes Include created numpy dtype (from numpy type) and opposite endian numpy dtype @@ -354,7 +354,7 @@ def make_dt_codes(codes_seqs): of the corresponding code, name, type, dtype, or swapped dtype. You can also index with ``niistring`` values if codes_seqs had sequences of length 4 instead of 3. 
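For instance, with a made-up two-row datatype table:

    import numpy as np
    from nibabel.volumeutils import make_dt_codes

    dt_codes = make_dt_codes(((2, 'uint8', np.uint8),
                              (4, 'int16', np.int16)))
    assert dt_codes['int16'] == 4
    assert dt_codes.dtype[4] == np.dtype(np.int16)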
- ''' + """ fields = ['code', 'label', 'type'] len0 = len(codes_seqs[0]) if len0 not in (3, 4): @@ -378,7 +378,7 @@ def make_dt_codes(codes_seqs): '1.2', '3.0') def can_cast(in_type, out_type, has_intercept=False, has_slope=False): - ''' Return True if we can safely cast ``in_type`` to ``out_type`` + """ Return True if we can safely cast ``in_type`` to ``out_type`` Parameters ---------- @@ -424,7 +424,7 @@ def can_cast(in_type, out_type, has_intercept=False, has_slope=False): False >>> can_cast(np.int16, np.uint8, True, True) # doctest: +SKIP True - ''' + """ in_dtype = np.dtype(in_type) # Whether we can cast depends on the data, and we've only got the type. # Let's assume integers use all of their range but floats etc not @@ -448,7 +448,7 @@ def _is_compressed_fobj(fobj): def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): - ''' Get array from file with specified shape, dtype and file offset + """ Get array from file with specified shape, dtype and file offset Parameters ---------- @@ -489,7 +489,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): >>> arr2 = array_from_file((1,2,3), arr.dtype, bio, 10) >>> np.all(arr == arr2) True - ''' + """ if mmap not in (True, False, 'c', 'r', 'r+'): raise ValueError("mmap value should be one of True, False, 'c', " "'r', 'r+'") @@ -542,7 +542,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): def array_to_file(data, fileobj, out_dtype=None, offset=0, intercept=0.0, divslope=1.0, mn=None, mx=None, order='F', nan2zero=True): - ''' Helper function for writing arrays to file objects + """ Helper function for writing arrays to file objects Writes arrays as scaled by `intercept` and `divslope`, and clipped at (prescaling) `mn` minimum, and `mx` maximum. @@ -621,7 +621,7 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, >>> array_to_file(data, sio, np.float, order='C') >>> sio.getvalue() == data.tobytes('C') True - ''' + """ # Shield special case div_none = divslope is None if not np.all( @@ -1015,7 +1015,7 @@ def working_type(in_type, slope=1.0, inter=0.0): '1.2', '3.0') def calculate_scale(data, out_dtype, allow_intercept): - ''' Calculate scaling and optional intercept for data + """ Calculate scaling and optional intercept for data Parameters ---------- @@ -1037,7 +1037,7 @@ def calculate_scale(data, out_dtype, allow_intercept): mx : None or float minimum of finite value in data, or None if this will not be used to threshold data - ''' + """ # Code here is a compatibility shell around arraywriters refactor in_dtype = data.dtype out_dtype = np.dtype(out_dtype) @@ -1063,7 +1063,7 @@ def calculate_scale(data, out_dtype, allow_intercept): '1.2', '3.0') def scale_min_max(mn, mx, out_type, allow_intercept): - ''' Return scaling and intercept min, max of data, given output type + """ Return scaling and intercept min, max of data, given output type Returns ``scalefactor`` and ``intercept`` to best fit data with given ``mn`` and ``mx`` min and max values into range of data type @@ -1124,7 +1124,7 @@ def scale_min_max(mn, mx, out_type, allow_intercept): The large integers lead to python long types as max / min for type. To contain the rounding error, we need to use the maximum numpy float types when casting to float. 
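Restating that arithmetic by hand for a uint8 target (the input range is
invented; this is the idea, not the library routine):

    import numpy as np

    mn, mx = -1.0, 3.0
    info = np.iinfo(np.uint8)
    slope = (mx - mn) / (info.max - info.min)
    inter = mn - info.min * slope
    stored = np.round((np.array([mn, mx]) - inter) / slope)
    # Unscaling recovers the range: data ~ stored * slope + inter
    assert np.allclose(stored * slope + inter, [mn, mx])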
- ''' + """ if mn > mx: raise ValueError('min value > max value') info = type_info(out_type) @@ -1361,7 +1361,7 @@ def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', def finite_range(arr, check_nan=False): - ''' Get range (min, max) or range and flag (min, max, has_nan) from `arr` + """ Get range (min, max) or range and flag (min, max, has_nan) from `arr` Parameters ---------- @@ -1405,7 +1405,7 @@ def finite_range(arr, check_nan=False): Traceback (most recent call last): ... TypeError: Can only handle numeric types - ''' + """ arr = np.asarray(arr) if arr.size == 0: return (np.inf, -np.inf) + (False,) * check_nan @@ -1457,7 +1457,7 @@ def finite_range(arr, check_nan=False): def shape_zoom_affine(shape, zooms, x_flip=True): - ''' Get affine implied by given shape and zooms + """ Get affine implied by given shape and zooms We get the translations from the center of the image (implied by `shape`). @@ -1492,7 +1492,7 @@ def shape_zoom_affine(shape, zooms, x_flip=True): [ 0., 2., 0., -4.], [ 0., 0., 1., -3.], [ 0., 0., 0., 1.]]) - ''' + """ shape = np.asarray(shape) zooms = np.array(zooms) # copy because of flip below ndims = len(shape) @@ -1519,7 +1519,7 @@ def shape_zoom_affine(shape, zooms, x_flip=True): def rec2dict(rec): - ''' Convert recarray to dictionary + """ Convert recarray to dictionary Also converts scalar values to scalars @@ -1539,7 +1539,7 @@ def rec2dict(rec): >>> d = rec2dict(r) >>> d == {'x': 0, 's': b''} True - ''' + """ dct = {} for key in rec.dtype.fields: val = rec[key] diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py index d6dbde6290..4eabe2504a 100644 --- a/nibabel/wrapstruct.py +++ b/nibabel/wrapstruct.py @@ -129,7 +129,7 @@ def __init__(self, binaryblock=None, endianness=None, check=True): - ''' Initialize WrapStruct from binary data block + """ Initialize WrapStruct from binary data block Parameters ---------- @@ -153,7 +153,7 @@ def __init__(self, >>> wstr1['integer'] = 1 >>> wstr1['integer'] array(1, dtype=int16) - ''' + """ if binaryblock is None: self._structarr = self.__class__.default_structarr(endianness) return @@ -176,7 +176,7 @@ def __init__(self, @classmethod def from_fileobj(klass, fileobj, endianness=None, check=True): - ''' Return read structure with given or guessed endiancode + """ Return read structure with given or guessed endiancode Parameters ---------- @@ -189,13 +189,13 @@ def from_fileobj(klass, fileobj, endianness=None, check=True): ------- wstr : WrapStruct object WrapStruct object initialized from data in fileobj - ''' + """ raw_str = fileobj.read(klass.template_dtype.itemsize) return klass(raw_str, endianness, check) @property def binaryblock(self): - ''' binary block of data as string + """ binary block of data as string Returns ------- @@ -208,11 +208,11 @@ def binaryblock(self): >>> wstr = WrapStruct() >>> len(wstr.binaryblock) 2 - ''' + """ return self._structarr.tobytes() def write_to(self, fileobj): - ''' Write structure to fileobj + """ Write structure to fileobj Write starts at fileobj current file position. @@ -233,12 +233,12 @@ def write_to(self, fileobj): >>> wstr.write_to(str_io) >>> wstr.binaryblock == str_io.getvalue() True - ''' + """ fileobj.write(self.binaryblock) @property def endianness(self): - ''' endian code of binary data + """ endian code of binary data The endianness code gives the current byte order interpretation of the binary data. 
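The behaviour described here is plain numpy byte-order semantics, for
example:

    import numpy as np

    dt = np.dtype([('integer', '<i2')])
    arr = np.zeros((1,), dtype=dt)
    arr['integer'] = 3
    swapped = arr.view(dt.newbyteorder('>'))  # same bytes, new reading
    assert arr.tobytes() == swapped.tobytes()
    assert swapped['integer'][0] == 768       # 0x0300 read big-endian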
@@ -256,13 +256,13 @@ def endianness(self): read only because the only common use case is to set the endianness on initialization, or occasionally byteswapping the data - but this is done via the as_byteswapped method - ''' + """ if self._structarr.dtype.isnative: return native_code return swapped_code def copy(self): - ''' Return copy of structure + """ Return copy of structure >>> wstr = WrapStruct() >>> wstr['integer'] = 3 @@ -271,11 +271,11 @@ def copy(self): False >>> wstr2['integer'] array(3, dtype=int16) - ''' + """ return self.__class__(self.binaryblock, self.endianness, check=False) def __eq__(self, other): - ''' equality between two structures defined by binaryblock + """ equality between two structures defined by binaryblock Examples -------- @@ -286,7 +286,7 @@ def __eq__(self, other): >>> wstr3 = WrapStruct(endianness=swapped_code) >>> wstr == wstr3 True - ''' + """ this_end = self.endianness this_bb = self.binaryblock try: @@ -303,18 +303,18 @@ def __ne__(self, other): return not self == other def __getitem__(self, item): - ''' Return values from structure data + """ Return values from structure data Examples -------- >>> wstr = WrapStruct() >>> wstr['integer'] == 0 True - ''' + """ return self._structarr[item] def __setitem__(self, item, value): - ''' Set values in structured data + """ Set values in structured data Examples -------- @@ -322,31 +322,31 @@ def __setitem__(self, item, value): >>> wstr['integer'] = 3 >>> wstr['integer'] array(3, dtype=int16) - ''' + """ self._structarr[item] = value def __iter__(self): return iter(self.keys()) def keys(self): - ''' Return keys from structured data''' + """ Return keys from structured data""" return list(self.template_dtype.names) def values(self): - ''' Return values from structured data''' + """ Return values from structured data""" data = self._structarr return [data[key] for key in self.template_dtype.names] def items(self): - ''' Return items from structured data''' + """ Return items from structured data""" return zip(self.keys(), self.values()) def get(self, k, d=None): - ''' Return value for the key k if present or d otherwise''' + """ Return value for the key k if present or d otherwise""" return self._structarr[k] if k in self.keys() else d def check_fix(self, logger=None, error_level=None): - ''' Check structured data with checks + """ Check structured data with checks Parameters ---------- @@ -354,7 +354,7 @@ def check_fix(self, logger=None, error_level=None): error_level : None or int Level of error severity at which to raise error. Any error of severity >= `error_level` will cause an exception. 
- ''' + """ if logger is None: logger = imageglobals.logger if error_level is None: @@ -366,7 +366,7 @@ def check_fix(self, logger=None, error_level=None): @classmethod def diagnose_binaryblock(klass, binaryblock, endianness=None): - ''' Run checks over binary data, return string ''' + """ Run checks over binary data, return string """ wstr = klass(binaryblock, endianness=endianness, check=False) battrun = BatteryRunner(klass._get_checks()) reports = battrun.check_only(wstr) @@ -375,7 +375,7 @@ def diagnose_binaryblock(klass, binaryblock, endianness=None): @classmethod def guessed_endian(self, mapping): - ''' Guess intended endianness from mapping-like ``mapping`` + """ Guess intended endianness from mapping-like ``mapping`` Parameters ---------- @@ -387,13 +387,13 @@ def guessed_endian(self, mapping): ------- endianness : {'<', '>'} Guessed endianness of binary data in ``wstr`` - ''' + """ raise NotImplementedError @classmethod def default_structarr(klass, endianness=None): - ''' Return structured array for default structure with given endianness - ''' + """ Return structured array for default structure with given endianness + """ dt = klass.template_dtype if endianness is not None: endianness = endian_codes[endianness] @@ -402,7 +402,7 @@ def default_structarr(klass, endianness=None): @property def structarr(self): - ''' Structured data, with data fields + """ Structured data, with data fields Examples -------- @@ -412,17 +412,17 @@ def structarr(self): Traceback (most recent call last): ... AttributeError: can't set attribute - ''' + """ return self._structarr def __str__(self): - ''' Return string representation for printing ''' + """ Return string representation for printing """ summary = "%s object, endian='%s'" % (self.__class__, self.endianness) return '\n'.join([summary, pretty_mapping(self)]) def as_byteswapped(self, endianness=None): - ''' return new byteswapped object with given ``endianness`` + """ return new byteswapped object with given ``endianness`` Guaranteed to make a copy even if endianness is the same as the current endianness. @@ -468,7 +468,7 @@ def as_byteswapped(self, endianness=None): True >>> nbs_wstr is wstr False - ''' + """ current = self.endianness if endianness is None: if current == native_code: @@ -484,7 +484,7 @@ def as_byteswapped(self, endianness=None): @classmethod def _get_checks(klass): - ''' Return sequence of check functions for this class ''' + """ Return sequence of check functions for this class """ return () @@ -494,7 +494,7 @@ class LabeledWrapStruct(WrapStruct): _field_recoders = {} # for recoding values for str def get_value_label(self, fieldname): - ''' Returns label for coded field + """ Returns label for coded field A coded field is an int field containing codes that stand for discrete values that also have string labels. 
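Concretely, via the NIfTI header (a LabeledWrapStruct subclass):

    import nibabel as nib

    hdr = nib.Nifti1Header()
    hdr['datatype'] = 4  # NIfTI code for int16
    assert hdr.get_value_label('datatype') == 'int16'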
@@ -527,7 +527,7 @@ def get_value_label(self, fieldname): >>> hdr['datatype'] = 2 >>> hdr.get_value_label('datatype') 'two' - ''' + """ if fieldname not in self._field_recoders: raise ValueError('%s not a coded field' % fieldname) code = int(self._structarr[fieldname]) @@ -537,7 +537,7 @@ def get_value_label(self, fieldname): return ''.format(code) def __str__(self): - ''' Return string representation for printing ''' + """ Return string representation for printing """ summary = "%s object, endian='%s'" % (self.__class__, self.endianness) def _getter(obj, key): diff --git a/nisext/sexts.py b/nisext/sexts.py index c8090f5eac..9ca3519f45 100644 --- a/nisext/sexts.py +++ b/nisext/sexts.py @@ -1,4 +1,4 @@ -''' Distutils / setuptools helpers ''' +""" Distutils / setuptools helpers """ import os from os.path import join as pjoin, split as psplit, splitext @@ -48,7 +48,7 @@ def get_comrec_build(pkg_dir, build_cmd=build_py): package for an example. """ class MyBuildPy(build_cmd): - ''' Subclass to write commit data into installation tree ''' + """ Subclass to write commit data into installation tree """ def run(self): build_cmd.run(self) import subprocess @@ -88,7 +88,7 @@ def package_check(pkg_name, version=None, messages=None, setuptools_args=None ): - ''' Check if package `pkg_name` is present and has good enough version + """ Check if package `pkg_name` is present and has good enough version Has two modes of operation. If `setuptools_args` is None (the default), raise an error for missing non-optional dependencies and log warnings for @@ -130,7 +130,7 @@ def package_check(pkg_name, version=None, If None, raise errors / warnings for missing non-optional / optional dependencies. If dict fill key values ``install_requires`` and ``extras_require`` for non-optional and optional dependencies. - ''' + """ setuptools_mode = not setuptools_args is None optional_tf = bool(optional) if version_getter is None: diff --git a/nisext/testers.py b/nisext/testers.py index e0ca4a040a..f324d272b4 100644 --- a/nisext/testers.py +++ b/nisext/testers.py @@ -1,4 +1,4 @@ -''' Test package information in various install settings +""" Test package information in various install settings The routines here install the package from source directories, zips or eggs, and check these installations by running tests, checking version information, @@ -27,7 +27,7 @@ bdist-egg-tests: $(PYTHON) -c 'from nisext.testers import bdist_egg_tests; bdist_egg_tests("nibabel")' -''' +""" import os @@ -94,7 +94,7 @@ def back_tick(cmd, ret_err=False, as_str=True): def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): - ''' Run command in own process in anonymous path + """ Run command in own process in anonymous path Parameters ---------- @@ -116,7 +116,7 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): stdout as str stderr : str stderr as str - ''' + """ if script_dir is None: paths_add = '' else: @@ -162,11 +162,11 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): def zip_extract_all(fname, path=None): - ''' Extract all members from zipfile + """ Extract all members from zipfile Deals with situation where the directory is stored in the zipfile as a name, as well as files that have to go into this directory. 
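For comparison, the stdlib now covers this case directly (file names
invented):

    import zipfile

    with zipfile.ZipFile('archive.zip') as zf:
        zf.extractall('out_dir')  # creates stored directories as needed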
- ''' + """ zf = zipfile.ZipFile(fname) members = zf.namelist() # Remove members that are just bare directories @@ -241,7 +241,7 @@ def install_from_zip(zip_fname, install_path, pkg_finder=None, def contexts_print_info(mod_name, repo_path, install_path): - ''' Print result of get_info from different installation routes + """ Print result of get_info from different installation routes Runs installation from: @@ -260,7 +260,7 @@ def contexts_print_info(mod_name, repo_path, install_path): path to location of git repository install_path : str path into which to install temporary installations - ''' + """ site_pkgs_path = os.path.join(install_path, PY_LIB_SDIR) # first test archive pwd = os.path.abspath(os.getcwd()) @@ -282,7 +282,7 @@ def contexts_print_info(mod_name, repo_path, install_path): def info_from_here(mod_name): - ''' Run info context checks starting in working directory + """ Run info context checks starting in working directory Runs checks from current working directory, installing temporary installations into a new temporary directory @@ -291,7 +291,7 @@ def info_from_here(mod_name): ---------- mod_name : str package name that will be installed, and tested - ''' + """ repo_path = os.path.abspath(os.getcwd()) install_path = tempfile.mkdtemp() try: diff --git a/tools/gitwash_dumper.py b/tools/gitwash_dumper.py index 851c0579cd..4ebfba7557 100755 --- a/tools/gitwash_dumper.py +++ b/tools/gitwash_dumper.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -''' Checkout gitwash repo into directory and do search replace on name ''' +""" Checkout gitwash repo into directory and do search replace on name """ import os from os.path import join as pjoin @@ -51,9 +51,9 @@ def cp_files(in_path, globs, out_path): def filename_search_replace(sr_pairs, filename, backup=False): - ''' Search and replace for expressions in files + """ Search and replace for expressions in files - ''' + """ in_txt = open(filename, 'rt').read(-1) out_txt = in_txt[:] for in_exp, out_exp in sr_pairs: @@ -152,13 +152,13 @@ def make_link_targets(proj_name, out_links.close() -USAGE = ''' +USAGE = """ If not set with options, the repository name is the same as the If not set with options, the main github user is the same as the -repository name.''' +repository name.""" GITWASH_CENTRAL = 'git://github.com/matthew-brett/gitwash.git' From b0fb8cb85675489ec2bb383f28c545bb237cd0c9 Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Sun, 24 May 2020 19:56:29 +0300 Subject: [PATCH 5/9] RF: more SafeConfigParser -> ConfigParser --- nibabel/data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nibabel/data.py b/nibabel/data.py index 999270b2b4..793b6b310c 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -124,7 +124,7 @@ def __init__(self, base_path, config_filename=None): Datasource.__init__(self, base_path) if config_filename is None: config_filename = 'config.ini' - self.config = configparser.SafeConfigParser() + self.config = configparser.ConfigParser() cfg_file = self.get_filename(config_filename) readfiles = self.config.read(cfg_file) if not readfiles: From 97aad3af39fec97c2d566c1662b2f757b8d94533 Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Sun, 24 May 2020 22:06:45 +0300 Subject: [PATCH 6/9] RF: Old string formatting to f-strings Complies with python_requires >= 3.6 Was produced with flynt --- doc/source/conf.py | 2 +- doc/source/devel/register_me.py | 2 +- doc/tools/apigen.py | 7 +- doc/tools/build_modref_templates.py | 2 +- nibabel/_version.py | 27 ++++---- nibabel/analyze.py | 9 ++- 
nibabel/batteryrunners.py | 3 +- .../benchmarks/bench_arrayproxy_slicing.py | 5 +- nibabel/benchmarks/bench_fileslice.py | 4 +- nibabel/benchmarks/bench_streamlines.py | 10 ++- nibabel/benchmarks/butils.py | 4 +- nibabel/brikhead.py | 6 +- nibabel/casting.py | 4 +- nibabel/cifti2/cifti2.py | 2 +- nibabel/cifti2/cifti2_axes.py | 10 +-- nibabel/cifti2/tests/test_cifti2io_header.py | 2 +- nibabel/cifti2/tests/test_new_cifti2.py | 4 +- nibabel/cmdline/conform.py | 4 +- nibabel/cmdline/dicomfs.py | 42 +++++------ nibabel/cmdline/diff.py | 2 +- nibabel/cmdline/ls.py | 18 ++--- nibabel/cmdline/nifti_dx.py | 6 +- nibabel/cmdline/parrec2nii.py | 25 ++++--- nibabel/cmdline/tck2trk.py | 2 +- nibabel/cmdline/trk2tck.py | 2 +- nibabel/cmdline/utils.py | 9 ++- nibabel/data.py | 11 ++- nibabel/dataobj_images.py | 2 +- nibabel/deprecated.py | 2 +- nibabel/dft.py | 22 +++--- nibabel/externals/netcdf.py | 13 ++-- nibabel/externals/oset.py | 4 +- nibabel/filebasedimages.py | 3 +- nibabel/filename_parser.py | 3 +- nibabel/fileslice.py | 2 +- nibabel/fileutils.py | 3 +- nibabel/freesurfer/io.py | 18 ++--- nibabel/freesurfer/mghformat.py | 5 +- nibabel/freesurfer/tests/test_io.py | 25 ++++--- nibabel/funcs.py | 3 +- nibabel/gifti/gifti.py | 4 +- nibabel/gifti/parse_gifti_fast.py | 2 +- nibabel/gifti/tests/test_parse_gifti_fast.py | 3 +- nibabel/info.py | 5 +- nibabel/loadsave.py | 15 ++-- nibabel/nicom/csareader.py | 5 +- nibabel/nicom/dicomreaders.py | 2 +- nibabel/nicom/dicomwrappers.py | 2 +- nibabel/nifti1.py | 22 +++--- nibabel/optpkg.py | 7 +- nibabel/orientations.py | 7 +- nibabel/parrec.py | 11 ++- nibabel/processing.py | 4 +- nibabel/quaternions.py | 2 +- nibabel/rstutils.py | 7 +- nibabel/streamlines/__init__.py | 4 +- nibabel/streamlines/array_sequence.py | 5 +- nibabel/streamlines/tck.py | 12 ++-- .../streamlines/tests/test_array_sequence.py | 2 +- nibabel/streamlines/tests/test_trk.py | 8 +-- nibabel/testing/__init__.py | 6 +- nibabel/tests/data/check_parrec_reslice.py | 4 +- nibabel/tests/nibabel_data.py | 2 +- nibabel/tests/scriptrunner.py | 14 ++-- nibabel/tests/test_api_validators.py | 2 +- nibabel/tests/test_arrayproxy.py | 2 +- nibabel/tests/test_arraywriters.py | 2 +- nibabel/tests/test_data.py | 4 +- nibabel/tests/test_deprecator.py | 3 +- nibabel/tests/test_floating.py | 2 +- nibabel/tests/test_image_types.py | 3 +- nibabel/tests/test_loadsave.py | 2 +- nibabel/tests/test_removalschedule.py | 6 +- nibabel/tests/test_scaling.py | 2 +- nibabel/tests/test_scripts.py | 8 +-- nibabel/tests/test_spatialimages.py | 4 +- nibabel/tests/test_wrapstruct.py | 2 +- nibabel/trackvis.py | 13 ++-- nibabel/viewers.py | 4 +- nibabel/volumeutils.py | 3 +- nibabel/wrapstruct.py | 9 ++- nisext/sexts.py | 4 +- nisext/testers.py | 18 +++-- nisext/tests/test_testers.py | 2 +- tools/make_tarball.py | 6 +- versioneer.py | 69 +++++++++---------- 86 files changed, 300 insertions(+), 354 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index d3e75237ab..9ea5495ef1 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -92,7 +92,7 @@ # General information about the project. 
project = u'NiBabel' -copyright = u'2006-2020, %(maintainer)s <%(author_email)s>' % metadata +copyright = f"2006-2020, {metadata['maintainer']} <{metadata['author_email']}>" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/doc/source/devel/register_me.py b/doc/source/devel/register_me.py index d2f571f04f..76e0dbc641 100644 --- a/doc/source/devel/register_me.py +++ b/doc/source/devel/register_me.py @@ -40,7 +40,7 @@ def main(): dsource.set(name, version, OUR_PATH) dsource.write(file(ini_fname, 'wt')) - print('Registered package %s, %s to %s' % (name, version, ini_fname)) + print(f'Registered package {name}, {version} to {ini_fname}') if __name__ == '__main__': diff --git a/doc/tools/apigen.py b/doc/tools/apigen.py index 05498c69a9..52966300e2 100644 --- a/doc/tools/apigen.py +++ b/doc/tools/apigen.py @@ -342,8 +342,7 @@ def _survives_exclude(self, matchstr, match_type): elif match_type == 'package': patterns = self.package_skip_patterns else: - raise ValueError('Cannot interpret match type "%s"' - % match_type) + raise ValueError(f'Cannot interpret match type "{match_type}"') # Match to URI without package name L = len(self.package_name) if matchstr[:L] == self.package_name: @@ -424,7 +423,7 @@ def write_modules_api(self, modules, outdir): written_modules = [] for ulm, mods in module_by_ulm.items(): - print("Generating docs for %s:" % ulm) + print(f"Generating docs for {ulm}:") document_head = [] document_body = [] @@ -505,5 +504,5 @@ def write_index(self, outdir, froot='gen', relative_to=None): w("=" * len(title) + "\n\n") w('.. toctree::\n\n') for f in self.written_modules: - w(' %s\n' % os.path.join(relpath, f)) + w(f' {os.path.join(relpath, f)}\n') idx.close() diff --git a/doc/tools/build_modref_templates.py b/doc/tools/build_modref_templates.py index da752b6c42..6ec6848579 100755 --- a/doc/tools/build_modref_templates.py +++ b/doc/tools/build_modref_templates.py @@ -18,7 +18,7 @@ def abort(error): - print('*WARNING* API documentation not generated: %s' % error) + print(f'*WARNING* API documentation not generated: {error}') exit(1) diff --git a/nibabel/_version.py b/nibabel/_version.py index 60031b4d17..bfb8d6e9f9 100644 --- a/nibabel/_version.py +++ b/nibabel/_version.py @@ -87,20 +87,20 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, if e.errno == errno.ENOENT: continue if verbose: - print("unable to run %s" % dispcmd) + print(f"unable to run {dispcmd}") print(e) return None, None else: if verbose: - print("unable to find command, tried %s" % (commands,)) + print(f"unable to find command, tried {commands}") return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) + print(f"unable to run {dispcmd} (error)") + print(f"stdout was {stdout}") return None, p.returncode return stdout, p.returncode @@ -201,9 +201,9 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) + print(f"discarding '{','.join(refs - tags)}', no digits") if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) + print(f"likely tags: {','.join(sorted(tags))}") for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): @@ -214,7 +214,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): if not re.match(r'\d', r): continue if verbose: - print("picking %s" % r) + print(f"picking {r}") return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, @@ -243,14 +243,14 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): hide_stderr=True) if rc != 0: if verbose: - print("Directory %s not under git control" % root) + print(f"Directory {root} not under git control") raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", - "--match", "%s*" % tag_prefix], + "--match", f"{tag_prefix}*"], cwd=root) # --long was added in git-1.5.5 if describe_out is None: @@ -283,8 +283,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = (f"unable to parse git-describe output: '{describe_out}'") return pieces # tag @@ -384,13 +383,13 @@ def render_pep440_post(pieces): if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] + rendered += f"g{pieces['short']}" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" - rendered += "+g%s" % pieces["short"] + rendered += f"+g{pieces['short']}" return rendered @@ -481,7 +480,7 @@ def render(pieces, style): elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: - raise ValueError("unknown style '%s'" % style) + raise ValueError(f"unknown style '{style}'") return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, diff --git a/nibabel/analyze.py b/nibabel/analyze.py index 53e01db64c..6aa1418f72 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -571,16 +571,16 @@ def set_data_dtype(self, datatype): dt = np.dtype(dt) except TypeError: raise HeaderDataError( - 'data dtype "{0}" not recognized'.format(datatype)) + f'data dtype "{datatype}" not recognized') if dt not in self._data_type_codes: raise HeaderDataError( - 'data dtype "{0}" not supported'.format(datatype)) + f'data dtype "{datatype}" not supported') code = self._data_type_codes[dt] dtype = self._data_type_codes.dtype[code] # test for void, being careful of user-defined types if dtype.type is np.void and not dtype.fields: raise HeaderDataError( - 'data dtype "{0}" known but not supported'.format(datatype)) + f'data dtype "{datatype}" known but not supported') self._structarr['datatype'] = code self._structarr['bitpix'] = dtype.itemsize * 8 @@ -632,8 +632,7 @@ def set_data_shape(self, shape): values_fit = np.all(dims[1:ndims + 1] == shape) # Error if we did not succeed setting dimensions if not values_fit: - raise HeaderDataError('shape %s does not fit in dim datatype' % - (shape,)) + raise HeaderDataError(f'shape {shape} does not fit in dim datatype') self._structarr['pixdim'][ndims + 1:] = 1.0 def get_base_affine(self): diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index 78a887fb56..67af4f4a8b 100644 --- a/nibabel/batteryrunners.py 
+++ b/nibabel/batteryrunners.py @@ -291,8 +291,7 @@ def write_raise(self, stream, error_level=40, log_level=30): write the report to `stream`, otherwise we write nothing. """ if self.problem_level >= log_level: - stream.write('Level %s: %s\n' % - (self.problem_level, self.message)) + stream.write(f'Level {self.problem_level}: {self.message}\n') if self.problem_level and self.problem_level >= error_level: if self.error: raise self.error(self.problem_msg) diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index 2ed9ec9ccd..8afebb546a 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -96,7 +96,7 @@ def fmt_sliceobj(sliceobj): slcstr.append(s) else: slcstr.append(str(int(s * SHAPE[i]))) - return '[{}]'.format(', '.join(slcstr)) + return f"[{', '.join(slcstr)}]" with InTemporaryDirectory(): @@ -133,8 +133,7 @@ def fmt_sliceobj(sliceobj): have_igzip, keep_open, sliceobj = test seed = seeds[SLICEOBJS.index(sliceobj)] - print('Running test {} of {} ({})...'.format( - ti + 1, len(tests), label)) + print(f'Running test {ti + 1} of {len(tests)} ({label})...') # load uncompressed and compressed versions of the image img = nib.load(testfile, keep_file_open=keep_open) diff --git a/nibabel/benchmarks/bench_fileslice.py b/nibabel/benchmarks/bench_fileslice.py index c19d3b89f0..5d99b939d7 100644 --- a/nibabel/benchmarks/bench_fileslice.py +++ b/nibabel/benchmarks/bench_fileslice.py @@ -20,7 +20,7 @@ from ..tmpdirs import InTemporaryDirectory SHAPE = (64, 64, 32, 100) -ROW_NAMES = ['axis {0}, len {1}'.format(i, SHAPE[i]) +ROW_NAMES = [f'axis {i}, len {SHAPE[i]}' for i in range(len(SHAPE))] COL_NAMES = ['mid int', 'step 1', @@ -79,7 +79,7 @@ def my_table(title, times, base): print() print(rst_table(times, ROW_NAMES, COL_NAMES, title, val_fmt='{0[0]:3.2f} ({0[1]:3.2f})')) - print('Base time: {0:3.2f}'.format(base)) + print(f'Base time: {base:3.2f}') if bytes: fobj = BytesIO() times, base = run_slices(fobj, repeat) diff --git a/nibabel/benchmarks/bench_streamlines.py b/nibabel/benchmarks/bench_streamlines.py index 5c49c9e177..54ed8e3e4d 100644 --- a/nibabel/benchmarks/bench_streamlines.py +++ b/nibabel/benchmarks/bench_streamlines.py @@ -44,16 +44,14 @@ def bench_load_trk(): streamlines_old = [d[0] - 0.5 for d in tv.read(trk_file, points_space="rasmm")[0]] mtime_old = measure('tv.read(trk_file, points_space="rasmm")', repeat) - print("Old: Loaded {:,} streamlines in {:6.2f}".format(NB_STREAMLINES, - mtime_old)) + print(f"Old: Loaded {NB_STREAMLINES:,} streamlines in {mtime_old:6.2f}") trk = nib.streamlines.load(trk_file, lazy_load=False) streamlines_new = trk.streamlines mtime_new = measure('nib.streamlines.load(trk_file, lazy_load=False)', repeat) - print("\nNew: Loaded {:,} streamlines in {:6.2}".format(NB_STREAMLINES, - mtime_new)) - print("Speedup of {:.2f}".format(mtime_old / mtime_new)) + print(f"\nNew: Loaded {NB_STREAMLINES:,} streamlines in {mtime_new:6.2}") + print(f"Speedup of {mtime_old / mtime_new:.2f}") for s1, s2 in zip(streamlines_new, streamlines_old): assert_array_equal(s1, s2) @@ -81,7 +79,7 @@ def bench_load_trk(): repeat) msg = "New: Loaded {:,} streamlines with scalars in {:6.2f}" print(msg.format(NB_STREAMLINES, mtime_new)) - print("Speedup of {:2f}".format(mtime_old / mtime_new)) + print(f"Speedup of {mtime_old / mtime_new:2f}") for s1, s2 in zip(scalars_new, scalars_old): assert_array_equal(s1, s2) diff --git a/nibabel/benchmarks/butils.py 
b/nibabel/benchmarks/butils.py index bea5872272..4cc521ab66 100644 --- a/nibabel/benchmarks/butils.py +++ b/nibabel/benchmarks/butils.py @@ -7,8 +7,6 @@ def print_git_title(title): """ Prints title string with git hash if possible, and underline """ - title = '{0} for git revision {1}'.format( - title, - get_info()['commit_hash']) + title = f"{title} for git revision {get_info()['commit_hash']}" print(title) print('-' * len(title)) diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 2afd5b2c89..7693818e7b 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -118,11 +118,9 @@ def _unpack_var(var): 'Offending attribute:\n%s' % var) atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var) if len(atype) != 1: - raise AFNIHeaderError('Invalid attribute type entry in HEAD file. ' - '%s' % err_msg) + raise AFNIHeaderError(f'Invalid attribute type entry in HEAD file. {err_msg}') if len(aname) != 1: - raise AFNIHeaderError('Invalid attribute name entry in HEAD file. ' - '%s' % err_msg) + raise AFNIHeaderError(f'Invalid attribute name entry in HEAD file. {err_msg}') atype = _attr_dic.get(atype[0], str) attr = ' '.join(var.strip().splitlines()[3:]) if atype is not str: diff --git a/nibabel/casting.py b/nibabel/casting.py index 8406824dbe..7f2e8d2d5f 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -268,7 +268,7 @@ def type_info(np_type): # and then give up. At this stage we're expecting exotic longdouble or # their complex equivalent. if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32): - raise FloatingError('We had not expected type %s' % np_type) + raise FloatingError(f'We had not expected type {np_type}') if (vals == (1, 1, 16) and on_powerpc() and _check_maxexp(np.longdouble, 1024)): # double pair on PPC. The _check_nmant routine does not work for this @@ -402,7 +402,7 @@ def as_int(x, check=True): return ix fx = np.floor(x) if check and fx != x: - raise FloatingError('Not an integer: %s' % x) + raise FloatingError(f'Not an integer: {x}') if not fx.dtype.type == np.longdouble: return int(x) # Subtract float64 chunks until we have all of the number. If the int is diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index bd86ebfaa7..d011d6e8e3 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -92,7 +92,7 @@ class Cifti2HeaderError(Exception): def _value_if_klass(val, klass): if val is None or isinstance(val, klass): return val - raise ValueError('Not a valid %s instance.' 
% klass.__name__) + raise ValueError(f'Not a valid {klass.__name__} instance.') def _underscore(string): diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index c4c47007db..f1495552bd 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -348,7 +348,7 @@ def from_mask(cls, mask, name='other', affine=None): else: affine = np.asanyarray(affine) if affine.shape != (4, 4): - raise ValueError("Affine transformation should be a 4x4 array or None, not %r" % affine) + raise ValueError(f"Affine transformation should be a 4x4 array or None, not {affine!r}") mask = np.asanyarray(mask) if mask.ndim == 1: @@ -533,9 +533,9 @@ def to_cifti_brain_structure_name(name): orientation = 'both' structure = name if orientation.lower() == 'both': - proposed_name = 'CIFTI_STRUCTURE_%s' % structure.upper() + proposed_name = f'CIFTI_STRUCTURE_{structure.upper()}' else: - proposed_name = 'CIFTI_STRUCTURE_%s_%s' % (structure.upper(), orientation.upper()) + proposed_name = f'CIFTI_STRUCTURE_{structure.upper()}_{orientation.upper()}' if proposed_name not in cifti2.CIFTI_BRAIN_STRUCTURES: raise ValueError('%s was interpreted as %s, which is not a valid CIFTI brain structure' % (name, proposed_name)) @@ -990,9 +990,9 @@ def __getitem__(self, item): if isinstance(item, str): idx = np.where(self.name == item)[0] if len(idx) == 0: - raise IndexError("Parcel %s not found" % item) + raise IndexError(f"Parcel {item} not found") if len(idx) > 1: - raise IndexError("Multiple parcels with name %s found" % item) + raise IndexError(f"Multiple parcels with name {item} found") return self.voxels[idx[0]], self.vertices[idx[0]] if isinstance(item, int): return self.get_element(item) diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 0fef5ccd78..df4fe10fcd 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -330,7 +330,7 @@ def test_read_parcels(): assert len(vertices) == length assert vertices[0] == first_element assert vertices[-1] == last_element - assert vertices.brain_structure == 'CIFTI_STRUCTURE_CORTEX_%s' % orientation + assert vertices.brain_structure == f'CIFTI_STRUCTURE_CORTEX_{orientation}' @needs_nibabel_data('nitest-cifti2') diff --git a/nibabel/cifti2/tests/test_new_cifti2.py b/nibabel/cifti2/tests/test_new_cifti2.py index 944a1c1576..65ef95c316 100644 --- a/nibabel/cifti2/tests/test_new_cifti2.py +++ b/nibabel/cifti2/tests/test_new_cifti2.py @@ -125,7 +125,7 @@ def create_parcel_map(applies_to_matrix_dimension): volume = ci.Cifti2VoxelIndicesIJK(element) mapping.append(ci.Cifti2Parcel(name, volume, surfaces)) - mapping.extend([ci.Cifti2Surface('CIFTI_STRUCTURE_CORTEX_%s' % orientation, + mapping.extend([ci.Cifti2Surface(f'CIFTI_STRUCTURE_CORTEX_{orientation}', number_of_vertices) for orientation in ['LEFT', 'RIGHT']]) mapping.volume = ci.Cifti2Volume(dimensions, ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, affine)) @@ -148,7 +148,7 @@ def check_parcel_map(mapping): assert parcel.voxel_indices_ijk._indices == element for surface, orientation in zip(mapping.surfaces, ('LEFT', 'RIGHT')): - assert surface.brain_structure == 'CIFTI_STRUCTURE_CORTEX_%s' % orientation + assert surface.brain_structure == f'CIFTI_STRUCTURE_CORTEX_{orientation}' assert surface.surface_number_of_vertices == number_of_vertices assert mapping.volume.volume_dimensions == dimensions diff --git a/nibabel/cmdline/conform.py b/nibabel/cmdline/conform.py index 
65b4ccc388..cfa86b6951 100644 --- a/nibabel/cmdline/conform.py +++ b/nibabel/cmdline/conform.py @@ -34,7 +34,7 @@ def _get_parser(): help="Orientation of the conformed output.") p.add_argument("-f", "--force", action="store_true", help="Overwrite existing output files.") - p.add_argument("-V", "--version", action="version", version="{} {}".format(p.prog, __version__)) + p.add_argument("-V", "--version", action="version", version=f"{p.prog} {__version__}") return p @@ -46,7 +46,7 @@ def main(args=None): from_img = load(opts.infile) if not opts.force and Path(opts.outfile).exists(): - raise FileExistsError("Output file exists: {}".format(opts.outfile)) + raise FileExistsError(f"Output file exists: {opts.outfile}") out_img = conform( from_img=from_img, diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index c54c07f966..6663da9299 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -71,31 +71,31 @@ def get_paths(self): for study in dft.get_studies(self.dicom_path, self.followlinks): pd = paths.setdefault(study.patient_name_or_uid(), {}) patient_info = 'patient information\n' - patient_info = 'name: %s\n' % study.patient_name - patient_info += 'ID: %s\n' % study.patient_id - patient_info += 'birth date: %s\n' % study.patient_birth_date - patient_info += 'sex: %s\n' % study.patient_sex + patient_info = f'name: {study.patient_name}\n' + patient_info += f'ID: {study.patient_id}\n' + patient_info += f'birth date: {study.patient_birth_date}\n' + patient_info += f'sex: {study.patient_sex}\n' pd['INFO'] = patient_info.encode('ascii', 'replace') - study_datetime = '%s_%s' % (study.date, study.time) + study_datetime = f'{study.date}_{study.time}' study_info = 'study info\n' - study_info += 'UID: %s\n' % study.uid - study_info += 'date: %s\n' % study.date - study_info += 'time: %s\n' % study.time - study_info += 'comments: %s\n' % study.comments + study_info += f'UID: {study.uid}\n' + study_info += f'date: {study.date}\n' + study_info += f'time: {study.time}\n' + study_info += f'comments: {study.comments}\n' d = {'INFO': study_info.encode('ascii', 'replace')} for series in study.series: series_info = 'series info\n' - series_info += 'UID: %s\n' % series.uid - series_info += 'number: %s\n' % series.number - series_info += 'description: %s\n' % series.description + series_info += f'UID: {series.uid}\n' + series_info += f'number: {series.number}\n' + series_info += f'description: {series.description}\n' series_info += 'rows: %d\n' % series.rows series_info += 'columns: %d\n' % series.columns series_info += 'bits allocated: %d\n' % series.bits_allocated series_info += 'bits stored: %d\n' % series.bits_stored series_info += 'storage instances: %d\n' % len(series.storage_instances) d[series.number] = {'INFO': series_info.encode('ascii', 'replace'), - '%s.nii' % series.number: (series.nifti_size, series.as_nifti), - '%s.png' % series.number: (series.png_size, series.as_png)} + f'{series.number}.nii': (series.nifti_size, series.as_nifti), + f'{series.number}.png': (series.png_size, series.as_png)} pd[study_datetime] = d return paths @@ -105,7 +105,7 @@ def match_path(self, path): logger.debug('return root') return wd for part in path.lstrip('/').split('/'): - logger.debug("path:%s part:%s" % (path, part)) + logger.debug(f"path:{path} part:{part}") if part not in wd: return None wd = wd[part] @@ -113,20 +113,20 @@ def match_path(self, path): return wd def readdir(self, path, fh): - logger.info('readdir %s' % (path,)) + logger.info(f'readdir {path}') matched_path = 
self.match_path(path) if matched_path is None: return -errno.ENOENT - logger.debug('matched %s' % (matched_path,)) + logger.debug(f'matched {matched_path}') fnames = [k.encode('ascii', 'replace') for k in matched_path.keys()] fnames.append('.') fnames.append('..') return [fuse.Direntry(f) for f in fnames] def getattr(self, path): - logger.debug('getattr %s' % path) + logger.debug(f'getattr {path}') matched_path = self.match_path(path) - logger.debug('matched: %s' % (matched_path,)) + logger.debug(f'matched: {matched_path}') now = time.time() st = fuse.Stat() if isinstance(matched_path, dict): @@ -161,7 +161,7 @@ def getattr(self, path): return -errno.ENOENT def open(self, path, flags): - logger.debug('open %s' % (path,)) + logger.debug(f'open {path}') matched_path = self.match_path(path) if matched_path is None: return -errno.ENOENT @@ -223,7 +223,7 @@ def main(args=None): logger.setLevel(opts.verbose > 1 and logging.DEBUG or logging.INFO) if len(files) != 2: - sys.stderr.write("Please provide two arguments:\n%s\n" % parser.usage) + sys.stderr.write(f"Please provide two arguments:\n{parser.usage}\n") sys.exit(1) fs = DICOMFS( diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index c745ff7abc..f1e4958e8f 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -32,7 +32,7 @@ def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage="%s [OPTIONS] [FILE ...]\n\n" % sys.argv[0] + __doc__, + usage=f"{sys.argv[0]} [OPTIONS] [FILE ...]\n\n" + __doc__, version="%prog " + nib.__version__) p.add_options([ diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index ea2e4032ae..08f975bc3d 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -31,7 +31,7 @@ def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage="%s [OPTIONS] [FILE ...]\n\n" % sys.argv[0] + __doc__, + usage=f"{sys.argv[0]} [OPTIONS] [FILE ...]\n\n" + __doc__, version="%prog " + nib.__version__) p.add_options([ @@ -65,20 +65,20 @@ def get_opt_parser(): def proc_file(f, opts): - verbose(1, "Loading %s" % f) + verbose(1, f"Loading {f}") - row = ["@l%s" % f] + row = [f"@l{f}"] try: vol = nib.load(f) h = vol.header except Exception as e: row += ['failed'] - verbose(2, "Failed to gather information -- %s" % str(e)) + verbose(2, f"Failed to gather information -- {str(e)}") return row row += [str(safe_get(h, 'data_dtype')), - '@l[%s]' % ap(safe_get(h, 'data_shape'), '%3g'), - '@l%s' % ap(safe_get(h, 'zooms'), '%.2f', 'x')] + f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]", + f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}"] # Slope if hasattr(h, 'has_data_slope') and \ (h.has_data_slope or h.has_data_intercept) and \ @@ -116,7 +116,7 @@ def proc_file(f, opts): else: row += [''] except Exception as e: - verbose(2, "Failed to obtain qform or sform -- %s" % str(e)) + verbose(2, f"Failed to obtain qform or sform -- {str(e)}") if isinstance(h, nib.AnalyzeHeader): row += [''] else: @@ -136,7 +136,7 @@ def proc_file(f, opts): # just # of elements row += ["@l[%d]" % np.prod(d.shape)] # stats - row += [len(d) and '@l[%.2g, %.2g]' % (np.min(d), np.max(d)) or '-'] + row += [len(d) and f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' or '-'] if opts.counts: items, inv = np.unique(d, return_inverse=True) if len(items) > 1000 and not opts.all_counts: @@ -146,7 +146,7 @@ def proc_file(f, opts): counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq)) row += ["@l" + counts] except IOError as e: - verbose(2, "Failed to obtain stats/counts -- %s" % str(e)) + 
verbose(2, f"Failed to obtain stats/counts -- {str(e)}") row += [_err()] return row diff --git a/nibabel/cmdline/nifti_dx.py b/nibabel/cmdline/nifti_dx.py index 259c24d97d..51867da065 100644 --- a/nibabel/cmdline/nifti_dx.py +++ b/nibabel/cmdline/nifti_dx.py @@ -23,7 +23,7 @@ def main(args=None): """ Go go team """ parser = OptionParser( - usage="%s [FILE ...]\n\n" % sys.argv[0] + __doc__, + usage=f"{sys.argv[0]} [FILE ...]\n\n" + __doc__, version="%prog " + nib.__version__) (opts, files) = parser.parse_args(args=args) @@ -32,7 +32,7 @@ def main(args=None): hdr = fobj.read(nib.nifti1.header_dtype.itemsize) result = nib.Nifti1Header.diagnose_binaryblock(hdr) if len(result): - print('Picky header check output for "%s"\n' % fname) + print(f'Picky header check output for "{fname}\"\n') print(result + '\n') else: - print('Header for "%s" is clean' % fname) + print(f'Header for "{fname}" is clean') diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index 0dfa03cac9..917615e620 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -22,7 +22,7 @@ def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage="%s [OPTIONS] \n\n" % sys.argv[0] + __doc__, + usage=f"{sys.argv[0]} [OPTIONS] \n\n" + __doc__, version="%prog " + nibabel.__version__) p.add_option( Option("-v", "--verbose", action="store_true", dest="verbose", @@ -136,7 +136,7 @@ def get_opt_parser(): def verbose(msg, indent=0): if verbose.switch: - print("%s%s" % (' ' * indent, msg)) + print(f"{' ' * indent}{msg}") def error(msg, exit_code): @@ -172,7 +172,7 @@ def proc_file(infile, opts): affine = pr_hdr.get_affine(origin=opts.origin) slope, intercept = pr_hdr.get_data_scaling(scaling) if opts.scaling != 'off': - verbose('Using data scaling "%s"' % opts.scaling) + verbose(f'Using data scaling "{opts.scaling}"') # get original scaling, and decide if we scale in-place or not if opts.scaling == 'off': slope = np.array([1.]) @@ -208,8 +208,7 @@ def proc_file(infile, opts): bad_mask = np.logical_and(bvals != 0, (bvecs == 0).all(axis=1)) if bad_mask.sum() > 0: pl = 's' if bad_mask.sum() != 1 else '' - verbose('Removing %s DTI trace volume%s' - % (bad_mask.sum(), pl)) + verbose(f'Removing {bad_mask.sum()} DTI trace volume{pl}') good_mask = ~bad_mask in_data = in_data[..., good_mask] bvals = bvals[good_mask] @@ -243,7 +242,7 @@ def proc_file(infile, opts): dump_ext = nifti1.Nifti1Extension('comment', hdr_dump) nhdr.extensions.append(dump_ext) - verbose('Writing %s' % outfilename) + verbose(f'Writing {outfilename}') nibabel.save(nimg, outfilename) # write out bvals/bvecs if requested @@ -256,7 +255,7 @@ def proc_file(infile, opts): with open(basefilename + '.bvals', 'w') as fid: # np.savetxt could do this, but it's just a loop anyway for val in bvals: - fid.write('%s ' % val) + fid.write(f'{val} ') fid.write('\n') else: verbose('Writing .bvals and .bvecs files') @@ -267,12 +266,12 @@ def proc_file(infile, opts): with open(basefilename + '.bvals', 'w') as fid: # np.savetxt could do this, but it's just a loop anyway for val in bvals: - fid.write('%s ' % val) + fid.write(f'{val} ') fid.write('\n') with open(basefilename + '.bvecs', 'w') as fid: for row in bvecs.T: for val in row: - fid.write('%s ' % val) + fid.write(f'{val} ') fid.write('\n') # export data labels varying along the 4th dimensions if requested @@ -299,7 +298,7 @@ def proc_file(infile, opts): verbose('Writing dwell time (%r sec) calculated assuming %sT ' 'magnet' % (dwell_time, opts.field_strength)) with 
open(basefilename + '.dwell_time', 'w') as fid: - fid.write('%r\n' % dwell_time) + fid.write(f'{dwell_time!r}\n') # done @@ -310,18 +309,18 @@ def main(): verbose.switch = opts.verbose if opts.origin not in ['scanner', 'fov']: - error("Unrecognized value for --origin: '%s'." % opts.origin, 1) + error(f"Unrecognized value for --origin: '{opts.origin}'.", 1) if opts.dwell_time and opts.field_strength is None: error('Need --field-strength for dwell time calculation', 1) # store any exceptions errs = [] for infile in infiles: - verbose('Processing %s' % infile) + verbose(f'Processing {infile}') try: proc_file(infile, opts) except Exception as e: - errs.append('%s: %s' % (infile, e)) + errs.append(f'{infile}: {e}') if len(errs): error('Caught %i exceptions. Dump follows:\n\n %s' diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py index deb3adcd5f..9b359babaf 100644 --- a/nibabel/cmdline/tck2trk.py +++ b/nibabel/cmdline/tck2trk.py @@ -35,7 +35,7 @@ def main(): for tractogram in args.tractograms: tractogram_format = nib.streamlines.detect_format(tractogram) if tractogram_format is not nib.streamlines.TckFile: - print("Skipping non TCK file: '{}'".format(tractogram)) + print(f"Skipping non TCK file: '{tractogram}'") continue filename, _ = os.path.splitext(tractogram) diff --git a/nibabel/cmdline/trk2tck.py b/nibabel/cmdline/trk2tck.py index a55f7e95af..efdcf1fd02 100644 --- a/nibabel/cmdline/trk2tck.py +++ b/nibabel/cmdline/trk2tck.py @@ -25,7 +25,7 @@ def main(): for tractogram in args.tractograms: tractogram_format = nib.streamlines.detect_format(tractogram) if tractogram_format is not nib.streamlines.TrkFile: - print("Skipping non TRK file: '{}'".format(tractogram)) + print(f"Skipping non TRK file: '{tractogram}'") continue filename, _ = os.path.splitext(tractogram) diff --git a/nibabel/cmdline/utils.py b/nibabel/cmdline/utils.py index 57c0ccc286..8931beb617 100644 --- a/nibabel/cmdline/utils.py +++ b/nibabel/cmdline/utils.py @@ -33,7 +33,7 @@ def verbose(thing, msg): """ # TODO: consider using nibabel's logger if thing <= int(verbose_level): - print("%s%s" % (' ' * thing, msg)) + print(f"{' ' * thing}{msg}") def table2string(table, out=None): @@ -78,8 +78,7 @@ def table2string(table, out=None): align = item[1] item = item[2:] if align not in ['l', 'r', 'c', 'w']: - raise ValueError('Unknown alignment %s. Known are l,r,c' % - align) + raise ValueError(f'Unknown alignment {align}. 
Known are l,r,c') else: align = 'c' @@ -93,7 +92,7 @@ def table2string(table, out=None): elif align == 'r': nspacesl, nspacesr = nspacesl + nspacesr, 0 else: - raise RuntimeError('Should not get here with align=%s' % align) + raise RuntimeError(f'Should not get here with align={align}') string_ += "%%%ds%%s%%%ds " \ % (nspacesl, nspacesr) % ('', item, '') @@ -121,5 +120,5 @@ def safe_get(obj, name): f = getattr(obj, 'get_' + name) return f() except Exception as e: - verbose(2, "get_%s() failed -- %s" % (name, e)) + verbose(2, f"get_{name}() failed -- {e}") return '-' diff --git a/nibabel/data.py b/nibabel/data.py index 793b6b310c..adce51b92c 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -128,11 +128,11 @@ def __init__(self, base_path, config_filename=None): cfg_file = self.get_filename(config_filename) readfiles = self.config.read(cfg_file) if not readfiles: - raise DataError('Could not read config file %s' % cfg_file) + raise DataError(f'Could not read config file {cfg_file}') try: self.version = self.config.get('DEFAULT', 'version') except configparser.Error: - raise DataError('Could not get version from %s' % cfg_file) + raise DataError(f'Could not get version from {cfg_file}') version_parts = self.version.split('.') self.major_version = int(version_parts[0]) self.minor_version = int(version_parts[1]) @@ -294,12 +294,11 @@ def make_datasource(pkg_def, **kwargs): pth = [pjoin(this_data_path, *names) for this_data_path in data_path] pkg_hint = pkg_def.get('install hint', DEFAULT_INSTALL_HINT) - msg = ('%s; Is it possible you have not installed a data package?' % - e) + msg = (f'{e}; Is it possible you have not installed a data package?') if 'name' in pkg_def: - msg += '\n\nYou may need the package "%s"' % pkg_def['name'] + msg += f"\n\nYou may need the package \"{pkg_def['name']}\"" if pkg_hint is not None: - msg += '\n\n%s' % pkg_hint + msg += f'\n\n{pkg_hint}' raise DataError(msg) return VersionedDatasource(pth) diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index e0e3d52849..68972a8cb1 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -344,7 +344,7 @@ def get_fdata(self, caching='fill', dtype=np.float64): raise ValueError('caching value should be "fill" or "unchanged"') dtype = np.dtype(dtype) if not issubclass(dtype.type, np.inexact): - raise ValueError('{} should be floating point type'.format(dtype)) + raise ValueError(f'{dtype} should be floating point type') # Return cache if cache present and of correct dtype. 
         if self._fdata_cache is not None:
             if self._fdata_cache.dtype.type == dtype.type:
diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py
index 1a0f85330d..2dd1f11db3 100644
--- a/nibabel/deprecated.py
+++ b/nibabel/deprecated.py
@@ -36,7 +36,7 @@ def __getattr__(self, key):
         return getattr(mod, key)
 
     def __repr__(self):
-        return "<module proxy for {0}>".format(self._module_name)
+        return f"<module proxy for {self._module_name}>"
 
 
 class FutureWarningMixin(object):
diff --git a/nibabel/dft.py b/nibabel/dft.py
index f46259b232..2768f1ec59 100644
--- a/nibabel/dft.py
+++ b/nibabel/dft.py
@@ -279,7 +279,7 @@ def _get_subdirs(base_dir, files_dict=None, followlinks=False):
     for (dirpath, dirnames, filenames) in os.walk(base_dir, followlinks=followlinks):
         abs_dir = os.path.realpath(dirpath)
         if abs_dir in dirs:
-            raise CachingError('link cycle detected under %s' % base_dir)
+            raise CachingError(f'link cycle detected under {base_dir}')
         dirs.append(abs_dir)
         if files_dict is not None:
             files_dict[abs_dir] = filenames
@@ -306,7 +306,7 @@ def update_cache(base_dir, followlinks=False):
     for dir in sorted(mtimes.keys()):
         if dir in db_mtimes and mtimes[dir] <= db_mtimes[dir]:
             continue
-        logger.debug('updating %s' % dir)
+        logger.debug(f'updating {dir}')
         _update_dir(c, dir, files_by_dir[dir], studies, series,
                     storage_instances)
         if dir in db_mtimes:
@@ -353,20 +353,20 @@ def get_studies(base_dir=None, followlinks=False):
 
 def _update_dir(c, dir, files, studies, series, storage_instances):
-    logger.debug('Updating directory %s' % dir)
+    logger.debug(f'Updating directory {dir}')
     c.execute("SELECT name, mtime FROM file WHERE directory = ?",
               (dir, ))
     db_mtimes = dict(c)
     for fname in db_mtimes:
         if fname not in files:
-            logger.debug(' remove %s' % fname)
+            logger.debug(f' remove {fname}')
             c.execute("DELETE FROM file WHERE directory = ?
AND name = ?", (dir, fname)) for fname in files: - mtime = os.lstat('%s/%s' % (dir, fname)).st_mtime + mtime = os.lstat(f'{dir}/{fname}').st_mtime if fname in db_mtimes and mtime <= db_mtimes[fname]: - logger.debug(' okay %s' % fname) + logger.debug(f' okay {fname}') else: - logger.debug(' update %s' % fname) + logger.debug(f' update {fname}') si_uid = _update_file(c, dir, fname, studies, series, storage_instances) if fname not in db_mtimes: @@ -386,7 +386,7 @@ def _update_dir(c, dir, files, studies, series, storage_instances): def _update_file(c, path, fname, studies, series, storage_instances): try: - do = read_file('%s/%s' % (path, fname)) + do = read_file(f'{path}/{fname}') except pydicom.filereader.InvalidDicomError: logger.debug(' not a DICOM file') return None @@ -395,7 +395,7 @@ def _update_file(c, path, fname, studies, series, storage_instances): except AttributeError: study_comments = '' try: - logger.debug(' storage instance %s' % str(do.SOPInstanceUID)) + logger.debug(f' storage instance {str(do.SOPInstanceUID)}') if str(do.StudyInstanceUID) not in studies: query = """INSERT INTO study (uid, date, @@ -444,7 +444,7 @@ def _update_file(c, path, fname, studies, series, storage_instances): c.execute(query, params) storage_instances.append(str(do.SOPInstanceUID)) except AttributeError as data: - logger.debug(' %s' % str(data)) + logger.debug(f' {str(data)}') return None return str(do.SOPInstanceUID) @@ -486,7 +486,7 @@ def clear_cache(): mtime INTEGER NOT NULL, storage_instance TEXT DEFAULT NULL REFERENCES storage_instance, PRIMARY KEY (directory, name))""") -DB_FNAME = pjoin(tempfile.gettempdir(), 'dft.%s.sqlite' % getpass.getuser()) +DB_FNAME = pjoin(tempfile.gettempdir(), f'dft.{getpass.getuser()}.sqlite') DB = None diff --git a/nibabel/externals/netcdf.py b/nibabel/externals/netcdf.py index a0099ec6b4..b2e2c9a868 100644 --- a/nibabel/externals/netcdf.py +++ b/nibabel/externals/netcdf.py @@ -258,7 +258,7 @@ def __init__(self, filename, mode='r', mmap=None, version=1, else: # maybe it's a string self.filename = filename omode = 'r+' if mode == 'a' else mode - self.fp = open(self.filename, '%sb' % omode) + self.fp = open(self.filename, f'{omode}b') if mmap is None: # Mmapped files on PyPy cannot be usually closed # before the GC runs, so it's better to use mmap=False @@ -397,7 +397,7 @@ def createVariable(self, name, type, dimensions): type = dtype(type) typecode, size = type.char, type.itemsize if (typecode, size) not in REVERSE: - raise ValueError("NetCDF 3 does not support type %s" % type) + raise ValueError(f"NetCDF 3 does not support type {type}") data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3 self.variables[name] = netcdf_variable( @@ -589,7 +589,7 @@ def _write_att_values(self, values): break typecode, size = TYPEMAP[nc_type] - dtype_ = '>%s' % typecode + dtype_ = f'>{typecode}' # asarray() dies with bytes and '>c' in py3k. Change to 'S' dtype_ = 'S' if dtype_ == '>c' else dtype_ @@ -614,8 +614,7 @@ def _read(self): # Check magic bytes and version magic = self.fp.read(3) if not magic == b'CDF': - raise TypeError("Error: %s is not a valid NetCDF 3 file" % - self.filename) + raise TypeError(f"Error: {self.filename} is not a valid NetCDF 3 file") self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0] # Read file headers and set data. 
@@ -762,7 +761,7 @@ def _read_var(self): begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]() typecode, size = TYPEMAP[nc_type] - dtype_ = '>%s' % typecode + dtype_ = f'>{typecode}' return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize @@ -777,7 +776,7 @@ def _read_att_values(self): self.fp.read(-count % 4) # read padding if typecode != 'c': - values = frombuffer(values, dtype='>%s' % typecode).copy() + values = frombuffer(values, dtype=f'>{typecode}').copy() if values.shape == (1,): values = values[0] else: diff --git a/nibabel/externals/oset.py b/nibabel/externals/oset.py index 0a29c661c5..3c49f8f856 100644 --- a/nibabel/externals/oset.py +++ b/nibabel/externals/oset.py @@ -72,8 +72,8 @@ def pop(self, last=True): def __repr__(self): if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, list(self)) + return f'{self.__class__.__name__}()' + return f'{self.__class__.__name__}({list(self)!r})' def __eq__(self, other): if isinstance(other, OrderedSet): diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index fdc8a00e7f..436c2cd676 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -301,8 +301,7 @@ def filespec_to_file_map(klass, filespec): trailing_suffixes=klass._compressed_suffixes) except TypesFilenamesError: raise ImageFileError( - 'Filespec "{0}" does not look right for class {1}'.format( - filespec, klass)) + f'Filespec "{filespec}" does not look right for class {klass}') file_map = {} for key, fname in filenames.items(): file_map[key] = FileHolder(filename=fname) diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index 5d84a9d6dc..af0ff74541 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -134,8 +134,7 @@ def types_filenames(template_fname, types_exts, # is ignored). It's confusing to change # this to test.img.gz, or test.gz.img, so error raise TypesFilenamesError( - 'Confusing ignored suffix %s without extension' - % ignored) + f'Confusing ignored suffix {ignored} without extension') # if we've got to here, we have a guessed name and a found # extension. else: # not enforcing extensions. 
If there's an extension, we set the diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index 6c4616196f..0bb987c8be 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -426,7 +426,7 @@ def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride, action = heuristic(slicer, dim_len, stride) # Check return values (we may be using a custom function) if action not in ('full', 'contiguous', None): - raise ValueError('Unexpected return %s from heuristic' % action) + raise ValueError(f'Unexpected return {action} from heuristic') if is_int and action == 'contiguous': raise ValueError("int index cannot be contiguous") # If this is the slowest changing dimension, never upgrade None or diff --git a/nibabel/fileutils.py b/nibabel/fileutils.py index b88e2f7128..c518cdd921 100644 --- a/nibabel/fileutils.py +++ b/nibabel/fileutils.py @@ -52,8 +52,7 @@ def read_zt_byte_strings(fobj, n_strings=1, bufsize=1024): if eof or n_found >= n_strings: break if n_found < n_strings: - raise ValueError('Expected {0} strings, found {1}'.format( - n_strings, n_found)) + raise ValueError(f'Expected {n_strings} strings, found {n_found}') n_extra = n_found - n_strings leftover_strings = byte_strings[n_strings:] + [trailing] # Add number of extra strings to account for lost terminal 0s diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 2bcbbffb1d..467797ab51 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -228,12 +228,11 @@ def write_geometry(filepath, coords, faces, create_stamp=None, magic_bytes = np.array([255, 255, 254], dtype=np.uint8) if create_stamp is None: - create_stamp = "created by %s on %s" % (getpass.getuser(), - time.ctime()) + create_stamp = f"created by {getpass.getuser()} on {time.ctime()}" with open(filepath, 'wb') as fobj: magic_bytes.tofile(fobj) - fobj.write(("%s\n\n" % create_stamp).encode('utf-8')) + fobj.write((f"{create_stamp}\n\n").encode('utf-8')) np.array([coords.shape[0], faces.shape[0]], dtype='>i4').tofile(fobj) @@ -309,8 +308,7 @@ def write_morph_data(file_like, values, fnum=0): if vnum > i4info.max: raise ValueError("Too many values for morphometry file") if not i4info.min <= fnum <= i4info.max: - raise ValueError("Argument fnum must be between {0} and {1}".format( - i4info.min, i4info.max)) + raise ValueError(f"Argument fnum must be between {i4info.min} and {i4info.max}") with Opener(file_like, 'wb') as fobj: fobj.write(magic_bytes) @@ -537,8 +535,7 @@ def write_string(s): if fill_ctab: ctab = np.hstack((ctab[:, :4], _pack_rgb(ctab[:, :3]))) elif not np.array_equal(ctab[:, [4]], _pack_rgb(ctab[:, :3])): - warnings.warn('Annotation values in {} will be incorrect'.format( - filepath)) + warnings.warn(f'Annotation values in {filepath} will be incorrect') # vtxct write(vnum) @@ -605,7 +602,7 @@ def _serialize_volume_info(volume_info): 'zras', 'cras'] diff = set(volume_info.keys()).difference(keys) if len(diff) > 0: - raise ValueError('Invalid volume info: %s.' 
% diff.pop()) + raise ValueError(f'Invalid volume info: {diff.pop()}.') strings = list() for key in keys: @@ -616,11 +613,10 @@ def _serialize_volume_info(volume_info): strings.append(np.array(volume_info[key], dtype='>i4').tobytes()) elif key in ('valid', 'filename'): val = volume_info[key] - strings.append('{0} = {1}\n'.format(key, val).encode('utf-8')) + strings.append(f'{key} = {val}\n'.encode('utf-8')) elif key == 'volume': val = volume_info[key] - strings.append('{0} = {1} {2} {3}\n'.format( - key, val[0], val[1], val[2]).encode('utf-8')) + strings.append(f'{key} = {val[0]} {val[1]} {val[2]}\n'.encode('utf-8')) else: val = volume_info[key] strings.append('{0} = {1:0.10g} {2:0.10g} {3:0.10g}\n'.format( diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 72a754efe8..0a9c4fab17 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -223,7 +223,7 @@ def set_data_dtype(self, datatype): try: code = self._data_type_codes[datatype] except KeyError: - raise MGHError('datatype dtype "%s" not recognized' % datatype) + raise MGHError(f'datatype dtype "{datatype}" not recognized') self._structarr['type'] = code def _ndims(self): @@ -284,8 +284,7 @@ def set_zooms(self, zooms): hdr['delta'] = zooms[:3] if len(zooms) == 4: if zooms[3] < 0: - raise HeaderDataError('TR must be non-negative; got {!r}' - ''.format(zooms[3])) + raise HeaderDataError(f'TR must be non-negative; got {zooms[3]!r}') hdr['tr'] = zooms[3] def get_data_shape(self): diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 382dfbb186..ba44c14f13 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -36,7 +36,7 @@ have_freesurfer = isdir(data_path) freesurfer_test = unittest.skipUnless(have_freesurfer, - 'cannot find freesurfer {0} directory'.format(DATA_SDIR)) + f'cannot find freesurfer {DATA_SDIR} directory') def _hash_file_content(fname): hasher = hashlib.md5() @@ -49,12 +49,12 @@ def _hash_file_content(fname): @freesurfer_test def test_geometry(): """Test IO of .surf""" - surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "inflated")) + surf_path = pjoin(data_path, "surf", f"{'lh'}.{'inflated'}") coords, faces = read_geometry(surf_path) assert 0 == faces.min() assert coords.shape[0] == faces.max() + 1 - surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "sphere")) + surf_path = pjoin(data_path, "surf", f"{'lh'}.{'sphere'}") coords, faces, volume_info, create_stamp = read_geometry( surf_path, read_metadata=True, read_stamp=True) @@ -68,8 +68,7 @@ def test_geometry(): # with respect to read_geometry() with InTemporaryDirectory(): surf_path = 'test' - create_stamp = "created by %s on %s" % (getpass.getuser(), - time.ctime()) + create_stamp = f"created by {getpass.getuser()} on {time.ctime()}" volume_info['cras'] = [1., 2., 3.] write_geometry(surf_path, coords, faces, create_stamp, volume_info) @@ -133,7 +132,7 @@ def test_quad_geometry(): @freesurfer_test def test_morph_data(): """Test IO of morphometry data file (eg. 
curvature).""" - curv_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "curv")) + curv_path = pjoin(data_path, "surf", f"{'lh'}.{'curv'}") curv = read_morph_data(curv_path) assert -1.0 < curv.min() < 0 assert 0 < curv.max() < 1.0 @@ -171,7 +170,7 @@ def test_annot(): """Test IO of .annot against freesurfer example data.""" annots = ['aparc', 'aparc.a2005s'] for a in annots: - annot_path = pjoin(data_path, "label", "%s.%s.annot" % ("lh", a)) + annot_path = pjoin(data_path, "label", f"{'lh'}.{a}.annot") hash_ = _hash_file_content(annot_path) labels, ctab, names = read_annot(annot_path) @@ -214,7 +213,7 @@ def test_read_write_annot(): # 3 colours in the LUT. nvertices = 10 nlabels = 3 - names = ['label {}'.format(l) for l in range(1, nlabels + 1)] + names = [f'label {l}' for l in range(1, nlabels + 1)] # randomly generate a label for each vertex, making sure # that at least one of each label value is present. Label # values are in the range (0, nlabels-1) - they are used @@ -248,7 +247,7 @@ def test_write_annot_fill_ctab(): """Test the `fill_ctab` parameter to :func:`.write_annot`. """ nvertices = 10 nlabels = 3 - names = ['label {}'.format(l) for l in range(1, nlabels + 1)] + names = [f'label {l}' for l in range(1, nlabels + 1)] labels = list(range(nlabels)) + \ list(np.random.randint(0, nlabels, nvertices - nlabels)) labels = np.array(labels, dtype=np.int32) @@ -269,7 +268,7 @@ def test_write_annot_fill_ctab(): rgbal = np.hstack((rgba, badannot)) with clear_and_catch_warnings() as w: write_annot(annot_path, labels, rgbal, names, fill_ctab=False) - assert any('Annotation values in {} will be incorrect'.format(annot_path) == str(ww.message) + assert any(f'Annotation values in {annot_path} will be incorrect' == str(ww.message) for ww in w) labels2, rgbal2, names2 = read_annot(annot_path, orig_ids=True) names2 = [n.decode('ascii') for n in names2] @@ -284,7 +283,7 @@ def test_write_annot_fill_ctab(): rgbal[:, 2] * (2 ** 16)) with clear_and_catch_warnings() as w: write_annot(annot_path, labels, rgbal, names, fill_ctab=False) - assert all('Annotation values in {} will be incorrect'.format(annot_path) != str(ww.message) + assert all(f'Annotation values in {annot_path} will be incorrect' != str(ww.message) for ww in w) labels2, rgbal2, names2 = read_annot(annot_path) names2 = [n.decode('ascii') for n in names2] @@ -322,7 +321,7 @@ def gen_old_annot_file(fpath, nverts, labels, rgba, names): with InTemporaryDirectory(): nverts = 10 nlabels = 3 - names = ['Label {}'.format(l) for l in range(nlabels)] + names = [f'Label {l}' for l in range(nlabels)] labels = np.concatenate(( np.arange(nlabels), np.random.randint(0, nlabels, nverts - nlabels))) np.random.shuffle(labels) @@ -356,7 +355,7 @@ def test_write_annot_maxstruct(): """Test writing ANNOT files with repeated labels""" with InTemporaryDirectory(): nlabels = 3 - names = ['label {}'.format(l) for l in range(1, nlabels + 1)] + names = [f'label {l}' for l in range(1, nlabels + 1)] # max label < n_labels labels = np.array([1, 1, 1], dtype=np.int32) rgba = np.array(np.random.randint(0, 255, (nlabels, 4)), dtype=np.int32) diff --git a/nibabel/funcs.py b/nibabel/funcs.py index 21246d8ec6..1271b3e926 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -134,8 +134,7 @@ def concat_images(images, check_affines=True, axis=None): for i, img in enumerate(images): if len(img.shape) != n_dim: raise ValueError( - 'Image {0} has {1} dimensions, image 0 has {2}'.format( - i, len(img.shape), n_dim)) + f'Image {i} has {len(img.shape)} dimensions, image 0 has 
{n_dim}') if not np.all(np.array(img.shape)[idx_mask] == masked_shape): raise ValueError('shape {0} for image {1} not compatible with ' 'first image shape {2} with axis == {3}'.format( diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 8fec096259..2bc10906d4 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -487,7 +487,7 @@ def to_xml_open(self): \tExternalFileOffset="%d">\n""" di = "" for i, n in enumerate(self.dims): - di = di + '\tDim%s=\"%s\"\n' % (str(i), str(n)) + di = di + f'\tDim{str(i)}="{str(n)}\"\n' return out % (intent_codes.niistring[self.intent], data_type_codes.niistring[self.datatype], array_index_order_codes.label[self.ind_ord], @@ -838,7 +838,7 @@ def print_summary(self): print(self.labeltable.print_summary()) for i, da in enumerate(self.darrays): print('----') - print('DataArray %s:' % i) + print(f'DataArray {i}:') print(da.print_summary()) print('----end----') diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index 044a70fede..3b60693478 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -177,7 +177,7 @@ def StartElementHandler(self, name, attrs): attrs["ArrayIndexingOrder"]] num_dim = int(attrs.get("Dimensionality", 0)) for i in range(num_dim): - di = "Dim%s" % str(i) + di = f"Dim{str(i)}" if di in attrs: self.da.dims.append(int(attrs[di])) # dimensionality has to correspond to the number of DimX given diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index 54d8e78621..c323ae51df 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -106,8 +106,7 @@ def assert_default_types(loaded): continue loadedtype = type(getattr(loaded, attr)) assert loadedtype == defaulttype, ( - "Type mismatch for attribute: {} ({!s} != {!s})".format( - attr, loadedtype, defaulttype)) + f"Type mismatch for attribute: {attr} ({loadedtype!s} != {defaulttype!s})") def test_default_types(): diff --git a/nibabel/info.py b/nibabel/info.py index e9a48e42d1..5a344c6f8e 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -16,10 +16,7 @@ _version_extra = 'dev' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" -VERSION = "%s.%s.%s%s" % (_version_major, - _version_minor, - _version_micro, - _version_extra) +VERSION = f"{_version_major}.{_version_minor}.{_version_micro}{_version_extra}" # Note: this long_description is the canonical place to edit this text. diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index 85713aa24b..cedbe8e8e9 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -41,9 +41,9 @@ def load(filename, **kwargs): try: stat_result = os.stat(filename) except OSError: - raise FileNotFoundError("No such file or no access: '%s'" % filename) + raise FileNotFoundError(f"No such file or no access: '{filename}'") if stat_result.st_size <= 0: - raise ImageFileError("Empty file: '%s'" % filename) + raise ImageFileError(f"Empty file: '{filename}'") sniff = None for image_klass in all_image_classes: @@ -52,8 +52,7 @@ def load(filename, **kwargs): img = image_klass.from_filename(filename, **kwargs) return img - raise ImageFileError('Cannot work out file type of "%s"' % - filename) + raise ImageFileError(f'Cannot work out file type of "{filename}"') @deprecate_with_version('guessed_image_type deprecated.' 
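
The reworded error paths in loadsave.py keep their exception types, so existing
caller-side handling is unaffected; a minimal usage sketch (hypothetical
filename, assuming nibabel is importable):

    import nibabel as nib
    try:
        nib.load('no_such_file.nii')
    except FileNotFoundError as err:
        print(err)  # No such file or no access: 'no_such_file.nii'
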
@@ -78,8 +77,7 @@ def guessed_image_type(filename): if is_valid: return image_klass - raise ImageFileError('Cannot work out file type of "%s"' % - filename) + raise ImageFileError(f'Cannot work out file type of "{filename}"') def save(img, filename): @@ -130,8 +128,7 @@ def save(img, filename): valid_klasses = [klass for klass in all_image_classes if ext in klass.valid_exts] if not valid_klasses: # if list is empty - raise ImageFileError('Cannot work out file type of "%s"' % - filename) + raise ImageFileError(f'Cannot work out file type of "{filename}"') # Got a list of valid extensions, but that's no guarantee # the file conversion will work. So, try each image @@ -207,7 +204,7 @@ def read_img_data(img, prefer='scaled'): other formats with more complicated scaling - such as MINC. """ if prefer not in ('scaled', 'unscaled'): - raise ValueError('Invalid string "%s" for "prefer"' % prefer) + raise ValueError(f'Invalid string "{prefer}" for "prefer"') hdr = img.header if not hasattr(hdr, 'raw_data_from_fileobj'): # We can only do scaled diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 6ef089e301..8082608b73 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -56,7 +56,7 @@ def get_csa_header(dcm_data, csa_type='image'): elif csa_type == 'series': element_offset = 0x20 else: - raise ValueError('Invalid CSA header type "%s"' % csa_type) + raise ValueError(f'Invalid CSA header type "{csa_type}"') if not (0x29, 0x10) in dcm_data: # Cannot be Siemens CSA return None section_start = find_private_section(dcm_data, 0x29, 'SIEMENS CSA HEADER') @@ -123,8 +123,7 @@ def read(csa_str): if tag_no == 1: tag0_n_items = n_items if n_items > MAX_CSA_ITEMS: - raise CSAReadError('Expected <= {0} tags, got {1}'.format( - MAX_CSA_ITEMS, n_items)) + raise CSAReadError(f'Expected <= {MAX_CSA_ITEMS} tags, got {n_items}') items = [] for item_no in range(n_items): x0, x1, x2, x3 = up_str.unpack('4i') diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index dee8b507d5..5d5ea11799 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -83,7 +83,7 @@ def read_mosaic_dir(dicom_path, gradients = [] arrays = [] if len(filenames) == 0: - raise IOError('Found no files with "%s"' % full_globber) + raise IOError(f'Found no files with "{full_globber}"') for fname in filenames: dcm_w = wrapper_from_file(fname, **dicom_kwargs) # Because the routine sorts by filename, it only makes sense to use diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index b718b980aa..00e964928f 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -286,7 +286,7 @@ def series_signature(self): def __getitem__(self, key): """ Return values from DICOM object""" if key not in self.dcm_data: - raise KeyError('"%s" not in self.dcm_data' % key) + raise KeyError(f'"{key}" not in self.dcm_data') return self.dcm_data.get(key) def get(self, key, default=None): diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 202decd8e0..c39831b70d 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -345,7 +345,7 @@ def __repr__(self): # deal with unknown codes code = self._code - s = "Nifti1Extension('%s', '%s')" % (code, self._content) + s = f"Nifti1Extension('{code}', '{self._content}')" return s def __eq__(self, other): @@ -519,7 +519,7 @@ def get_sizeondisk(self): return np.sum([e.get_sizeondisk() for e in self]) def __repr__(self): - s = "Nifti1Extensions(%s)" % ', '.join(str(e) for e in self) + s = 
f"Nifti1Extensions({', '.join(str(e) for e in self)})" return s def __cmp__(self, other): @@ -1169,7 +1169,7 @@ def get_slope_inter(self): return None, None if not np.isfinite(inter): raise HeaderDataError( - 'Valid slope but invalid intercept {0}'.format(inter)) + f'Valid slope but invalid intercept {inter}') return slope, inter def set_slope_inter(self, slope, inter=None): @@ -1397,8 +1397,7 @@ def set_intent(self, code, params=(), name='', allow_unknown=False): icode = code p_descr = ('p1', 'p2', 'p3') if len(params) and len(params) != len(p_descr): - raise HeaderDataError('Need params of form %s, or empty' - % (p_descr,)) + raise HeaderDataError(f'Need params of form {p_descr}, or empty') hdr['intent_code'] = icode hdr['intent_name'] = name all_params = [0] * 3 @@ -1615,8 +1614,7 @@ def _slice_time_order(self, slabel, n_slices): sp_ind_time_order = (list(range(n_slices - 2, -1, -2)) + list(range(n_slices - 1, -1, -2))) else: - raise HeaderDataError('We do not handle slice ordering "%s"' - % slabel) + raise HeaderDataError(f'We do not handle slice ordering "{slabel}"') return np.argsort(sp_ind_time_order) def get_xyzt_units(self): @@ -1682,8 +1680,7 @@ def _chk_magic(hdr, fix=False): magic = hdr['magic'].item() if magic in (hdr.pair_magic, hdr.single_magic): return hdr, rep - rep.problem_msg = ('magic string "%s" is not valid' % - asstr(magic)) + rep.problem_msg = (f'magic string "{asstr(magic)}" is not valid') rep.problem_level = 45 if fix: rep.fix_msg = 'leaving as is, but future errors are likely' @@ -1703,8 +1700,7 @@ def _chk_offset(hdr, fix=False): 'single file nifti1' % offset) if fix: hdr['vox_offset'] = hdr.single_vox_offset - rep.fix_msg = 'setting to minimum value of {0}'.format( - hdr.single_vox_offset) + rep.fix_msg = f'setting to minimum value of {hdr.single_vox_offset}' return hdr, rep if not offset % 16: return hdr, rep @@ -1895,7 +1891,7 @@ def set_qform(self, affine, code=None, strip_shears=True, **kwargs): """ update_affine = kwargs.pop('update_affine', True) if kwargs: - raise TypeError('Unexpected keyword argument(s) %s' % kwargs) + raise TypeError(f'Unexpected keyword argument(s) {kwargs}') self._header.set_qform(affine, code, strip_shears) if update_affine: if self._affine is None: @@ -1984,7 +1980,7 @@ def set_sform(self, affine, code=None, **kwargs): """ update_affine = kwargs.pop('update_affine', True) if kwargs: - raise TypeError('Unexpected keyword argument(s) %s' % kwargs) + raise TypeError(f'Unexpected keyword argument(s) {kwargs}') self._header.set_sform(affine, code) if update_affine: if self._affine is None: diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index 81dae3010c..69a08af8db 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -99,10 +99,9 @@ def optional_package(name, trip_msg=None, min_version=None): # Failed version check if trip_msg is None: if callable(min_version): - trip_msg = 'Package %s fails version check' % min_version + trip_msg = f'Package {min_version} fails version check' else: - trip_msg = ('These functions need %s version >= %s' % - (name, min_version)) + trip_msg = (f'These functions need {name} version >= {min_version}') if trip_msg is None: trip_msg = ('We need package %s for these functions, but ' '``import %s`` raised %s' @@ -111,6 +110,6 @@ def optional_package(name, trip_msg=None, min_version=None): def setup_module(): import unittest - raise unittest.SkipTest('No %s for these tests' % name) + raise unittest.SkipTest(f'No {name} for these tests') return pkg, False, setup_module diff --git a/nibabel/orientations.py 
b/nibabel/orientations.py index 01ae7b5866..6a18f731f9 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -113,8 +113,7 @@ def ornt_transform(start_ornt, end_ornt): if start_ornt.shape != end_ornt.shape: raise ValueError("The orientations must have the same shape") if start_ornt.shape[1] != 2: - raise ValueError("Invalid shape for an orientation: %s" % - (start_ornt.shape,)) + raise ValueError(f"Invalid shape for an orientation: {start_ornt.shape}") result = np.empty_like(start_ornt) for end_in_idx, (end_out_idx, end_flip) in enumerate(end_ornt): for start_in_idx, (start_out_idx, start_flip) in enumerate(start_ornt): @@ -296,7 +295,7 @@ def ornt2axcodes(ornt, labels=None): continue axint = int(np.round(axno)) if axint != axno: - raise ValueError('Non integer axis number %f' % axno) + raise ValueError(f'Non integer axis number {axno:f}') elif direction == 1: axcode = labels[axint][1] elif direction == -1: @@ -336,7 +335,7 @@ def axcodes2ornt(axcodes, labels=None): labels = list(zip('LPI', 'RAS')) if labels is None else labels allowed_labels = sum([list(L) for L in labels], []) + [None] if len(allowed_labels) != len(set(allowed_labels)): - raise ValueError('Duplicate labels in {}'.format(allowed_labels)) + raise ValueError(f'Duplicate labels in {allowed_labels}') if not set(axcodes).issubset(allowed_labels): raise ValueError('Not all axis codes {} in label set {}' .format(list(axcodes), allowed_labels)) diff --git a/nibabel/parrec.py b/nibabel/parrec.py index e9ecc91cc4..bb63d28f80 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -430,8 +430,7 @@ def vol_is_full(slice_nos, slice_max, slice_min=1): slice_set = set(range(slice_min, slice_max + 1)) if not slice_set.issuperset(slice_nos): raise ValueError( - 'Slice numbers outside inclusive range {0} to {1}'.format( - slice_min, slice_max)) + f'Slice numbers outside inclusive range {slice_min} to {slice_max}') vol_nos = np.array(vol_numbers(slice_nos)) slice_nos = np.asarray(slice_nos) is_full = np.ones(slice_nos.shape, dtype=bool) @@ -500,10 +499,10 @@ def parse_PAR_header(fobj): version, gen_dict, image_lines = _split_header(fobj) if version not in supported_versions: warnings.warn(one_line( - """ PAR/REC version '{0}' is currently not supported -- making an + f""" PAR/REC version '{version}' is currently not supported -- making an attempt to read nevertheless. Please email the NiBabel mailing list, if you are interested in adding support for this version. - """.format(version))) + """)) general_info = _process_gen_dict(gen_dict) image_defs = _process_image_lines(image_lines, version) return general_info, image_defs @@ -980,7 +979,7 @@ def get_affine(self, origin='scanner'): permute_to_psl = ACQ_TO_PSL.get(slice_orientation) if permute_to_psl is None: raise PARRECError( - "Unknown slice orientation ({0}).".format(slice_orientation)) + f"Unknown slice orientation ({slice_orientation}).") # hdr has deg, we need radians # Order is [ap, fh, rl] ap_rot, fh_rot, rl_rot = self.general_info['angulation'] * DEG2RAD @@ -1076,7 +1075,7 @@ def get_data_scaling(self, method="dv"): slope = 1.0 / scale_slope intercept = rescale_intercept / (rescale_slope * scale_slope) else: - raise ValueError("Unknown scaling method '%s'." 
% method) + raise ValueError(f"Unknown scaling method '{method}'.") reorder = self.get_sorted_slice_indices() slope = slope[reorder] intercept = intercept[reorder] diff --git a/nibabel/processing.py b/nibabel/processing.py index b3bd83d706..5be5333a5d 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -369,9 +369,9 @@ def conform(from_img, if from_img.ndim != required_ndim: raise ValueError("Only 3D images are supported.") elif len(out_shape) != required_ndim: - raise ValueError("`out_shape` must have {} values".format(required_ndim)) + raise ValueError(f"`out_shape` must have {required_ndim} values") elif len(voxel_size) != required_ndim: - raise ValueError("`voxel_size` must have {} values".format(required_ndim)) + raise ValueError(f"`voxel_size` must have {required_ndim} values") start_ornt = io_orientation(from_img.affine) end_ornt = axcodes2ornt(orientation) diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index cd3646853d..8947d513fa 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -96,7 +96,7 @@ def fillpositive(xyz, w2_thresh=None): w2 = 1.0 - np.dot(xyz, xyz) if w2 < 0: if w2 < w2_thresh: - raise ValueError('w2 should be positive, but is %e' % w2) + raise ValueError(f'w2 should be positive, but is {w2:e}') w = 0 else: w = np.sqrt(w2) diff --git a/nibabel/rstutils.py b/nibabel/rstutils.py index d0bdb655b0..1e4033b676 100644 --- a/nibabel/rstutils.py +++ b/nibabel/rstutils.py @@ -51,8 +51,7 @@ def rst_table(cell_values, cross = format_chars.pop('cross', '+') title_heading = format_chars.pop('title_heading', '*') if len(format_chars) != 0: - raise ValueError('Unexpected ``format_char`` keys {0}'.format( - ', '.join(format_chars))) + raise ValueError(f"Unexpected ``format_char`` keys {', '.join(format_chars)}") down_joiner = ' ' + down + ' ' down_starter = down + ' ' down_ender = ' ' + down @@ -66,11 +65,11 @@ def rst_table(cell_values, cell_values = np.asarray(cell_values) R, C = cell_values.shape[:2] if row_names is None: - row_names = ['row[{0}]'.format(r) for r in range(R)] + row_names = [f'row[{r}]' for r in range(R)] elif len(row_names) != R: raise ValueError('len(row_names) != number of rows') if col_names is None: - col_names = ['col[{0}]'.format(c) for c in range(C)] + col_names = [f'col[{c}]' for c in range(C)] elif len(col_names) != C: raise ValueError('len(col_names) != number of columns') row_len = max(len(name) for name in row_names) diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index 102ad8fd01..8dfe96f927 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -90,7 +90,7 @@ def load(fileobj, lazy_load=False): tractogram_file = detect_format(fileobj) if tractogram_file is None: - raise ValueError("Unknown format for 'fileobj': {}".format(fileobj)) + raise ValueError(f"Unknown format for 'fileobj': {fileobj}") return tractogram_file.load(fileobj, lazy_load=lazy_load) @@ -116,7 +116,7 @@ def save(tractogram, filename, **kwargs): tractogram_file_class = detect_format(filename) if isinstance(tractogram, Tractogram): if tractogram_file_class is None: - msg = "Unknown tractogram file format: '{}'".format(filename) + msg = f"Unknown tractogram file format: '{filename}'" raise ValueError(msg) tractogram_file = tractogram_file_class(tractogram, **kwargs) diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index 71b4bcb3be..51e7c4d7fa 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ 
-74,7 +74,7 @@ def fn_binary_op(self, value): "__floordiv__", "__truediv__", "__lshift__", "__rshift__", "__or__", "__and__", "__xor__"]: _wrap(cls, op=op, inplace=False) - _wrap(cls, op="__i{}__".format(op.strip("_")), inplace=True) + _wrap(cls, op=f"__i{op.strip('_')}__", inplace=True) for op in ["__eq__", "__ne__", "__lt__", "__le__", "__gt__", "__ge__"]: _wrap(cls, op) @@ -526,8 +526,7 @@ def __repr__(self): else: data = str(list(self)) - return "{name}({data})".format(name=self.__class__.__name__, - data=data) + return f"{self.__class__.__name__}({data})" def save(self, filename): """ Saves this :class:`ArraySequence` object to a .npz file. """ diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 627dcbb569..bad1c51a8b 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -252,9 +252,9 @@ def _write_header(fileobj, header): lines = [] lines.append(asstr(header[Field.MAGIC_NUMBER])) - lines.append("count: {0:010}".format(header[Field.NB_STREAMLINES])) + lines.append(f"count: {header[Field.NB_STREAMLINES]:010}") lines.append("datatype: Float32LE") # Always Float32LE. - lines.extend(["{0}: {1}".format(k, v) + lines.extend([f"{k}: {v}" for k, v in header.items() if k not in exclude and not k.startswith("_")]) lines.append("file: . ") # Manually add this last field. @@ -262,12 +262,12 @@ def _write_header(fileobj, header): # Check the header is well formatted. if out.count("\n") > len(lines) - 1: # \n only allowed between lines. - msg = "Key-value pairs cannot contain '\\n':\n{}".format(out) + msg = f"Key-value pairs cannot contain '\\n':\n{out}" raise HeaderError(msg) if out.count(":") > len(lines) - 1: # : only one per line (except the last one which contains END). - msg = "Key-value pairs cannot contain ':':\n{}".format(out) + msg = f"Key-value pairs cannot contain ':':\n{out}" raise HeaderError(msg) # Write header to file. @@ -338,7 +338,7 @@ def _read_header(fileobj): msg = ("Missing 'file' attribute in TCK header." " Will try to guess it.") warnings.warn(msg, HeaderWarning) - hdr['file'] = '. {}'.format(offset_data) + hdr['file'] = f'. {offset_data}' if hdr['file'].split()[0] != '.': msg = ("TCK only supports single-file - in other words the" @@ -452,7 +452,7 @@ def __str__(self): hdr = self.header info = "" - info += "\nMAGIC NUMBER: {0}".format(hdr[Field.MAGIC_NUMBER]) + info += f"\nMAGIC NUMBER: {hdr[Field.MAGIC_NUMBER]}" info += "\n" info += "\n".join(["{}: {}".format(k, v) for k, v in hdr.items() if not k.startswith('_')]) diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index 06e19248f4..aa61e89e3e 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -402,7 +402,7 @@ def _test_binary(op, arrseq, scalars, seqs, inplace=False): if op in CMP_OPS: continue - op = "__i{}__".format(op.strip("_")) + op = f"__i{op.strip('_')}__" _test_binary(op, seq, SCALARS, ARRSEQS, inplace=True) if op == "__itruediv__": diff --git a/nibabel/streamlines/tests/test_trk.py b/nibabel/streamlines/tests/test_trk.py index 8fb35fc368..968cd41500 100644 --- a/nibabel/streamlines/tests/test_trk.py +++ b/nibabel/streamlines/tests/test_trk.py @@ -373,7 +373,7 @@ def test_write_too_many_scalars_and_properties(self): # TRK supports up to 10 data_per_point. 
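# --- editor's aside, not part of the patch -----------------------------------
# The array_sequence.py hunk above builds each in-place operator name from its
# binary counterpart; str.strip('_') drops the surrounding underscores, so the
# f-string turns e.g. "__add__" into "__iadd__". A minimal runnable sketch:
for op in ["__add__", "__mul__", "__xor__"]:
    assert f"__i{op.strip('_')}__" == "__i" + op[2:]
# ------------------------------------------------------------------------------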
data_per_point = {} for i in range(10): - data_per_point['#{0}'.format(i)] = DATA['fa'] + data_per_point[f'#{i}'] = DATA['fa'] tractogram = Tractogram(DATA['streamlines'], data_per_point=data_per_point, @@ -388,7 +388,7 @@ def test_write_too_many_scalars_and_properties(self): assert_tractogram_equal(new_trk.tractogram, tractogram) # More than 10 data_per_point should raise an error. - data_per_point['#{0}'.format(i+1)] = DATA['fa'] + data_per_point[f'#{i + 1}'] = DATA['fa'] tractogram = Tractogram(DATA['streamlines'], data_per_point=data_per_point, @@ -401,7 +401,7 @@ def test_write_too_many_scalars_and_properties(self): # TRK supports up to 10 data_per_streamline. data_per_streamline = {} for i in range(10): - data_per_streamline['#{0}'.format(i)] = DATA['mean_torsion'] + data_per_streamline[f'#{i}'] = DATA['mean_torsion'] tractogram = Tractogram(DATA['streamlines'], data_per_streamline=data_per_streamline, @@ -416,7 +416,7 @@ def test_write_too_many_scalars_and_properties(self): assert_tractogram_equal(new_trk.tractogram, tractogram) # More than 10 data_per_streamline should raise an error. - data_per_streamline['#{0}'.format(i+1)] = DATA['mean_torsion'] + data_per_streamline[f'#{i + 1}'] = DATA['mean_torsion'] tractogram = Tractogram(DATA['streamlines'], data_per_streamline=data_per_streamline) diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 71f9e84db2..b52be2a5c6 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -31,7 +31,7 @@ def test_data(subdir=None, fname=None): elif subdir in ('gifti', 'nicom', 'externals'): resource = os.path.join(subdir, 'tests', 'data') else: - raise ValueError("Unknown test data directory: %s" % subdir) + raise ValueError(f"Unknown test data directory: {subdir}") if fname is not None: resource = os.path.join(resource, fname) @@ -89,7 +89,7 @@ def assert_re_in(regex, c, flags=0): for e in c: if re.match(regex, e, flags=flags): return - raise AssertionError("Not a single entry matched %r in %r" % (regex, c)) + raise AssertionError(f"Not a single entry matched {regex!r} in {c!r}") def get_fresh_mod(mod_name=__name__): @@ -199,7 +199,7 @@ class suppress_warnings(error_warnings): def runif_extra_has(test_str): """Decorator checks to see if NIPY_EXTRA_TESTS env var contains test_str""" - return unittest.skipUnless(test_str in EXTRA_SET, "Skip {0} tests.".format(test_str)) + return unittest.skipUnless(test_str in EXTRA_SET, f"Skip {test_str} tests.") def assert_arr_dict_equal(dict1, dict2): diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index c7352c3f89..688ee66674 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ b/nibabel/tests/data/check_parrec_reslice.py @@ -71,6 +71,4 @@ def gmean_norm(data): fixed_img = resample_img2img(normal_img, funny_img) fixed_data = fixed_img.get_fdata() difference_data = normal_normed - gmean_norm(fixed_data) - print('RMS resliced {:<52} : {}'.format( - parfile, - np.sqrt(np.sum(difference_data ** 2)))) + print(f'RMS resliced {parfile:<52} : {np.sqrt(np.sum(difference_data ** 2))}') diff --git a/nibabel/tests/nibabel_data.py b/nibabel/tests/nibabel_data.py index 3c1b58502d..663d7845a8 100644 --- a/nibabel/tests/nibabel_data.py +++ b/nibabel/tests/nibabel_data.py @@ -46,4 +46,4 @@ def needs_nibabel_data(subdir=None): # Path should not be empty (as is the case for not-updated submodules) have_files = exists(required_path) and len(listdir(required_path)) > 0 return unittest.skipUnless(have_files, - "Need files in {0} for 
these tests".format(required_path)) + f"Need files in {required_path} for these tests") diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py index 0027cc36b2..bc7e9977f0 100644 --- a/nibabel/tests/scriptrunner.py +++ b/nibabel/tests/scriptrunner.py @@ -80,7 +80,7 @@ def __init__(self, self.local_script_dir = local_script_dir(script_sdir) self.local_module_dir = local_module_dir(module_sdir) if debug_print_var is None: - debug_print_var = '{0}_DEBUG_PRINT'.format(module_sdir.upper()) + debug_print_var = f'{module_sdir.upper()}_DEBUG_PRINT' self.debug_print = os.environ.get(debug_print_var, False) self.output_processor = output_processor @@ -119,9 +119,9 @@ def run_command(self, cmd, check_code=True): # Quote any arguments with spaces. The quotes delimit the arguments # on Windows, and the arguments might be file paths with spaces. # On Unix the list elements are each separate arguments. - cmd = ['"{0}"'.format(c) if ' ' in c else c for c in cmd] + cmd = [f'"{c}"' if ' ' in c else c for c in cmd] if self.debug_print: - print("Running command '%s'" % cmd) + print(f"Running command '{cmd}'") env = os.environ if not self.local_module_dir is None: # module likely comes from the current working directory. We might need @@ -139,13 +139,13 @@ def run_command(self, cmd, check_code=True): proc.terminate() if check_code and proc.returncode != 0: raise RuntimeError( - """Command "{0}" failed with + f"""Command "{cmd}" failed with stdout ------ - {1} + {stdout} stderr ------ - {2} - """.format(cmd, stdout, stderr)) + {stderr} + """) opp = self.output_processor return proc.returncode, opp(stdout), opp(stderr) diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index a4d23aaefd..76043348c9 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -18,7 +18,7 @@ def meth(self): for imaker, params in self.obj_params(): validator(self, imaker, params) meth.__name__ = 'test_' + name[len('validate_'):] - meth.__doc__ = 'autogenerated test from {}.{}'.format(klass.__name__, name) + meth.__doc__ = f'autogenerated test from {klass.__name__}.{name}' return meth for name in dir(klass): if not name.startswith('validate_'): diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index 4b065b312f..887b231464 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -409,7 +409,7 @@ def test_keep_file_open_true_false_invalid(): with InTemporaryDirectory(), \ mock.patch('nibabel.openers.ImageOpener', CountingImageOpener), \ patch_indexed_gzip(have_igzip): - fname = 'testdata.{}'.format(filetype) + fname = f'testdata.{filetype}' # create the test data file if filetype == 'gz': with gzip.open(fname, 'wb') as fobj: diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index 9268c3fe36..1e93440269 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -655,7 +655,7 @@ def test_float_int_min_max(): arr = np.array([finf['min'], finf['max']], dtype=in_dt) # Bug in numpy 1.6.2 on PPC leading to infs - abort if not np.all(np.isfinite(arr)): - print('Hit PPC max -> inf bug; skip in_type %s' % in_dt) + print(f'Hit PPC max -> inf bug; skip in_type {in_dt}') continue for out_dt in IUINT_TYPES: try: diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index 57d5b36f38..56671cdf7d 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -141,7 +141,7 @@ def 
test_data_path(with_nimd_env): tmpfile = pjoin(tmpdir, 'config.ini') with open(tmpfile, 'wt') as fobj: fobj.write('[DATA]\n') - fobj.write('path = %s' % tst_pth) + fobj.write(f'path = {tst_pth}') nibd.get_nipy_user_dir = lambda: tmpdir assert get_data_path() == tst_list + def_dirs + [tmpdir] nibd.get_nipy_user_dir = lambda: fake_user_dir @@ -152,7 +152,7 @@ def test_data_path(with_nimd_env): tmpfile = pjoin(tmpdir, 'an_example.ini') with open(tmpfile, 'wt') as fobj: fobj.write('[DATA]\n') - fobj.write('path = %s\n' % tst_pth) + fobj.write(f'path = {tst_pth}\n') tmpfile = pjoin(tmpdir, 'another_example.ini') with open(tmpfile, 'wt') as fobj: fobj.write('[DATA]\n') diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index cf56dd598d..a22e633d5a 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -100,8 +100,7 @@ def test_dep_func(self): with pytest.raises(ExpiredDeprecationError): func() assert (func.__doc__ == - 'foo\n\n* Raises {} as of version: 1.8\n' - .format(ExpiredDeprecationError)) + f'foo\n\n* Raises {ExpiredDeprecationError} as of version: 1.8\n') func = dec('foo', '1.2', '1.8')(func_no_doc) with pytest.raises(ExpiredDeprecationError): func() diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index e419eb8868..a08a24d102 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -67,7 +67,7 @@ def test_type_info(): ld_dict = dbl_dict.copy() ld_dict['width'] = width else: - raise ValueError("Unexpected float type {} to test".format(np.longdouble)) + raise ValueError(f"Unexpected float type {np.longdouble} to test") assert ld_dict == infod diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index a19289037f..cd1ea18709 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -57,8 +57,7 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, if expect_success: # Check that the sniff returned is appropriate. 
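# --- editor's aside, not part of the patch -----------------------------------
# In the test_deprecator.py hunk above a class object is interpolated into an
# f-string. f-strings apply str() just as str.format() does, so the rendered
# docstring is unchanged by the conversion. _DemoError is a hypothetical
# stand-in for ExpiredDeprecationError:
class _DemoError(Exception):
    pass

assert (f'* Raises {_DemoError} as of version: 1.8'
        == '* Raises {} as of version: 1.8'.format(_DemoError))
# ------------------------------------------------------------------------------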
- new_msg = '%s returned sniff==None (%s)' % (img_klass.__name__, - msg) + new_msg = f'{img_klass.__name__} returned sniff==None ({msg})' expected_sizeof_hdr = getattr(img_klass.header_class, 'sizeof_hdr', 0) current_sizeof_hdr = 0 if new_sniff is None else \ diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 71f0435f1a..fdf2d93dde 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -89,7 +89,7 @@ def test_read_img_data_nifti(): with pytest.raises(ImageFileError): read_img_data(img) # Make a filemap - froot = 'an_image_{0}'.format(i) + froot = f'an_image_{i}' img.file_map = img.filespec_to_file_map(froot) # Trying to read from this filemap will generate an error because # we are going to read from files that do not exist diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index 28144f3af4..386dca2cd9 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -35,7 +35,7 @@ def test_module_removal(): for module in _filter(MODULE_SCHEDULE): with pytest.raises(ImportError): __import__(module) - assert False, "Time to remove %s" % module + assert False, f"Time to remove {module}" def test_object_removal(): @@ -44,7 +44,7 @@ def test_object_removal(): module = __import__(module_name) except ImportError: continue - assert not hasattr(module, obj), "Time to remove %s.%s" % (module_name, obj,) + assert not hasattr(module, obj), f"Time to remove {module_name}.{obj}" def test_attribute_removal(): @@ -57,7 +57,7 @@ def test_attribute_removal(): klass = getattr(module, cls) except AttributeError: continue - assert not hasattr(klass, attr), "Time to remove %s.%s.%s" % (module_name, cls, attr,) + assert not hasattr(klass, attr), f"Time to remove {module_name}.{cls}.{attr}" # diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index 9ef8dd3bad..3c33eb5740 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -188,7 +188,7 @@ def check_int_a2f(in_type, out_type): # Bug in numpy 1.6.2 on PPC leading to infs - abort if not np.all(np.isfinite(data)): if DEBUG: - print('Hit PPC max -> inf bug; skip in_type %s' % in_type) + print(f'Hit PPC max -> inf bug; skip in_type {in_type}') return else: # Funny behavior with complex256 data = np.zeros((2,), in_type) diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 87d28d8245..591f85343a 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -175,7 +175,7 @@ def test_help(): continue # do not test this one code, stdout, stderr = run_command([cmd, '--help']) assert code == 0 - assert_re_in(".*%s" % cmd, stdout) + assert_re_in(f".*{cmd}", stdout) assert_re_in(".*Usage", stdout) # Some third party modules might like to announce some Deprecation # etc warnings, see e.g. 
https://travis-ci.org/nipy/nibabel/jobs/370353602
@@ -194,15 +194,15 @@ def test_nib_nifti_dx():
     clean_hdr = pjoin(DATA_PATH, 'nifti1.hdr')
     cmd = ['nib-nifti-dx', clean_hdr]
     code, stdout, stderr = run_command(cmd)
-    assert stdout.strip() == 'Header for "%s" is clean' % clean_hdr
+    assert stdout.strip() == f'Header for "{clean_hdr}" is clean'
     dirty_hdr = pjoin(DATA_PATH, 'analyze.hdr')
     cmd = ['nib-nifti-dx', dirty_hdr]
     code, stdout, stderr = run_command(cmd)
-    expected = """Picky header check output for "%s"
+    expected = f"""Picky header check output for "{dirty_hdr}"
 
 pixdim[0] (qfac) should be 1 (default) or -1
 magic string "" is not valid
-sform_code 11776 not valid""" % (dirty_hdr,)
+sform_code 11776 not valid"""
     # Split strings to remove line endings
     assert stdout == expected
diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py
index 65c0759bc6..58f05180fa 100644
--- a/nibabel/tests/test_spatialimages.py
+++ b/nibabel/tests/test_spatialimages.py
@@ -630,9 +630,9 @@ def test_load_mmap(self):
                     back_img = func(param1, **kwargs)
                     back_data = np.asanyarray(back_img.dataobj)
                     if expected_mode is None:
-                        assert not isinstance(back_data, np.memmap), 'Should not be a %s' % img_klass.__name__
+                        assert not isinstance(back_data, np.memmap), f'Should not be a {img_klass.__name__}'
                     else:
-                        assert isinstance(back_data, np.memmap), 'Not a %s' % img_klass.__name__
+                        assert isinstance(back_data, np.memmap), f'Not a {img_klass.__name__}'
                     if self.check_mmap_mode:
                         assert back_data.mode == expected_mode
                     del back_img, back_data
diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py
index fc63556edc..011e16d47d 100644
--- a/nibabel/tests/test_wrapstruct.py
+++ b/nibabel/tests/test_wrapstruct.py
@@ -347,7 +347,7 @@ class MyHdr(self.header_class):
             # Speculating that we can set code value 0 or 1
             new_code = 1 if code == 0 else 0
             hdr[key] = new_code
-            assert hdr.get_value_label(key) == '<unknown code {0}>'.format(new_code)
+            assert hdr.get_value_label(key) == f'<unknown code {new_code}>'
 
 
 class MyWrapStruct(WrapStruct):
diff --git a/nibabel/trackvis.py b/nibabel/trackvis.py
index 3b46336bd8..f18405b0d0 100644
--- a/nibabel/trackvis.py
+++ b/nibabel/trackvis.py
@@ -167,8 +167,7 @@ def read(fileobj, as_generator=False, points_space=None, strict=True):
         else:
             hdr = hdr.newbyteorder()
         if hdr['hdr_size'] != 1000:
-            raise HeaderError('Invalid hdr_size of %s'
-                              % hdr['hdr_size'])
+            raise HeaderError(f"Invalid hdr_size of {hdr['hdr_size']}")
         endianness = swapped_code
     # Check version and adapt structure accordingly
     version = hdr['version']
@@ -248,8 +247,7 @@ def track_gen():
     # Raise error if we didn't get as many streams as claimed
     if n_streams_required != np.inf and n_streams < n_streams_required:
         raise DataError(
-            'Expecting {0} streamlines, found only {1}'.format(
-                stream_count, n_streams))
+            f'Expecting {stream_count} streamlines, found only {n_streams}')
 
     streamlines = track_gen()
     if not as_generator:
@@ -428,8 +426,7 @@ def write(fileobj, streamlines, hdr_mapping=None, endianness=None,
                 raise DataError('Expecting 0 scalars per point')
             else:
                 if scalars.shape != (n_pts, n_s):
-                    raise DataError('Scalars should be shape (%s, %s)' %
-                                    (n_pts, n_s))
+                    raise DataError(f'Scalars should be shape ({n_pts}, {n_s})')
                 if scalars.dtype != f4dt:
                     scalars = scalars.astype(f4dt)
                 pts = np.c_[pts, scalars]
@@ -439,7 +436,7 @@ def write(fileobj, streamlines, hdr_mapping=None, endianness=None,
                 raise DataError('Expecting 0 properties per point')
             else:
                 if props.size != n_p:
-                    raise DataError('Properties should be size %s' % n_p)
+                    raise DataError(f'Properties should be size {n_p}')
                 if props.dtype != f4dt:
                     props = props.astype(f4dt)
                 fileobj.write(props.tobytes())
@@ -480,7 +477,7 @@ def _check_hdr_points_space(hdr, points_space):
             raise HeaderError('Cannot convert between voxels and voxmm when '
                               '"voxel_sizes" all 0')
         if np.any(voxel_size == 0):
-            warnings.warn('zero values in "voxel_size" - %s' % voxel_size)
+            warnings.warn(f'zero values in "voxel_size" - {voxel_size}')
         return
     elif points_space == 'rasmm':
         try:
diff --git a/nibabel/viewers.py b/nibabel/viewers.py
index 27a750bc0f..509fa99ef2 100644
--- a/nibabel/viewers.py
+++ b/nibabel/viewers.py
@@ -213,8 +213,8 @@ def __init__(self, data, affine=None, axes=None, title=None):
         self._draw()
 
     def __repr__(self):
-        title = '' if self._title is None else ('%s ' % self._title)
-        vol = '' if self.n_volumes <= 1 else (', %s' % self.n_volumes)
+        title = '' if self._title is None else (f'{self._title} ')
+        vol = '' if self.n_volumes <= 1 else (f', {self.n_volumes}')
         r = ('<%s: %s(%s, %s, %s%s)>'
              % (self.__class__.__name__, title, self._sizes[0],
                 self._sizes[1], self._sizes[2], vol))
diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py
index 606e06f52f..962233c395 100644
--- a/nibabel/volumeutils.py
+++ b/nibabel/volumeutils.py
@@ -108,8 +108,7 @@ def __init__(self, codes, fields=('code',), map_maker=OrderedDict):
         self.field1 = {}  # a placeholder for the check below
         for name in fields:
             if name in self.__dict__:
-                raise KeyError('Input name %s already in object dict'
-                               % name)
+                raise KeyError(f'Input name {name} already in object dict')
             self.__dict__[name] = map_maker()
         self.field1 = self.__dict__[fields[0]]
         self.add_codes(codes)
diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py
index 4eabe2504a..50d447a3fd 100644
--- a/nibabel/wrapstruct.py
+++ b/nibabel/wrapstruct.py
@@ -417,8 +417,7 @@ def structarr(self):
 
     def __str__(self):
         """ Return string representation for printing """
-        summary = "%s object, endian='%s'" % (self.__class__,
-                                              self.endianness)
+        summary = f"{self.__class__} object, endian='{self.endianness}'"
         return '\n'.join([summary, pretty_mapping(self)])
 
     def as_byteswapped(self, endianness=None):
@@ -529,16 +528,16 @@ def get_value_label(self, fieldname):
         'two'
         """
         if fieldname not in self._field_recoders:
-            raise ValueError('%s not a coded field' % fieldname)
+            raise ValueError(f'{fieldname} not a coded field')
         code = int(self._structarr[fieldname])
         try:
             return self._field_recoders[fieldname].label[code]
         except KeyError:
-            return '<unknown code {0}>'.format(code)
+            return f'<unknown code {code}>'
 
     def __str__(self):
         """ Return string representation for printing """
-        summary = "%s object, endian='%s'" % (self.__class__, self.endianness)
+        summary = f"{self.__class__} object, endian='{self.endianness}'"
 
         def _getter(obj, key):
             try:
diff --git a/nisext/sexts.py b/nisext/sexts.py
index 9ca3519f45..a2b1a10af7 100644
--- a/nisext/sexts.py
+++ b/nisext/sexts.py
@@ -160,7 +160,7 @@ def version_getter(pkg_name):
                               msgs['opt suffix'])
         return
     elif status == 'no-version':
-        raise RuntimeError('Cannot find version for %s' % pkg_name)
+        raise RuntimeError(f'Cannot find version for {pkg_name}')
     assert status == 'low-version'
     if not optional_tf:
         raise RuntimeError(msgs['version too old'] % (have_version,
@@ -253,7 +253,7 @@ def run(self):
             froot, ext = splitext(fname)
             bat_file = pjoin(pth, froot + '.bat')
             bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname)
-            log.info("Making %s wrapper for %s" % (bat_file, filepath))
+            log.info(f"Making {bat_file} wrapper for {filepath}")
             if self.dry_run:
                 continue
             with
open(bat_file, 'wt') as fobj: diff --git a/nisext/testers.py b/nisext/testers.py index f324d272b4..e661de72a2 100644 --- a/nisext/testers.py +++ b/nisext/testers.py @@ -137,7 +137,7 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): os.environ['PYTHONPATH'] = r'"{pkg_path}"' + os.path.pathsep + PYTHONPATH """.format(**locals()) if print_location: - p_loc = 'print(%s.__file__);' % mod_name + p_loc = f'print({mod_name}.__file__);' else: p_loc = '' cwd = os.getcwd() @@ -154,7 +154,7 @@ def run_mod_cmd(mod_name, pkg_path, cmd, script_dir=None, print_location=True): import {mod_name} {p_loc} {cmd}""".format(**locals())) - res = back_tick('{0} script.py'.format(PYTHON), ret_err=True) + res = back_tick(f'{PYTHON} script.py', ret_err=True) finally: os.chdir(cwd) shutil.rmtree(tmpdir) @@ -194,8 +194,7 @@ def install_from_to(from_dir, to_dir, py_lib_sdir=PY_LIB_SDIR, bin_sdir='bin'): py_lib_locs = ' --install-purelib=%s --install-platlib=%s' % ( site_pkgs_path, site_pkgs_path) pwd = os.path.abspath(os.getcwd()) - cmd = ('%s setup.py --quiet install --prefix=%s %s' % - (PYTHON, to_dir, py_lib_locs)) + cmd = (f'{PYTHON} setup.py --quiet install --prefix={to_dir} {py_lib_locs}') try: os.chdir(from_dir) back_tick(cmd) @@ -267,11 +266,11 @@ def contexts_print_info(mod_name, repo_path, install_path): out_fname = pjoin(install_path, 'test.zip') try: os.chdir(repo_path) - back_tick('git archive --format zip -o %s HEAD' % out_fname) + back_tick(f'git archive --format zip -o {out_fname} HEAD') finally: os.chdir(pwd) install_from_zip(out_fname, install_path, None) - cmd_str = 'print(%s.get_info())' % mod_name + cmd_str = f'print({mod_name}.get_info())' print(run_mod_cmd(mod_name, site_pkgs_path, cmd_str)[0]) # now test install into a directory from the repository install_from_to(repo_path, install_path, PY_LIB_SDIR) @@ -444,7 +443,7 @@ def sdist_tests(mod_name, repo_path=None, label='fast', doctests=True): install_from_zip(zip_fname, install_path, pf, PY_LIB_SDIR, 'bin') site_pkgs_path = pjoin(install_path, PY_LIB_SDIR) script_path = pjoin(install_path, 'bin') - cmd = "%s.test(label='%s', doctests=%s)" % (mod_name, label, doctests) + cmd = f"{mod_name}.test(label='{label}', doctests={doctests})" stdout, stderr = run_mod_cmd(mod_name, site_pkgs_path, cmd, @@ -479,7 +478,7 @@ def bdist_egg_tests(mod_name, repo_path=None, label='fast', doctests=True): 'bdist_egg', '*.egg') zip_extract_all(zip_fname, install_path) - cmd = "%s.test(label='%s', doctests=%s)" % (mod_name, label, doctests) + cmd = f"{mod_name}.test(label='{label}', doctests={doctests})" stdout, stderr = run_mod_cmd(mod_name, install_path, cmd, @@ -524,8 +523,7 @@ def make_dist(repo_path, out_dir, setup_params, zipglob): pwd = os.path.abspath(os.getcwd()) try: os.chdir(repo_path) - back_tick('%s setup.py %s --dist-dir=%s' - % (PYTHON, setup_params, out_dir)) + back_tick(f'{PYTHON} setup.py {setup_params} --dist-dir={out_dir}') zips = glob(pjoin(out_dir, zipglob)) if len(zips) != 1: raise OSError('There must be one and only one %s file, ' diff --git a/nisext/tests/test_testers.py b/nisext/tests/test_testers.py index 08fa70cd1a..8c8a09633f 100644 --- a/nisext/tests/test_testers.py +++ b/nisext/tests/test_testers.py @@ -14,7 +14,7 @@ def test_back_tick(): assert_equal(back_tick(cmd), "Hello") assert_equal(back_tick(cmd, ret_err=True), ("Hello", "")) assert_equal(back_tick(cmd, True, False), (b"Hello", b"")) - cmd = '{0} -c "raise ValueError()"'.format(PYTHON) + cmd = f'{PYTHON} -c "raise ValueError()"' 
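# --- editor's aside, not part of the patch -----------------------------------
# The test_testers.py conversion above nests double quotes inside a
# single-quoted f-string; no escaping is needed when the delimiters differ.
# PYTHON is given a made-up value for the demo:
PYTHON = 'python3'
assert f'{PYTHON} -c "raise ValueError()"' == 'python3 -c "raise ValueError()"'
# ------------------------------------------------------------------------------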
assert_raises(RuntimeError, back_tick, cmd) diff --git a/tools/make_tarball.py b/tools/make_tarball.py index 7a9cba1269..69c901d67d 100755 --- a/tools/make_tarball.py +++ b/tools/make_tarball.py @@ -10,8 +10,8 @@ from toollib import * tag = commands.getoutput('git describe') -base_name = 'nibabel-%s' % tag -tar_name = '%s.tgz' % base_name +base_name = f'nibabel-{tag}' +tar_name = f'{base_name}.tgz' # git archive is weird: Even if I give it a specific path, it still won't # archive the whole tree. It seems the only way to get the whole tree is to cd @@ -22,4 +22,4 @@ cd('..') git_tpl = 'git archive --format=tar --prefix={0}/ HEAD | gzip > {1}' c(git_tpl.format(base_name, tar_name)) -c('mv {0} tools/'.format(tar_name)) +c(f'mv {tar_name} tools/') diff --git a/versioneer.py b/versioneer.py index eec2ab0f07..99ffbc017b 100644 --- a/versioneer.py +++ b/versioneer.py @@ -397,20 +397,20 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, if e.errno == errno.ENOENT: continue if verbose: - print("unable to run %s" % dispcmd) + print(f"unable to run {dispcmd}") print(e) return None, None else: if verbose: - print("unable to find command, tried %s" % (commands,)) + print(f"unable to find command, tried {commands}") return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) + print(f"unable to run {dispcmd} (error)") + print(f"stdout was {stdout}") return None, p.returncode return stdout, p.returncode @@ -1023,9 +1023,9 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) + print(f"discarding '{','.join(refs - tags)}', no digits") if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) + print(f"likely tags: {','.join(sorted(tags))}") for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): @@ -1036,7 +1036,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): if not re.match(r'\d', r): continue if verbose: - print("picking %s" % r) + print(f"picking {r}") return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, @@ -1065,14 +1065,14 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): hide_stderr=True) if rc != 0: if verbose: - print("Directory %s not under git control" % root) + print(f"Directory {root} not under git control") raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", - "--match", "%s*" % tag_prefix], + "--match", f"{tag_prefix}*"], cwd=root) # --long was added in git-1.5.5 if describe_out is None: @@ -1105,8 +1105,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
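# --- editor's aside, not part of the patch -----------------------------------
# The regex above parses `git describe --tags --long` output into tag,
# commit distance and short hash; the describe string here is made up:
import re
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', 'v3.1.0-12-gdeadbee')
assert mo.groups() == ('v3.1.0', '12', 'deadbee')
# ------------------------------------------------------------------------------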
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = (f"unable to parse git-describe output: '{describe_out}'") return pieces # tag @@ -1173,7 +1172,7 @@ def do_vcs_install(manifest_in, versionfile_source, ipy): pass if not present: f = open(".gitattributes", "a+") - f.write("%s export-subst\n" % versionfile_source) + f.write(f"{versionfile_source} export-subst\n") f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) @@ -1247,7 +1246,7 @@ def write_to_version_file(filename, versions): with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) - print("set %s to '%s'" % (filename, versions["version"])) + print(f"set {filename} to '{versions['version']}'") def plus_or_dot(pieces): @@ -1315,13 +1314,13 @@ def render_pep440_post(pieces): if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] + rendered += f"g{pieces['short']}" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" - rendered += "+g%s" % pieces["short"] + rendered += f"+g{pieces['short']}" return rendered @@ -1412,7 +1411,7 @@ def render(pieces, style): elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: - raise ValueError("unknown style '%s'" % style) + raise ValueError(f"unknown style '{style}'") return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, @@ -1437,7 +1436,7 @@ def get_versions(verbose=False): assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) - assert handlers, "unrecognized VCS '%s'" % cfg.VCS + assert handlers, f"unrecognized VCS '{cfg.VCS}'" verbose = verbose or cfg.verbose assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" @@ -1458,7 +1457,7 @@ def get_versions(verbose=False): keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: - print("got version from expanded keyword %s" % ver) + print(f"got version from expanded keyword {ver}") return ver except NotThisMethod: pass @@ -1466,7 +1465,7 @@ def get_versions(verbose=False): try: ver = versions_from_file(versionfile_abs) if verbose: - print("got version from file %s %s" % (versionfile_abs, ver)) + print(f"got version from file {versionfile_abs} {ver}") return ver except NotThisMethod: pass @@ -1477,7 +1476,7 @@ def get_versions(verbose=False): pieces = from_vcs_f(cfg.tag_prefix, root, verbose) ver = render(pieces, cfg.style) if verbose: - print("got version from VCS %s" % ver) + print(f"got version from VCS {ver}") return ver except NotThisMethod: pass @@ -1486,7 +1485,7 @@ def get_versions(verbose=False): if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: - print("got version from parentdir %s" % ver) + print(f"got version from parentdir {ver}") return ver except NotThisMethod: pass @@ -1539,12 +1538,12 @@ def finalize_options(self): def run(self): vers = get_versions(verbose=True) - print("Version: %s" % vers["version"]) - print(" full-revisionid: %s" % vers.get("full-revisionid")) - print(" dirty: %s" % vers.get("dirty")) - print(" date: %s" % vers.get("date")) + print(f"Version: {vers['version']}") + print(f" full-revisionid: {vers.get('full-revisionid')}") + print(f" dirty: {vers.get('dirty')}") + print(f" date: {vers.get('date')}") if vers["error"]: - print(" error: %s" % vers["error"]) 
+ print(f" error: {vers['error']}") cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools @@ -1579,7 +1578,7 @@ def run(self): if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) - print("UPDATING %s" % target_versionfile) + print(f"UPDATING {target_versionfile}") write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py @@ -1598,7 +1597,7 @@ def run(self): cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) + print(f"UPDATING {target_versionfile}") write_to_version_file(target_versionfile, versions) _build_exe.run(self) @@ -1627,7 +1626,7 @@ def run(self): cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) + print(f"UPDATING {target_versionfile}") write_to_version_file(target_versionfile, versions) _py2exe.run(self) @@ -1666,7 +1665,7 @@ def make_release_tree(self, base_dir, files): # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) - print("UPDATING %s" % target_versionfile) + print(f"UPDATING {target_versionfile}") write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist @@ -1733,7 +1732,7 @@ def do_setup(): print(CONFIG_ERROR, file=sys.stderr) return 1 - print(" creating %s" % cfg.versionfile_source) + print(f" creating {cfg.versionfile_source}") with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", @@ -1752,13 +1751,13 @@ def do_setup(): except EnvironmentError: old = "" if INIT_PY_SNIPPET not in old: - print(" appending to %s" % ipy) + print(f" appending to {ipy}") with open(ipy, "a") as f: f.write(INIT_PY_SNIPPET) else: - print(" %s unmodified" % ipy) + print(f" {ipy} unmodified") else: - print(" %s doesn't exist, ok" % ipy) + print(f" {ipy} doesn't exist, ok") ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source @@ -1789,7 +1788,7 @@ def do_setup(): print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: - f.write("include %s\n" % cfg.versionfile_source) + f.write(f"include {cfg.versionfile_source}\n") else: print(" versionfile_source already in MANIFEST.in") From 789a0723140fd47af16973ded519074aa895e12f Mon Sep 17 00:00:00 2001 From: Jonathan Daniel Date: Sun, 24 May 2020 22:37:33 +0300 Subject: [PATCH 7/9] MNT: Remove v3.0 deprecations Remove also related tests --- nibabel/__init__.py | 1 - nibabel/arrayproxy.py | 7 +- nibabel/dataobj_images.py | 9 -- nibabel/ecat.py | 2 +- nibabel/filebasedimages.py | 28 ---- nibabel/minc1.py | 13 -- nibabel/orientations.py | 8 -- nibabel/tests/test_image_api.py | 6 - nibabel/tests/test_minc1.py | 17 --- nibabel/tests/test_spatialimages.py | 38 ----- nibabel/tests/test_volumeutils.py | 9 -- nibabel/volumeutils.py | 216 ---------------------------- 12 files changed, 2 insertions(+), 352 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index f99e9e0b06..10128f4e0b 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -55,7 +55,6 @@ from .minc2 import Minc2Image from .cifti2 import Cifti2Header, Cifti2Image from .gifti import GiftiImage -from .minc1 import MincImage from .freesurfer import MGHImage from .funcs import (squeeze_image, 
concat_images, four_to_three, as_closest_canonical) diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index b45405b6fb..31206f2a63 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -79,7 +79,7 @@ class ArrayProxy(object): * get_slope_inter A header should also have a 'copy' method. This requirement will go away - when the deprecated 'header' propoerty goes away. + when the deprecated 'header' property goes away. This implementation allows us to deal with Analyze and its variants, including Nifti1, and with the MGH format. @@ -269,11 +269,6 @@ def _should_keep_file_open(self, file_like, keep_file_open): persist_opener = keep_file_open or have_igzip return keep_file_open, persist_opener - @property - @deprecate_with_version('ArrayProxy.header deprecated', '2.2', '3.0') - def header(self): - return self._header - @property def shape(self): return self._shape diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index 68972a8cb1..26a2574905 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -404,15 +404,6 @@ def shape(self): def ndim(self): return self._dataobj.ndim - @deprecate_with_version('get_shape method is deprecated.\n' - 'Please use the ``img.shape`` property ' - 'instead.', - '1.2', '3.0') - def get_shape(self): - """ Return shape for image - """ - return self.shape - @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): """ Class method to create image from mapping in ``file_map`` diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 1b986f9c4c..a917a25f79 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -543,7 +543,7 @@ def get_shape(self, frame=0): x = subhdr['x_dimension'].item() y = subhdr['y_dimension'].item() z = subhdr['z_dimension'].item() - return (x, y, z) + return x, y, z def get_nframes(self): """returns number of frames""" diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 436c2cd676..766fc414d7 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -263,14 +263,6 @@ def from_filename(klass, filename): def from_file_map(klass, file_map): raise NotImplementedError - @classmethod - @deprecate_with_version('from_files class method is deprecated.\n' - 'Please use the ``from_file_map`` class method ' - 'instead.', - '1.0', '3.0') - def from_files(klass, file_map): - return klass.from_file_map(file_map) - @classmethod def filespec_to_file_map(klass, filespec): """ Make `file_map` for this class from filename `filespec` @@ -307,14 +299,6 @@ def filespec_to_file_map(klass, filespec): file_map[key] = FileHolder(filename=fname) return file_map - @classmethod - @deprecate_with_version('filespec_to_files class method is deprecated.\n' - 'Please use the "filespec_to_file_map" class ' - 'method instead.', - '1.0', '3.0') - def filespec_to_files(klass, filespec): - return klass.filespec_to_file_map(filespec) - def to_filename(self, filename): """ Write image to files implied by filename string @@ -332,21 +316,9 @@ def to_filename(self, filename): self.file_map = self.filespec_to_file_map(filename) self.to_file_map() - @deprecate_with_version('to_filespec method is deprecated.\n' - 'Please use the "to_filename" method instead.', - '1.0', '3.0') - def to_filespec(self, filename): - self.to_filename(filename) - def to_file_map(self, file_map=None): raise NotImplementedError - @deprecate_with_version('to_files method is deprecated.\n' - 'Please use the "to_file_map" method instead.', - '1.0', '3.0') - def to_files(self, file_map=None): - 
self.to_file_map(file_map) - @classmethod def make_file_map(klass, mapping=None): """ Class method to make files holder for this image type diff --git a/nibabel/minc1.py b/nibabel/minc1.py index ebf883d7b8..28a679de25 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -342,16 +342,3 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): load = Minc1Image.load - - -# Backwards compatibility -@deprecate_with_version('MincFile is deprecated; please use Minc1File instead', - since='2.0.0', until='3.0.0', warn_class=FutureWarning) -class MincFile(Minc1File): - pass - - -@deprecate_with_version('MincImage is deprecated; please use Minc1Image instead', - since='2.0.0', until='3.0.0', warn_class=FutureWarning) -class MincImage(Minc1Image): - pass diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 6a18f731f9..d53c7d5632 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -228,14 +228,6 @@ def inv_ornt_aff(ornt, shape): return np.dot(undo_flip, undo_reorder) -@deprecate_with_version('orientation_affine deprecated. ' - 'Please use inv_ornt_aff instead' - '1.3', - '3.0') -def orientation_affine(ornt, shape): - return inv_ornt_aff(ornt, shape) - - @deprecate_with_version('flip_axis is deprecated. ' 'Please use numpy.flip instead' '3.2', diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index c9a7ed6e72..93d9149a67 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -427,12 +427,6 @@ def validate_ndim(self, imaker, params): with pytest.raises(AttributeError): img.ndim = 5 - def validate_shape_deprecated(self, imaker, params): - # Check deprecated get_shape API - img = imaker() - with pytest.raises(ExpiredDeprecationError): - img.get_shape() - def validate_mmap_parameter(self, imaker, params): img = imaker() fname = img.get_filename() diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index a908ee6ad9..132df0bb09 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -100,23 +100,6 @@ ] -def test_old_namespace(): - # Check old names are defined in minc1 module and top level - # Check warnings raised - arr = np.arange(24).reshape((2, 3, 4)) - aff = np.diag([2, 3, 4, 1]) - - from .. 
import Minc1Image, MincImage - assert Minc1Image is not MincImage - with pytest.raises(ExpiredDeprecationError): - MincImage(arr, aff) - # Another old name - from ..minc1 import MincFile, Minc1File - assert MincFile is not Minc1File - with pytest.raises(ExpiredDeprecationError): - mf = MincFile(netcdf_file(EG_FNAME)) - - class _TestMincFile(object): module = minc1 file_class = Minc1File diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 58f05180fa..5dfb7603cb 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -301,15 +301,6 @@ def test_str(self): img = img_klass(np.zeros((2, 3, 4), dtype=np.int16), np.eye(4)) assert len(str(img)) > 0 - def test_get_shape(self): - # Check that get_shape raises an ExpiredDeprecationError - img_klass = self.image_class - # Assumes all possible images support int16 - # See https://github.com/nipy/nibabel/issues/58 - img = img_klass(np.arange(1, dtype=np.int16), np.eye(4)) - with pytest.raises(ExpiredDeprecationError): - img.get_shape() - def test_get_fdata(self): # Test array image and proxy image interface for floating point data img_klass = self.image_class @@ -539,35 +530,6 @@ def test_slicer(self): assert (sliced_data == img.get_data()[sliceobj]).all() assert (sliced_data == img.get_fdata()[sliceobj]).all() - def test_api_deprecations(self): - - class FakeImage(self.image_class): - - files_types = (('image', '.foo'),) - - @classmethod - def to_file_map(self, file_map=None): - pass - - @classmethod - def from_file_map(self, file_map=None): - pass - - arr = np.arange(24, dtype=np.int16).reshape((2, 3, 4)) - aff = np.eye(4) - img = FakeImage(arr, aff) - bio = BytesIO() - file_map = FakeImage.make_file_map({'image': bio}) - - with pytest.raises(ExpiredDeprecationError): - img.to_files(file_map) - with pytest.raises(ExpiredDeprecationError): - img.to_filespec('an_image') - with pytest.raises(ExpiredDeprecationError): - FakeImage.from_files(file_map) - with pytest.raises(ExpiredDeprecationError): - FakeImage.filespec_to_files('an_image') - class MmapImageMixin(object): """ Mixin for testing images that may return memory maps """ diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index f84878f55c..18bfd854f2 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -68,15 +68,6 @@ NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES -def test_deprecated_functions(): - with pytest.raises(ExpiredDeprecationError): - scale_min_max(0, 1, np.uint8, True) - with pytest.raises(ExpiredDeprecationError): - calculate_scale(np.array([-2, -1], dtype=np.int8), np.uint8, True) - with pytest.raises(ExpiredDeprecationError): - can_cast(np.float32, np.float32) - - def test__is_compressed_fobj(): # _is_compressed helper function with InTemporaryDirectory(): diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 962233c395..d460b9724a 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -372,74 +372,6 @@ def make_dt_codes(codes_seqs): return Recoder(dt_codes, fields + ['dtype', 'sw_dtype'], DtypeMapper) -@deprecate_with_version('can_cast deprecated. 
' - 'Please use arraywriter classes instead', - '1.2', - '3.0') -def can_cast(in_type, out_type, has_intercept=False, has_slope=False): - """ Return True if we can safely cast ``in_type`` to ``out_type`` - - Parameters - ---------- - in_type : numpy type - type of data we will case from - out_dtype : numpy type - type that we want to cast to - has_intercept : bool, optional - Whether we can subtract a constant from the data (before scaling) - before casting to ``out_dtype``. Default is False - has_slope : bool, optional - Whether we can use a scaling factor to adjust slope of - relationship of data to data in cast array. Default is False - - Returns - ------- - tf : bool - True if we can safely cast, False otherwise - - Examples - -------- - >>> can_cast(np.float64, np.float32) # doctest: +SKIP - True - >>> can_cast(np.complex128, np.float32) # doctest: +SKIP - False - >>> can_cast(np.int64, np.float32) # doctest: +SKIP - True - >>> can_cast(np.float32, np.int16) # doctest: +SKIP - False - >>> can_cast(np.float32, np.int16, False, True) # doctest: +SKIP - True - >>> can_cast(np.int16, np.uint8) # doctest: +SKIP - False - - Whether we can actually cast int to uint when we don't have an intercept - depends on the data. That's why this function isn't very useful. But we - assume that an integer is using its full range, and check whether scaling - works in that situation. - - Here we need an intercept to scale the full range of an int to a uint - - >>> can_cast(np.int16, np.uint8, False, True) # doctest: +SKIP - False - >>> can_cast(np.int16, np.uint8, True, True) # doctest: +SKIP - True - """ - in_dtype = np.dtype(in_type) - # Whether we can cast depends on the data, and we've only got the type. - # Let's assume integers use all of their range but floats etc not - if in_dtype.kind in 'iu': - info = np.iinfo(in_dtype) - data = np.array([info.min, info.max], dtype=in_dtype) - else: # Float or complex or something. Any old thing will do - data = np.ones((1,), in_type) - from .arraywriters import make_array_writer, WriterError - try: - make_array_writer(data, out_type, has_slope, has_intercept) - except WriterError: - return False - return True - - def _is_compressed_fobj(fobj): """ Return True if fobj represents a compressed data file-like object """ @@ -1009,154 +941,6 @@ def working_type(in_type, slope=1.0, inter=0.0): return val.dtype.type -@deprecate_with_version('calculate_scale deprecated. ' - 'Please use arraywriter classes instead', - '1.2', - '3.0') -def calculate_scale(data, out_dtype, allow_intercept): - """ Calculate scaling and optional intercept for data - - Parameters - ---------- - data : array - out_dtype : dtype - output data type in some form understood by ``np.dtype`` - allow_intercept : bool - If True allow non-zero intercept - - Returns - ------- - scaling : None or float - scalefactor to divide into data. None if no valid data - intercept : None or float - intercept to subtract from data. 
-    mn : None or float
-        minimum of finite value in data or None if this will not
-        be used to threshold data
-    mx : None or float
-        minimum of finite value in data, or None if this will not
-        be used to threshold data
-    """
-    # Code here is a compatibility shell around arraywriters refactor
-    in_dtype = data.dtype
-    out_dtype = np.dtype(out_dtype)
-    if np.can_cast(in_dtype, out_dtype):
-        return 1.0, 0.0, None, None
-    from .arraywriters import make_array_writer, WriterError, get_slope_inter
-    try:
-        writer = make_array_writer(data, out_dtype, True, allow_intercept)
-    except WriterError as e:
-        raise ValueError(str(e))
-    if out_dtype.kind in 'fc':
-        return (1.0, 0.0, None, None)
-    mn, mx = writer.finite_range()
-    if (mn, mx) == (np.inf, -np.inf):  # No valid data
-        return (None, None, None, None)
-    if in_dtype.kind not in 'fc':
-        mn, mx = (None, None)
-    return get_slope_inter(writer) + (mn, mx)
-
-
-@deprecate_with_version('scale_min_max deprecated. Please use arraywriter '
-                        'classes instead.',
-                        '1.2',
-                        '3.0')
-def scale_min_max(mn, mx, out_type, allow_intercept):
-    """ Return scaling and intercept min, max of data, given output type
-
-    Returns ``scalefactor`` and ``intercept`` to best fit data with
-    given ``mn`` and ``mx`` min and max values into range of data type
-    with ``type_min`` and ``type_max`` min and max values for type.
-
-    The calculated scaling is therefore::
-
-        scaled_data = (data-intercept) / scalefactor
-
-    Parameters
-    ----------
-    mn : scalar
-        data minimum value
-    mx : scalar
-        data maximum value
-    out_type : numpy type
-        numpy type of output
-    allow_intercept : bool
-        If true, allow calculation of non-zero intercept. Otherwise,
-        returned intercept is always 0.0
-
-    Returns
-    -------
-    scalefactor : numpy scalar, dtype=np.maximum_sctype(np.float)
-        scalefactor by which to divide data after subtracting intercept
-    intercept : numpy scalar, dtype=np.maximum_sctype(np.float)
-        value to subtract from data before dividing by scalefactor
-
-    Examples
-    --------
-    >>> scale_min_max(0, 255, np.uint8, False)  # doctest: +SKIP
-    (1.0, 0.0)
-    >>> scale_min_max(-128, 127, np.int8, False)  # doctest: +SKIP
-    (1.0, 0.0)
-    >>> scale_min_max(0, 127, np.int8, False)  # doctest: +SKIP
-    (1.0, 0.0)
-    >>> scaling, intercept = scale_min_max(0, 127, np.int8, True)  # doctest: +SKIP
-    >>> np.allclose((0 - intercept) / scaling, -128)  # doctest: +SKIP
-    True
-    >>> np.allclose((127 - intercept) / scaling, 127)  # doctest: +SKIP
-    True
-    >>> scaling, intercept = scale_min_max(-10, -1, np.int8, True)  # doctest: +SKIP
-    >>> np.allclose((-10 - intercept) / scaling, -128)  # doctest: +SKIP
-    True
-    >>> np.allclose((-1 - intercept) / scaling, 127)  # doctest: +SKIP
-    True
-    >>> scaling, intercept = scale_min_max(1, 10, np.int8, True)  # doctest: +SKIP
-    >>> np.allclose((1 - intercept) / scaling, -128)  # doctest: +SKIP
-    True
-    >>> np.allclose((10 - intercept) / scaling, 127)  # doctest: +SKIP
-    True
-
-    Notes
-    -----
-    We don't use this function anywhere in nibabel now, it's here for API
-    compatibility only.
-
-    The large integers lead to python long types as max / min for type.
-    To contain the rounding error, we need to use the maximum numpy
-    float types when casting to float.
-    """
-    if mn > mx:
-        raise ValueError('min value > max value')
-    info = type_info(out_type)
-    mn, mx, type_min, type_max = np.array(
-        [mn, mx, info['min'], info['max']], np.maximum_sctype(np.float))
-    # with intercept
-    if allow_intercept:
-        data_range = mx - mn
-        if data_range == 0:
-            return 1.0, mn
-        type_range = type_max - type_min
-        scaling = data_range / type_range
-        intercept = mn - type_min * scaling
-        return scaling, intercept
-    # without intercept
-    if mx == 0 and mn == 0:
-        return 1.0, 0.0
-    if type_min == 0:  # uint
-        if mn < 0 and mx > 0:
-            raise ValueError('Cannot scale negative and positive '
-                             'numbers to uint without intercept')
-        if mx < 0:
-            scaling = mn / type_max
-        else:
-            scaling = mx / type_max
-    else:  # int
-        if abs(mx) >= abs(mn):
-            scaling = mx / type_max
-        else:
-            scaling = mn / type_min
-    return scaling, 0.0
-
-
 def int_scinter_ftype(ifmt, slope=1.0, inter=0.0, default=np.float32):
     """ float type containing int type `ifmt` * `slope` + `inter`
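The three functions deleted above (can_cast, calculate_scale, scale_min_max)
were thin compatibility shells around the arraywriters module, which is why
each deprecation message points there. A rough sketch of the replacement
idiom, using only names that appear in the removed shells; the sample array
is illustrative:

    import numpy as np
    from nibabel.arraywriters import (make_array_writer, get_slope_inter,
                                      WriterError)

    data = np.array([-1.5, 0.0, 300.0])  # will not fit uint8 unscaled
    try:
        # positional flags as in the removed shells: has_slope, has_intercept
        writer = make_array_writer(data, np.uint8, True, True)
    except WriterError as e:
        print('cannot scale:', e)
    else:
        slope, inter = get_slope_inter(writer)
        print(slope, inter, writer.finite_range())

The writer object answers the old can_cast question (construction fails with
WriterError when no safe scaling exists) and supplies the slope and intercept
that calculate_scale used to return.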
From ba339d7f6204483ccb282d77ee914b21aa556270 Mon Sep 17 00:00:00 2001
From: Jonathan Daniel
Date: Sun, 24 May 2020 23:54:05 +0300
Subject: [PATCH 8/9] RF: fix py2 prints, more f-strings

---
 nibabel/tests/test_round_trip.py | 22 +++++++++++-----------
 tools/bisect_nose.py             | 11 ++++++-----
 tools/gitwash_dumper.py          | 22 ++++++++++------------
 tools/profile                    | 30 +++++++++++++-----------------
 4 files changed, 40 insertions(+), 45 deletions(-)

diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py
index 79d785932d..b469856a9e 100644
--- a/nibabel/tests/test_round_trip.py
+++ b/nibabel/tests/test_round_trip.py
@@ -71,7 +71,7 @@ def big_bad_ulp(arr):
     nzs = working_arr > 0
     fl2[nzs] = np.floor(np.log(working_arr[nzs]) / LOGe2)
     fl2 = np.clip(fl2, info['minexp'], np.inf)
-    return 2**(fl2 - info['nmant'])
+    return 2 ** (fl2 - info['nmant'])
 
 
 def test_big_bad_ulp():
@@ -101,7 +101,7 @@ def test_round_trip():
     f_types = [np.float32, np.float64]
     # Expanding standard deviations
     for i, sd_10 in enumerate(sd_10s):
-        sd = 10.0**sd_10
+        sd = 10.0 ** sd_10
         V_in = rng.normal(0, sd, size=(N, 1))
         for j, in_type in enumerate(f_types):
             for k, out_type in enumerate(iuint_types):
@@ -160,7 +160,7 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type):
     Ai = arr - scaling_type(inter)
     Ais = Ai / scaling_type(slope)
     exp_abs_err = inting_err + inter_err + (
-        big_bad_ulp(Ai) + big_bad_ulp(Ais))
+            big_bad_ulp(Ai) + big_bad_ulp(Ais))
     # Relative scaling error from calculation of slope
     # This threshold needs to be 2 x larger on windows 32 bit and PPC for
     # some reason
@@ -181,14 +181,14 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type):
         rel_mx_e = rel_err[abs_fails].max()
     else:
         rel_mx_e = None
-    print (test_id,
-           np.dtype(in_type).str,
-           np.dtype(out_type).str,
-           exp_abs_mx_e,
-           abs_mx_e,
-           rel_thresh,
-           rel_mx_e,
-           slope, inter)
+    print(test_id,
+          np.dtype(in_type).str,
+          np.dtype(out_type).str,
+          exp_abs_mx_e,
+          abs_mx_e,
+          rel_thresh,
+          rel_mx_e,
+          slope, inter)
     # To help debugging failures with --pdb-failure
     np.nonzero(all_fails)
     assert this_test
diff --git a/tools/bisect_nose.py b/tools/bisect_nose.py
index d3b1c0e83c..9f702bca58 100755
--- a/tools/bisect_nose.py
+++ b/tools/bisect_nose.py
@@ -3,7 +3,7 @@
 """
 DESCRIP = 'Check nose output for given text, set sys exit for git bisect'
 EPILOG = \
-"""
+    """
 Imagine you've just detected a nose test failure.  The failure is in a
 particular test or test module - here 'test_analyze.py'.  The failure *is* in
 git branch ``main-master`` but it *is not* in tag ``v1.6.1``. Then you can
@@ -54,6 +54,7 @@
 GOOD = 0
 BAD = 1
 
+
 def call_or_untestable(cmd):
     try:
         caller(cmd)
@@ -65,7 +66,7 @@ def main():
     parser = ArgumentParser(description=DESCRIP,
                             epilog=EPILOG,
                             formatter_class=RawDescriptionHelpFormatter)
-    parser.add_argument('test_path', type=str, 
+    parser.add_argument('test_path', type=str,
                         help='Path to test')
     parser.add_argument('--error-txt', type=str,
                         help='regular expression for error of interest')
@@ -77,16 +78,16 @@ def main():
     args = parser.parse_args()
     path = os.path.abspath(args.test_path)
    if args.clean:
-        print "Cleaning"
+        print("Cleaning")
         call_or_untestable('git clean -fxd')
     if args.build:
-        print "Building"
+        print("Building")
         call_or_untestable('python setup.py build_ext -i')
     cwd = os.getcwd()
     tmpdir = tempfile.mkdtemp()
     try:
         os.chdir(tmpdir)
-        print "Testing"
+        print("Testing")
         proc = popener('nosetests ' + path)
         stdout, stderr = proc.communicate()
     finally:
diff --git a/tools/gitwash_dumper.py b/tools/gitwash_dumper.py
index 4ebfba7557..156976daf5 100755
--- a/tools/gitwash_dumper.py
+++ b/tools/gitwash_dumper.py
@@ -19,10 +19,10 @@ def clone_repo(url, branch):
     cwd = os.getcwd()
     tmpdir = tempfile.mkdtemp()
     try:
-        cmd = 'git clone %s %s' % (url, tmpdir)
+        cmd = f'git clone {url} {tmpdir}'
         call(cmd, shell=True)
         os.chdir(tmpdir)
-        cmd = 'git checkout %s' % branch
+        cmd = f'git checkout {branch}'
         call(cmd, shell=True)
     except:
         shutil.rmtree(tmpdir)
@@ -79,7 +79,7 @@ def copy_replace(replace_pairs,
     for rep_glob in rep_globs:
         fnames += fnmatch.filter(out_fnames, rep_glob)
     if verbose:
-        print '\n'.join(fnames)
+        print('\n'.join(fnames))
     for fname in fnames:
         filename_search_replace(replace_pairs, fname, False)
     for in_exp, out_exp in renames:
@@ -136,17 +136,17 @@ def make_link_targets(proj_name,
                          'and / or mailing list URLs')
     lines = []
     if not url is None:
-        lines.append('.. _%s: %s\n' % (proj_name, url))
+        lines.append(f'.. _{proj_name}: {url}\n')
     if not have_gh_url:
-        gh_url = 'https://github.com/%s/%s\n' % (user_name, repo_name)
-        lines.append('.. _`%s github`: %s\n' % (proj_name, gh_url))
+        gh_url = f'https://github.com/{user_name}/{repo_name}\n'
+        lines.append(f'.. _`{proj_name} github`: {gh_url}\n')
     if not ml_url is None:
-        lines.append('.. _`%s mailing list`: %s\n' % (proj_name, ml_url))
+        lines.append(f'.. _`{proj_name} mailing list`: {ml_url}\n')
     if len(lines) == 0:
         # Nothing to do
         return
     # A neat little header line
-    lines = ['.. %s\n' % proj_name] + lines
+    lines = [f'.. {proj_name}\n'] + lines
     out_links = open(out_link_fname, 'wt')
     out_links.writelines(lines)
     out_links.close()
@@ -175,13 +175,11 @@ def main():
                       help="github username for main repo - e.g fperez",
                       metavar="MAIN_GH_USER")
     parser.add_option("--gitwash-url", dest="gitwash_url",
-                      help="URL to gitwash repository - default %s"
-                      % GITWASH_CENTRAL,
+                      help=f"URL to gitwash repository - default {GITWASH_CENTRAL}",
                       default=GITWASH_CENTRAL,
                       metavar="GITWASH_URL")
     parser.add_option("--gitwash-branch", dest="gitwash_branch",
-                      help="branch in gitwash repository - default %s"
-                      % GITWASH_BRANCH,
+                      help=f"branch in gitwash repository - default {GITWASH_BRANCH}",
                       default=GITWASH_BRANCH,
                       metavar="GITWASH_BRANCH")
     parser.add_option("--source-suffix", dest="source_suffix",
diff --git a/tools/profile b/tools/profile
index c4ae6d6a35..b17ac454cb 100755
--- a/tools/profile
+++ b/tools/profile
@@ -11,17 +11,15 @@
 
 __docformat__ = 'restructuredtext'
 
-import sys, os
-
-from optparse import OptionParser
-from os import environ, path
+import os
+import sys
+from os import path
 
 if __name__ == "__main__":
     usage = """Usage: %s [options] ...
     """ % sys.argv[0]
 
-
     # default options
     convert2kcache = True
     displaykcachegrinder = True
@@ -35,8 +33,8 @@
         removed = sys.argv.pop(0)
 
     if not len(sys.argv):
-        print usage
-        sys.exit(1)
+        print(usage)
+        sys.exit(1)
 
     while sys.argv[0].startswith('-'):
         if sys.argv[0] in ["-l", "--level"]:
@@ -60,7 +58,7 @@
             convert2kcache = False
             displaykcachegrinder = False
         else:
-            print usage
+            print(usage)
             sys.exit(1)
         sys.argv.pop(0)
@@ -80,11 +78,10 @@
     pfilename = cmdname + ".prof"
 
     if run:
-        exec "import %s as runnable" % root
+        exec(f"import {root} as runnable")
 
         if not 'main' in runnable.__dict__:
-            print "OOPS: file/module %s has no function main defined" \
-                  % cmdname
+            print(f"OOPS: file/module {cmdname} has no function main defined")
             sys.exit(1)
 
         prof = hotshot.Profile(pfilename, lineevents=profilelines)
@@ -96,13 +93,12 @@
         except SystemExit:
             pass
 
-        print "Saving profile data into %s" % pfilename
+        print(f"Saving profile data into {pfilename}")
         prof.close()
 
-
     if printstats or pstatsfilename:
         import hotshot.stats
-        print "Loading profile file to print statistics"
+        print("Loading profile file to print statistics")
         stats = hotshot.stats.load(pfilename)
         if printstats:
             stats.strip_dirs()
@@ -115,13 +111,13 @@
     if convert2kcache:
         cmd = "hotshot2calltree -o %s %s" % (kfilename, pfilename)
         if os.system(cmd):
-            print "!!! Make sure to install kcachegrind-converters ;-)"
+            print("!!! Make sure to install kcachegrind-converters ;-)")
             sys.exit(1)
 
     if displaykcachegrinder:
         if os.system('kcachegrind %s' % kfilename):
-            print "!!! Make sure to install kcachegrind ;-)"
+            print("!!! Make sure to install kcachegrind ;-)")
             sys.exit(1)
 else:
-    print "Go away -- nothing to look here for as a module"
+    print("Go away -- nothing to look here for as a module")
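The patch above is mostly mechanical py2-to-py3 modernization: print
statements become print() calls, and %-interpolation becomes an f-string
wherever a variable is interpolated. For example (illustrative values, not
code from the patch):

    name = 'nibabel'
    # py2 statement form removed by the patch:  print "Saving %s" % name
    print('Saving %s' % name)  # same output, print as a function
    print(f'Saving {name}')    # f-string form used above, Python 3.6+

Note that tools/profile still imports the hotshot profiler, which exists only
in the Python 2 standard library, so these fixes make the script parse on
Python 3 but do not yet make it runnable there.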
From c389b8017dafb19aee5c5115797f9adda94d738c Mon Sep 17 00:00:00 2001
From: Jonathan Daniel
Date: Mon, 25 May 2020 00:42:32 +0300
Subject: [PATCH 9/9] MNT: Oops - remove deprecations

---
 nibabel/__init__.py                | 6 +++---
 nibabel/tests/test_orientations.py | 9 +--------
 nibabel/tests/test_volumeutils.py  | 3 ---
 3 files changed, 4 insertions(+), 14 deletions(-)

diff --git a/nibabel/__init__.py b/nibabel/__init__.py
index 10128f4e0b..de84533822 100644
--- a/nibabel/__init__.py
+++ b/nibabel/__init__.py
@@ -58,9 +58,9 @@
 from .freesurfer import MGHImage
 from .funcs import (squeeze_image, concat_images, four_to_three,
                     as_closest_canonical)
-from .orientations import (io_orientation, orientation_affine,
-                           flip_axis, OrientationError,
-                           apply_orientation, aff2axcodes)
+from .orientations import (io_orientation, flip_axis,
+                           OrientationError, apply_orientation,
+                           aff2axcodes)
 from .imageclasses import class_map, ext_map, all_image_classes
 from .deprecated import ModuleProxy as _ModuleProxy
 trackvis = _ModuleProxy('nibabel.trackvis')
diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py
index e2786c074e..9defbaca0e 100644
--- a/nibabel/tests/test_orientations.py
+++ b/nibabel/tests/test_orientations.py
@@ -17,8 +17,7 @@
 
 from ..orientations import (io_orientation, ornt_transform, inv_ornt_aff,
                             apply_orientation, OrientationError,
-                            ornt2axcodes, axcodes2ornt, aff2axcodes,
-                            orientation_affine)
+                            ornt2axcodes, axcodes2ornt, aff2axcodes)
 
 from ..affines import from_matvec, to_matvec
 
@@ -353,9 +352,3 @@ def test_inv_ornt_aff():
     with pytest.raises(OrientationError):
         inv_ornt_aff([[0, 1], [1, -1], [np.nan, np.nan]],
                      (3, 4, 5))
-
-def test_orientation_affine_deprecation():
-    aff1 = inv_ornt_aff([[0, 1], [1, -1], [2, 1]], (3, 4, 5))
-    with pytest.deprecated_call():
-        aff2 = orientation_affine([[0, 1], [1, -1], [2, 1]], (3, 4, 5))
-    assert_array_equal(aff1, aff2)
diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py
index 18bfd854f2..a3e378f0a4 100644
--- a/nibabel/tests/test_volumeutils.py
+++ b/nibabel/tests/test_volumeutils.py
@@ -30,9 +30,6 @@
                           array_to_file,
                           allopen,  # for backwards compatibility
                           fname_ext_ul_case,
-                          calculate_scale,  # Deprecated
-                          can_cast,  # Deprecated
-                          scale_min_max,  # Deprecated
                           write_zeros,
                           seek_tell,
                           apply_read_scaling,
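End state of the series for the orientation helpers: orientation_affine is
gone from the public namespace, while flip_axis remains importable but
deprecated in favor of numpy.flip, with removal scheduled for 4.0. A quick
equivalence check, silencing the expected warning -- a sketch assuming
nibabel with this series applied:

    import warnings
    import numpy as np
    import nibabel as nib

    a = np.arange(6).reshape(2, 3)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', DeprecationWarning)
        assert np.array_equal(nib.flip_axis(a, axis=1), np.flip(a, axis=1))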