Enforce linting rule RUF012 #259

Merged
merged 3 commits on May 1, 2024
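
For context, RUF012 is the Ruff lint rule "mutable class attributes should be annotated with `typing.ClassVar`". A minimal sketch of the pitfall it guards against, using hypothetical classes rather than anything from caput:

```python
from typing import ClassVar


class Unannotated:
    # RUF012 flags this: a mutable class attribute with no annotation.
    # Every instance shares this single list object.
    items = []


class Annotated:
    # The fix: annotate the attribute to declare the sharing as intentional.
    items: ClassVar[list] = []


a, b = Unannotated(), Unannotated()
a.items.append(1)
print(b.items)  # prints [1]; the mutation is visible through every instance
```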
8 changes: 3 additions & 5 deletions caput/config.py
@@ -217,13 +217,13 @@ def read_config(self, config, compare_keys=False, use_defaults=True):
excluded_keys = set(prop_keys)
if set(config_keys) - excluded_keys:
raise CaputConfigError(
"Unused configuration keys: [%s]"
"Unused configuration keys: [%s]" # noqa: UP031
% ", ".join(set(config_keys) - excluded_keys),
)
if not use_defaults:
if set(prop_keys) - set(config_keys):
raise CaputConfigError(
"Missing configuration keys: [%s]"
"Missing configuration keys: [%s]" # noqa: UP031
% ", ".join(set(prop_keys) - set(config_keys)),
)

@@ -374,9 +374,7 @@ class Project:

def _prop(val):
if not isinstance(val, (list, tuple)):
raise CaputConfigError(
"Expected to receive a list, but got '%s.'" % repr(val)
)
raise CaputConfigError(f"Expected to receive a list, but got '{val!r}.'")

if type_:
for ii, item in enumerate(val):
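Most of this diff converts %-formatting to f-strings; the two `# noqa: UP031` comments above instead silence Ruff's UP031 (printf-style formatting) rule where the % form is deliberately kept. A rough sketch of the two forms, with made-up keys:

```python
config_keys = {"alpha", "beta"}
excluded_keys = {"alpha"}

# The form kept in config.py, with the rule silenced via `# noqa: UP031`:
msg = "Unused configuration keys: [%s]" % ", ".join(config_keys - excluded_keys)

# The f-string form that UP031 would otherwise push towards:
msg = f"Unused configuration keys: [{', '.join(config_keys - excluded_keys)}]"
```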
41 changes: 18 additions & 23 deletions caput/memh5.py
@@ -317,7 +317,7 @@ def __getitem__(self, name):
def __delitem__(self, name):
"""Delete item from group."""
if name not in self:
raise KeyError("Key %s not present." % name)
raise KeyError(f"Key {name} not present.")
path = posixpath.join(self.name, name)
parent_path, name = posixpath.split(path)
parent = self._storage_root[parent_path]
@@ -341,7 +341,7 @@ def require_dataset(self, name, shape, dtype, **kwargs):
except KeyError:
return self.create_dataset(name, shape=shape, dtype=dtype, **kwargs)
if is_group(d):
msg = "Entry '%s' exists and is not a Dataset." % name
msg = f"Entry '{name}' exists and is not a Dataset."
raise TypeError(msg)

return d
@@ -353,7 +353,7 @@ def require_group(self, name):
except KeyError:
return self.create_group(name)
if not is_group(g):
msg = "Entry '%s' exists and is not a Group." % name
msg = f"Entry '{name}' exists and is not a Group."
raise TypeError(msg)

return g
@@ -653,7 +653,7 @@ def create_group(self, name):
except KeyError:
pass
else:
raise ValueError("Entry %s exists." % name)
raise ValueError(f"Entry {name} exists.")

# If distributed, synchronise to ensure that we create group collectively
if self.distributed:
@@ -672,7 +672,7 @@ def create_group(self, name):
parent_name = posixpath.join(parent_name, part)
parent_storage = parent_storage[part]
if not isinstance(parent_storage, _Storage):
raise ValueError("Entry %s exists and is not a Group." % parent_name)
raise ValueError(f"Entry {parent_name} exists and is not a Group.")

# Underlying storage has been created. Return the group object.
return self[name]
@@ -930,7 +930,7 @@ def dataset_distributed_to_common(self, name):
dset = self[name]

if dset.common:
warnings.warn("%s is already a common dataset, no need to convert" % name)
warnings.warn(f"{name} is already a common dataset, no need to convert")
return dset

dset_shape = dset.shape
@@ -1271,11 +1271,7 @@ def __iter__(self):
return self._data.__iter__()

def __repr__(self):
return '<memh5 common dataset {}: shape {}, type "{}">'.format(
repr(self._name),
repr(self.shape),
repr(self.dtype),
)
return f'<memh5 common dataset {self._name!r}: shape {self.shape!r}, type "{self.dtype!r}">'

def __eq__(self, other):
if not isinstance(other, MemDatasetCommon):
@@ -1628,13 +1624,12 @@ def _resolve_subclass(cls, clspath):
try:
new_cls = misc.import_class(clspath)
except (ImportError, KeyError):
warnings.warn("Could not import memh5 subclass %s" % clspath)
warnings.warn(f"Could not import memh5 subclass {clspath}")

# Check that it is a subclass of MemDiskGroup
if not issubclass(new_cls, MemDiskGroup):
raise RuntimeError(
"Requested type (%s) is not an subclass of memh5.MemDiskGroup."
% clspath
f"Requested type ({clspath}) is not an subclass of memh5.MemDiskGroup."
)
return new_cls

@@ -1694,11 +1689,11 @@ def __getitem__(self, name):
path = value.name
if is_group(value):
if not self.group_name_allowed(path):
msg = "Access to group %s not allowed." % path
msg = f"Access to group {path} not allowed."
raise KeyError(msg)
else:
if not self.dataset_name_allowed(path):
msg = "Access to dataset %s not allowed." % path
msg = f"Access to dataset {path} not allowed."
raise KeyError(msg)
return value

@@ -1943,7 +1938,7 @@ def create_dataset(self, name, *args, **kwargs):
"""
path = posixpath.join(self.name, name)
if not self.dataset_name_allowed(path):
msg = "Dataset name %s not allowed." % path
msg = f"Dataset name {path} not allowed."
raise ValueError(msg)

return self._data.create_dataset(path, *args, **kwargs)
@@ -1993,7 +1988,7 @@ def create_group(self, name):
"""Create and return a new group."""
path = posixpath.join(self.name, name)
if not self.group_name_allowed(path):
msg = "Group name %s not allowed." % path
msg = f"Group name {path} not allowed."
raise ValueError(msg)
self._data.create_group(path)
return self._group_class._from_storage_root(self._data, path)
@@ -2172,9 +2167,9 @@ def history(self):
out = {}
for name, value in self._data["history"].items():
warnings.warn(
"memh5 dataset {} is using a deprecated history format. Read support of "
f"memh5 dataset {self.name} is using a deprecated history format. Read support of "
"files using this format will be continued for now, but you should "
"update the instance of caput that wrote this file.".format(self.name),
"update the instance of caput that wrote this file.",
DeprecationWarning,
)
out[name] = value.attrs
@@ -2419,7 +2414,7 @@ def get_file(f, file_format=None, **kwargs):
try:
f = file_format.open(f, **kwargs)
except OSError as e:
msg = "Opening file %s caused an error: " % str(f)
msg = f"Opening file {f!s} caused an error: "
raise OSError(msg + str(e)) from e

return f, True
@@ -2862,7 +2857,7 @@ def _write_distributed_datasets(dest):
# Open file on all ranks
with misc.open_h5py_mpi(fname, "r+", comm=group.comm) as f:
if not f.is_mpi:
raise RuntimeError("Could not create file %s in MPI mode" % fname)
raise RuntimeError(f"Could not create file {fname!s} in MPI mode")
_write_distributed_datasets(f)
else:
_write_distributed_datasets(fname)
@@ -3302,7 +3297,7 @@ def check_unicode(dset):
"""
if has_unicode(dset.dtype):
raise TypeError(
'Can not write dataset "%s" of unicode type into HDF5.' % dset.name
f'Can not write dataset "{dset.name!s}" of unicode type into HDF5.'
)

return dset.data
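One pattern worth noting across the memh5 hunks above: explicit `repr()` and `str()` calls inside %-formatting become the `!r` and `!s` conversion flags in the f-string versions. An illustrative equivalence check (the value is made up):

```python
val = [1, 2]

# %-style original, pairing %s with an explicit repr() call:
old = "Expected to receive a list, but got '%s.'" % repr(val)

# f-string replacement using the !r conversion flag:
new = f"Expected to receive a list, but got '{val!r}.'"

assert old == new
```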
2 changes: 1 addition & 1 deletion caput/mpiarray.py
@@ -2075,7 +2075,7 @@ def _reslice(slice_, n, subslice):
# In other words find a single slice that has the same affect as application of two
# successive slices
if subslice.step is not None and subslice.step > 1:
raise ValueError("stride > 1 not supported. subslice: %s" % subslice)
raise ValueError(f"stride > 1 not supported. subslice: {subslice}")

if isinstance(slice_, slice):
dstart, dstop, dstep = slice_.indices(n)
2 changes: 1 addition & 1 deletion caput/mpiutil.py
@@ -893,7 +893,7 @@ def __getattr__(self, name):
if _comm is not None and name in MPI.__dict__:
return MPI.__dict__[name]

raise AttributeError("module 'mpiutil' has no attribute '%s'" % name)
raise AttributeError(f"module 'mpiutil' has no attribute '{name}'")

def __call__(self, **kwargs):
"""Call self with set module."""
38 changes: 13 additions & 25 deletions caput/pipeline.py
@@ -434,17 +434,13 @@ def _get_versions(modules):
modules = [modules]
if not isinstance(modules, list):
raise config.CaputConfigError(
"Value of 'save_versions' is of type '{}' (expected 'str' or 'list(str)').".format(
type(modules).__name__
)
f"Value of 'save_versions' is of type '{type(modules).__name__}' (expected 'str' or 'list(str)')."
)
versions = {}
for module in modules:
if not isinstance(module, str):
raise config.CaputConfigError(
"Found value of type '{}' in list 'save_versions' (expected 'str').".format(
type(module).__name__
)
f"Found value of type '{type(module).__name__}' in list 'save_versions' (expected 'str')."
)
try:
versions[module] = importlib.import_module(module).__version__
@@ -629,8 +625,8 @@ def run(self):
except _PipelineMissingData:
if self.tasks.index(task) == 0:
msg = (
"%s missing input data and is at beginning of"
" task list. Advancing state." % task.__class__.__name__
f"{task.__class__.__name__} missing input data and is at beginning of"
" task list. Advancing state."
)
logger.debug(msg)
task._pipeline_advance_state()
@@ -721,10 +717,8 @@ def _validate_task(task, in_, requires, all_out_values):
for v in value:
if v not in all_out_values:
raise config.CaputConfigError(
"Value '{}' for task {} has no corresponding 'out' from another task "
"(Value {} is not in {}).".format(
key, type(task), v, all_out_values
)
f"Value '{key}' for task {type(task)} has no corresponding 'out' from another task "
f"(Value {v} is not in {all_out_values})."
)

def _setup_task(self, task_spec):
@@ -750,11 +744,7 @@ def _setup_task(self, task_spec):
try:
task_cls = misc.import_class(task_path)
except (config.CaputConfigError, AttributeError, ModuleNotFoundError) as e:
msg = "Loading task '{}' caused error {}:\n\t{}".format(
task_path,
e.__class__.__name__,
str(e),
)
msg = f"Loading task '{task_path}' caused error {e.__class__.__name__}:\n\t{e!s}"

Member

What's the reason for embedding e in the re-raised CaputConfigError here (I'm mostly talking about the \t{e!s} part)?

raise BarError from FooError is already going to report the original FooError in one of its two tracebacks.

And if something else is catching the CaputConfigError, isn't it better for that to get the original e out of CaputConfigError.__cause__ if it wants it?

Contributor Author

Honestly? I'm not sure. I think it was just written this way when the original pipeline runner was put together years and years ago.

I have another branch where I'm making improvements to the pipeline runner, so I'll make some error message improvements there instead so that this branch only makes style changes.

Member

Fair.
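
To illustrate the point settled in this thread, a minimal sketch using the hypothetical `FooError`/`BarError` names from the comment: `raise ... from ...` already attaches the original exception as `__cause__`, so embedding its text in the new message duplicates information:

```python
class FooError(Exception):
    pass


class BarError(Exception):
    pass


try:
    try:
        raise FooError("low-level failure")
    except FooError as e:
        # The original exception is chained automatically...
        raise BarError("high-level failure") from e
except BarError as err:
    # ...so a caller can still retrieve it without it being baked
    # into the BarError message itself.
    print(repr(err.__cause__))  # FooError('low-level failure')
```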

raise config.CaputConfigError(msg) from e

# Get the parameters and initialize the class.
@@ -777,7 +767,7 @@ def _setup_task(self, task_spec):
try:
params.update(self.all_params[param_key])
except KeyError as e:
msg = "Parameter group %s not found in config." % param_key
msg = f"Parameter group {param_key} not found in config."
raise config.CaputConfigError(msg) from e

# add global params to params
@@ -817,10 +807,8 @@ def add_task(self, task, requires=None, in_=None, out=None):
"""
try:
task._setup_keys(requires=requires, in_=in_, out=out)
except Exception as e: # noqa: BLE001
msg = "Setting up keys for task {} caused an error:\n\t{}".format(
task.__class__.__name__, str(e)
)
except Exception as e:
msg = f"Setting up keys for task {task.__class__.__name__} caused an error:\n\t{e!s}"
raise config.CaputConfigError(msg) from e

# The tasks own custom validation method
@@ -1043,7 +1031,7 @@ def _pipeline_next(self):
for req in self._requires:
if req is None:
raise _PipelineMissingData()
msg = "Task %s calling 'setup()'." % self.__class__.__name__
msg = f"Task {self.__class__.__name__} calling 'setup()'."
logger.debug(msg)
out = self.setup(*tuple(self._requires))
self._pipeline_advance_state()
@@ -1059,7 +1047,7 @@ def _pipeline_next(self):
for in_ in self._in:
args += (in_.get(),)
try:
msg = "Task %s calling 'next()'." % self.__class__.__name__
msg = f"Task {self.__class__.__name__} calling 'next()'."
logger.debug(msg)
return self.next(*args)
except PipelineStopIteration:
Expand All @@ -1068,7 +1056,7 @@ def _pipeline_next(self):
return None

elif self._pipeline_state == "finish":
msg = "Task %s calling 'finish()'." % self.__class__.__name__
msg = f"Task {self.__class__.__name__} calling 'finish()'."
logger.debug(msg)
out = self.finish()
self._pipeline_advance_state()
4 changes: 2 additions & 2 deletions caput/profile.py
@@ -5,7 +5,7 @@
import os
import time
from pathlib import Path
from typing import Optional
from typing import ClassVar, Optional

import numpy as np
import psutil
@@ -29,7 +29,7 @@ class Profiler:
current directory.
"""

profilers = ["cprofile", "pyinstrument"]
profilers: ClassVar = ["cprofile", "pyinstrument"]

def __init__(
self,
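The profile.py change above is the one substantive RUF012 fix in this diff. A bare `ClassVar` is enough to satisfy the rule; a subscripted annotation would also work if pinning down the element type is useful (a sketch, not part of this PR):

```python
from typing import ClassVar


class Profiler:
    # More explicit variant of the annotation applied above:
    profilers: ClassVar[list[str]] = ["cprofile", "pyinstrument"]
```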
10 changes: 5 additions & 5 deletions caput/scripts/runner.py
@@ -434,7 +434,7 @@ def queue(
system = conf["system"]

if system not in system_defaults:
raise ValueError('Specified system "%s": is not known.' % system)
raise ValueError(f'Specified system "{system}": is not known.')

rconf.update(**system_defaults[system])

@@ -445,7 +445,7 @@ def queue(
required_keys = {"nodes", "time", "directory", "ppn", "queue", "ompnum", "pernode"}
missing_keys = required_keys - set(rconf.keys())
if missing_keys:
raise ValueError("Missing required keys: %s" % missing_keys)
raise ValueError(f"Missing required keys: {missing_keys}")

# If no temporary directory set, just use the final directory
if "temp_directory" not in rconf:
@@ -454,12 +454,12 @@ def queue(
# Construct the working directory
workdir = expandpath(rconf["temp_directory"])
if not workdir.is_absolute():
raise ValueError("Working directory path %s must be absolute" % workdir)
raise ValueError(f"Working directory path {workdir} must be absolute")

# Construct the output directory
finaldir = expandpath(rconf["directory"])
if not finaldir.is_absolute():
raise ValueError("Final output directory path %s must be absolute" % finaldir)
raise ValueError(f"Final output directory path {finaldir} must be absolute")

# Create temporary directory if required
jobdir = workdir / "job/"
@@ -498,7 +498,7 @@ def queue(
if "venv" in rconf:
venvpath = expandpath(rconf["venv"] + "/bin/activate")
if not venvpath.exists():
raise ValueError("Could not find virtualenv at path %s" % rconf["venv"])
raise ValueError(f"Could not find virtualenv at path {rconf['venv']}")
rconf["venv"] = venvpath
else:
rconf["venv"] = "/dev/null"
4 changes: 2 additions & 2 deletions caput/tod.py
@@ -57,7 +57,7 @@ def from_mult_files(
stop=None,
datasets=None,
dataset_filter=None,
**kwargs
**kwargs,
):
"""Create new data object by concatenating a series of objects.

@@ -221,7 +221,7 @@ def dataset_sel(self):
def dataset_sel(self, value):
for dataset_name in value:
if dataset_name not in self.datasets:
msg = "Dataset %s not in data files." % dataset_name
msg = f"Dataset {dataset_name} not in data files."
raise ValueError(msg)
self._dataset_sel = tuple(value)

1 change: 0 additions & 1 deletion pyproject.toml
@@ -35,7 +35,6 @@ lint.ignore = [
"D413", # D413: Missing blank line after last section
"D416", # D416: Section name should end with a colon
"NPY002", # NPY002: replace legacy numpy.random calls with np.random.Generator
"RUF012", # RUF012 Mutable class attributes should be annotated with `typing.ClassVar`
]

# Ignore the following directories
Expand Down