ruff-rules-for-pyupgrade
cclauss committed Feb 10, 2025
1 parent a8559c1 commit ac4e27d
Showing 16 changed files with 70 additions and 80 deletions.
9 changes: 4 additions & 5 deletions compiler_opt/es/blackbox_optimizers.py
@@ -609,7 +609,7 @@ def sklearn_regression_gradient(clf: LinearModel, estimator_type: EstimatorType,
"""


- class QuadraticModel(object):
+ class QuadraticModel:
"""A class for quadratic functions.
Presents an interface for evaluating functions of the form
@@ -658,7 +658,7 @@ def grad(self, x: FloatArray) -> FloatArray:
return self.quad_v(x) + self.b


- class ProjectedGradientOptimizer(object):
+ class ProjectedGradientOptimizer:
r"""A class implementing the projected gradient algorithm.
The update is given by
@@ -748,7 +748,7 @@ def projector(w: FloatArray) -> FloatArray:
return projector


- class TrustRegionSubproblemOptimizer(object):
+ class TrustRegionSubproblemOptimizer:
r"""Solves the trust region subproblem over the L2 ball.
min_x f(x) s.t. \|x - p\| \leq R
@@ -977,8 +977,7 @@ def trust_region_test(self, current_input: FloatArray,
abs_ratio > self.params['reject_threshold'])
should_shrink = (not is_ascent and
abs_ratio > self.params['shrink_neg_threshold'])
- should_grow = ((is_ascent and
- tr_imp_ratio > self.params['grow_threshold']))
+ should_grow = (is_ascent and tr_imp_ratio > self.params['grow_threshold'])
log_message = (' fval pct change: ' + str(abs_ratio) + ' tr_ratio: ' +
str(tr_imp_ratio))
if should_reject:
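Note on the `(object)` removals in this file (and in learner.py, policy_saver.py, random_net_distillation.py, and trainer.py below): every Python 3 class is a new-style class, so inheriting from `object` explicitly is redundant, and Ruff's pyupgrade-derived UP rules rewrite it away. A minimal sketch of the equivalence, with illustrative class names not taken from this repository:

```python
# Old spelling: explicit `object` base, only needed on Python 2.
class LegacyModel(object):
  pass


# New spelling: identical semantics on Python 3, `object` is implicit.
class ModernModel:
  pass


# Both are ordinary new-style classes rooted at `object`.
assert LegacyModel.__mro__[-1] is object
assert ModernModel.__mro__[-1] is object
```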
12 changes: 2 additions & 10 deletions compiler_opt/es/regalloc_trace/regalloc_trace_worker.py
@@ -95,11 +95,7 @@ def _compile_module(self, module_to_compile: corpus.ModuleSpec,
parents=True, exist_ok=True)
command_vector.extend(["-o", module_output_path])

- subprocess.run(
- command_vector,
- check=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ subprocess.run(command_vector, check=True, capture_output=True)

def _build_corpus(self, modules: Collection[corpus.ModuleSpec],
output_directory: str,
@@ -148,11 +144,7 @@ def _evaluate_corpus(self, module_directory: str, function_index_path: str,
f"--bb_trace_path={bb_trace_path}", "--model_type=mca"
]

- output = subprocess.run(
- command_vector,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- check=True)
+ output = subprocess.run(command_vector, capture_output=True, check=True)

segment_costs = []
for line in output.stdout.decode("utf-8").split("\n"):
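The two `subprocess.run` rewrites above lean on `capture_output=True`, which since Python 3.7 is shorthand for passing `stdout=subprocess.PIPE, stderr=subprocess.PIPE`; the result still exposes the captured bytes as `.stdout` and `.stderr`. A rough sketch of the equivalence, using an illustrative `echo` command rather than the real compile/trace invocations:

```python
import subprocess

cmd = ["echo", "hello"]

# Old spelling: pipes requested explicitly.
long_form = subprocess.run(
    cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

# New spelling: same behavior on Python 3.7+.
short_form = subprocess.run(cmd, check=True, capture_output=True)

assert long_form.stdout == short_form.stdout == b"hello\n"
assert long_form.stderr == short_form.stderr == b""
```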
2 changes: 1 addition & 1 deletion compiler_opt/rl/best_trajectory_test.py
@@ -94,7 +94,7 @@ def test_sink_to_csv_file(self):
path = self.create_tempfile().full_path
repo = _get_test_repo_1()
repo.sink_to_csv_file(path)
- with open(path, 'r', encoding='utf-8') as f:
+ with open(path, encoding='utf-8') as f:
text = f.read()

self.assertEqual(text,
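For the `open` change above: `'r'` (text read) is already the default mode, so dropping it changes nothing; only the explicit `encoding` matters. A tiny sketch with an illustrative temp file:

```python
import tempfile

with tempfile.NamedTemporaryFile(
    "w", suffix=".csv", delete=False, encoding="utf-8") as tmp:
  tmp.write("name,reward\nmodule_a,1.0\n")

with open(tmp.name, "r", encoding="utf-8") as f:  # old spelling
  explicit = f.read()
with open(tmp.name, encoding="utf-8") as f:  # new spelling, same result
  implicit = f.read()
assert explicit == implicit
```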
5 changes: 2 additions & 3 deletions compiler_opt/rl/compilation_runner.py
@@ -459,9 +459,8 @@ def collect_data(self,
sequence_example = v[0]
policy_reward = v[1]
if k not in reward_stat:
- raise ValueError(
- (f'Example {k} does not exist under default policy for '
- f'cmd line: {final_cmd_line}'))
+ raise ValueError(f'Example {k} does not exist under default policy for '
+ f'cmd line: {final_cmd_line}')
default_reward = reward_stat[k].default_reward
moving_average_reward = reward_stat[k].moving_average_reward
sequence_example = _overwrite_trajectory_reward(
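The `raise ValueError` rewrite above (and the similar ones in later files) only drops a redundant inner pair of parentheses: adjacent string literals, f-strings included, are concatenated at compile time, and the call's own parentheses already let the message span two lines. A hedged sketch with made-up values:

```python
k = "module_a"
final_cmd_line = "clang -O2 example.ll"

# Old spelling: message wrapped in an extra pair of parentheses.
wrapped = ((f'Example {k} does not exist under default policy for '
            f'cmd line: {final_cmd_line}'))
# New spelling: the same single string, no extra parentheses.
plain = (f'Example {k} does not exist under default policy for '
         f'cmd line: {final_cmd_line}')
assert wrapped == plain
```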
5 changes: 2 additions & 3 deletions compiler_opt/rl/compilation_runner_test.py
@@ -92,10 +92,9 @@ def _mock_compile_fn(file_paths, tf_policy_path, reward_only, workdir): # pylin
return {'default': (sequence_example, native_size)}


- _mock_policy = policy_saver.Policy(bytes(), bytes())
+ _mock_policy = policy_saver.Policy(b'', b'')

- _mock_loaded_module_spec = corpus.LoadedModuleSpec(
- name='dummy', loaded_ir=bytes())
+ _mock_loaded_module_spec = corpus.LoadedModuleSpec(name='dummy', loaded_ir=b'')


class CompilationRunnerTest(tf.test.TestCase):
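On the `bytes()` to `b''` changes here (and the `.encode('utf-8')` removals in local_data_collector_test.py and log_reader_test.py further down): an empty `bytes()` call and the literal `b''` construct the same object, and encoding an ASCII `str` at run time yields exactly the corresponding bytes literal. Minimal sketch:

```python
# Empty bytes: constructor call vs. literal.
assert bytes() == b''

# Run-time encoding of ASCII text equals the bytes literal spelling.
assert 'policy'.encode('utf-8') == b'policy'
assert '\n'.encode('utf-8') == b'\n'
```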
2 changes: 1 addition & 1 deletion compiler_opt/rl/distributed/learner.py
@@ -30,7 +30,7 @@
_SequenceFnType = Callable[[_SequenceParamsType], _SequenceParamsType]


- class MLGOPPOLearner(object):
+ class MLGOPPOLearner:
"""Manages all the learning details needed.
These include:
40 changes: 20 additions & 20 deletions compiler_opt/rl/env.py
@@ -41,14 +41,14 @@ class StepType(Enum):

@dataclasses.dataclass
class TimeStep:
- obs: Optional[dict[str, np.NDArray]]
- reward: Optional[dict[str, float]]
- score_policy: Optional[dict[str, float]]
- score_default: Optional[dict[str, float]]
- context: Optional[str]
+ obs: dict[str, np.NDArray] | None
+ reward: dict[str, float] | None
+ score_policy: dict[str, float] | None
+ score_default: dict[str, float] | None
+ context: str | None
module_name: str
working_dir: str
- obs_id: Optional[int]
+ obs_id: int | None
step_type: StepType


@@ -68,9 +68,9 @@ class MLGOTask(metaclass=abc.ABCMeta):
"""

@abc.abstractmethod
- def get_cmdline(self, clang_path: str, base_args: List[str],
- interactive_base_path: Optional[str],
- working_dir: str) -> List[str]:
+ def get_cmdline(self, clang_path: str, base_args: list[str],
+ interactive_base_path: str | None,
+ working_dir: str) -> list[str]:
"""Get the cmdline for building with this task.
The resulting list[str] should be able to be passed to subprocess.run to
@@ -123,7 +123,7 @@ def __init__(self, proc: subprocess.Popen,
self._module_name = module_name
self._working_dir = working_dir

- def get_scores(self, timeout: Optional[int] = None):
+ def get_scores(self, timeout: int | None = None):
self._proc.wait(timeout=timeout)
return self._get_scores_fn()

@@ -223,9 +223,9 @@ def _reward_fn(a: float, b: float) -> float:
def clang_session(
clang_path: str,
module: corpus.LoadedModuleSpec,
- task_type: Type[MLGOTask],
+ task_type: type[MLGOTask],
*,
- explicit_temps_dir: Optional[str] = None,
+ explicit_temps_dir: str | None = None,
interactive: bool,
):
"""Context manager for clang session.
@@ -293,11 +293,11 @@ def _get_scores() -> dict[str, float]:

def _get_clang_generator(
clang_path: str,
- task_type: Type[MLGOTask],
- explicit_temps_dir: Optional[str] = None,
+ task_type: type[MLGOTask],
+ explicit_temps_dir: str | None = None,
interactive_only: bool = False,
- ) -> Generator[Optional[Tuple[ClangProcess, InteractiveClang]],
- Optional[corpus.LoadedModuleSpec], None]:
+ ) -> Generator[tuple[ClangProcess, InteractiveClang] | None,
+ corpus.LoadedModuleSpec | None, None]:
"""Returns a tuple of generators for creating InteractiveClang objects.
Args:
@@ -352,10 +352,10 @@ def __init__(
self,
*,
clang_path: str,
- task_type: Type[MLGOTask],
+ task_type: type[MLGOTask],
obs_spec,
action_spec,
- explicit_temps_dir: Optional[str] = None,
+ explicit_temps_dir: str | None = None,
interactive_only: bool = False,
):
self._clang_generator = _get_clang_generator(
@@ -366,8 +366,8 @@ def __init__(
self._obs_spec = obs_spec
self._action_spec = action_spec

- self._iclang: Optional[InteractiveClang] = None
- self._clang: Optional[ClangProcess] = None
+ self._iclang: InteractiveClang | None = None
+ self._clang: ClangProcess | None = None

@property
def obs_spec(self):
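The env.py hunks replace `typing.Optional`, `List`, `Tuple`, and `Type` with PEP 604 unions (`X | None`) and PEP 585 builtin generics (`list[str]`, `type[T]`). In annotation position these spellings are also accepted on Python 3.8/3.9, provided annotation evaluation is deferred with `from __future__ import annotations` (or the annotations are never evaluated at run time). A standalone sketch with an illustrative function, not code from env.py:

```python
from __future__ import annotations  # defers evaluation, so 3.8/3.9 accept the syntax

import subprocess


def get_scores(proc: subprocess.Popen, timeout: int | None = None) -> dict[str, float]:
  # `int | None` replaces Optional[int]; dict[str, float] replaces Dict[str, float].
  proc.wait(timeout=timeout)
  return {"default": 0.0}
```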
32 changes: 16 additions & 16 deletions compiler_opt/rl/imitation_learning/generate_bc_trajectories_lib.py
@@ -154,9 +154,9 @@ def add_feature_list(seq_example: tf.train.SequenceExample,
np.dtype(np.float32),
str,
]):
- raise AssertionError((f'Unsupported type for feature {feature_name}'
- f' of type {type(feature_list[0])}. '
- 'Supported types are np.int64, np.float32, str'))
+ raise AssertionError(f'Unsupported type for feature {feature_name}'
+ f' of type {type(feature_list[0])}. '
+ 'Supported types are np.int64, np.float32, str')
if isinstance(feature_list[0], np.float32):
add_function = add_float_feature
elif isinstance(feature_list[0], (int, np.int64)):
@@ -362,8 +362,8 @@ def __init__(
if self._env.action_spec:
if self._env.action_spec.dtype != tf.int64:
raise TypeError(
- ('Environment action_spec type '
- f'{self._env.action_spec.dtype} does not match tf.int64'))
+ 'Environment action_spec type '
+ f'{self._env.action_spec.dtype} does not match tf.int64')
self._exploration_frac = exploration_frac
self._max_exploration_steps = max_exploration_steps
self._max_horizon_to_explore = max_horizon_to_explore
@@ -412,11 +412,11 @@ def compile_module(
working_dir_head = os.path.split(self._working_dir)[0]
shutil.rmtree(working_dir_head)
if horizon <= 0:
- raise ValueError(('Policy did not take any inlining decision for module '
- f'{self._loaded_module_spec.name}.'))
+ raise ValueError('Policy did not take any inlining decision for module '
+ f'{self._loaded_module_spec.name}.')
if curr_obs_dict.step_type != env.StepType.LAST:
- raise ValueError(('Compilation loop exited at step type'
- f'{curr_obs_dict.step_type} before last step'))
+ raise ValueError('Compilation loop exited at step type'
+ f'{curr_obs_dict.step_type} before last step')
reward = curr_obs_dict.score_policy[self._reward_key]
reward_list = np.float32(reward) * np.float32(np.ones(horizon))
add_feature_list(sequence_example, reward_list,
@@ -593,16 +593,16 @@ def _process_obs(self, curr_obs, sequence_example):
else:
if curr_obs_feature_name not in self._env.obs_spec.keys():
raise AssertionError(
- (f'Feature name {curr_obs_feature_name} not in obs_spec {1}'
- f'{self._env.obs_spec.keys()}'))
+ f'Feature name {curr_obs_feature_name} not in obs_spec {1}'
+ f'{self._env.obs_spec.keys()}')
if curr_obs_feature_name in [
SequenceExampleFeatureNames.action,
SequenceExampleFeatureNames.reward,
SequenceExampleFeatureNames.module_name
]:
raise AssertionError(
- (f'Feature name {curr_obs_feature_name} part of '
- f'SequenceExampleFeatureNames {self._env.obs_spec.keys()}'))
+ f'Feature name {curr_obs_feature_name} part of '
+ f'SequenceExampleFeatureNames {self._env.obs_spec.keys()}')
obs_dtype = self._env.obs_spec[curr_obs_feature_name].dtype
curr_obs_feature = curr_obs[curr_obs_feature_name]
curr_obs[curr_obs_feature_name] = tf.convert_to_tensor(
@@ -805,9 +805,9 @@ def __init__(
if len(exploration_policy_paths) > (len(policy_paths) +
len(callable_policies)):
raise AssertionError(
- (f'Number of exploration policies: {len(exploration_policy_paths)},'
- 'greater than number of policies: '
- f'{len(policy_paths) + len(callable_policies)}'))
+ f'Number of exploration policies: {len(exploration_policy_paths)},'
+ 'greater than number of policies: '
+ f'{len(policy_paths) + len(callable_policies)}')
self._exploration_policy_distrs = []
for exploration_policy_path in exploration_policy_paths:
expl_policy = tf.saved_model.load(
8 changes: 4 additions & 4 deletions compiler_opt/rl/imitation_learning/weighted_bc_trainer_lib.py
@@ -143,12 +143,12 @@ def create_new_profile(self,
logging.error('KeyError: %s', k)
continue
if isinstance(prof[SequenceExampleFeatureNames.loss], str):
- raise ValueError(('prof[SequenceExampleFeatureNames.loss] is a string'
- 'but it should be numeric.'))
+ raise ValueError('prof[SequenceExampleFeatureNames.loss] is a string'
+ 'but it should be numeric.')
if isinstance(new_prof[SequenceExampleFeatureNames.loss], str):
raise ValueError(
- ('new_prof[SequenceExampleFeatureNames.loss] is a string'
- 'but it should be numeric.'))
+ 'new_prof[SequenceExampleFeatureNames.loss] is a string'
+ 'but it should be numeric.')
new_prof[SequenceExampleFeatureNames
.regret] = new_prof[SequenceExampleFeatureNames.loss] - prof[
SequenceExampleFeatureNames.loss]
4 changes: 2 additions & 2 deletions compiler_opt/rl/local_data_collector_test.py
@@ -32,9 +32,9 @@
from compiler_opt.rl import local_data_collector
from compiler_opt.rl import policy_saver

- _policy_str = 'policy'.encode(encoding='utf-8')
+ _policy_str = b'policy'

- _mock_policy = policy_saver.Policy(output_spec=bytes(), policy=_policy_str)
+ _mock_policy = policy_saver.Policy(output_spec=b'', policy=_policy_str)


def _get_sequence_example(feature_value):
2 changes: 1 addition & 1 deletion compiler_opt/rl/log_reader_test.py
@@ -30,7 +30,7 @@ def json_to_bytes(d) -> bytes:
return json.dumps(d).encode('utf-8')


- nl = '\n'.encode('utf-8')
+ nl = b'\n'


def write_buff(f: BinaryIO, buffer: list, ct):
6 changes: 3 additions & 3 deletions compiler_opt/rl/policy_saver.py
@@ -145,7 +145,7 @@ def from_filesystem(location: str):
return Policy(output_spec=output_spec, policy=policy)


- class PolicySaver(object):
+ class PolicySaver:
"""Object that saves policy and model config file required by inference.
```python
@@ -190,8 +190,8 @@ def _write_output_signature(
# First entry in output list is the decision (action)
decision_spec = tf.nest.flatten(action_signature.action)
if len(decision_spec) != 1:
- raise ValueError(('Expected action decision to have 1 tensor, but '
- f'saw: {action_signature.action}'))
+ raise ValueError('Expected action decision to have 1 tensor, but '
+ f'saw: {action_signature.action}')

# Find the decision's tensor in the flattened output tensor list.
sm_action_decision = (
2 changes: 1 addition & 1 deletion compiler_opt/rl/random_net_distillation.py
@@ -19,7 +19,7 @@


@gin.configurable
- class RandomNetworkDistillation():
+ class RandomNetworkDistillation:
"""The Random Network Distillation class."""

def __init__(self,
6 changes: 3 additions & 3 deletions compiler_opt/rl/trainer.py
@@ -30,7 +30,7 @@


@gin.configurable
- class Trainer(object):
+ class Trainer:
"""Object that trains LLVM policy.
After initialization, the function 'train' can be called multiple times to
@@ -217,8 +217,8 @@ def train(self, dataset_iter, monitor_dict, num_iterations: int):
experience = next(dataset_iter)
except StopIteration:
logging.warning(
- ('Warning: skip training because do not have enough data to fill '
- 'in a batch, consider increase data or reduce batch size.'))
+ 'Warning: skip training because do not have enough data to fill '
+ 'in a batch, consider increase data or reduce batch size.')
break

# random network distillation for intrinsic reward generation
10 changes: 5 additions & 5 deletions compiler_opt/tools/generate_default_trace.py
@@ -185,15 +185,15 @@ def generate_trace(
if performance_writer:
for key, value in reward_stat.items():
performance_writer.write(
- (f'{module_name},{key},{value.default_reward},'
- f'{value.moving_average_reward}\n'))
+ f'{module_name},{key},{value.default_reward},'
+ f'{value.moving_average_reward}\n')
logging.info('%d success, %d failed out of %d',
total_successful_examples, total_failed_examples,
total_work)

- print((f'{total_successful_examples} of {len(corpus_elements)} modules '
- f'succeeded, and {total_training_examples} trainining examples '
- 'written'))
+ print(f'{total_successful_examples} of {len(corpus_elements)} modules '
+ f'succeeded, and {total_training_examples} trainining examples '
+ 'written')


if __name__ == '__main__':
5 changes: 3 additions & 2 deletions pyproject.toml
@@ -1,8 +1,9 @@
[tool.ruff]
line-length = 103
lint.select = [ "C40", "C9", "E", "F", "PERF", "W", "YTT" ]
lint.ignore = [ "E722", "E731", "F401", "PERF203" ]
lint.select = [ "C40", "C9", "E", "F", "PERF", "UP", "W", "YTT" ]
lint.ignore = [ "E722", "E731", "F401", "PERF203", "UP009" ]
lint.mccabe.max-complexity = 18
target-version = "py38"

[tool.pytest.ini_options]
# DO always create a tracking issue when allow-listing warnings
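The pyproject.toml hunk is what enables the rest of the commit: adding "UP" to `lint.select` turns on Ruff's pyupgrade-derived rules, while adding "UP009" to `lint.ignore` (the rule that flags `# -*- coding: utf-8 -*-` declarations as redundant on Python 3) leaves any such declarations in place. A purely illustrative before/after of the kind of rewrite the selected rules perform under `ruff check --fix`, not a file from this repository:

```python
# -*- coding: utf-8 -*-  # kept: UP009 is ignored, so Ruff does not strip this line

# Before the UP fixes, a module might have read:
#   class Learner(object):
#     def load(self, path):
#       with open(path, 'r', encoding='utf-8') as f:
#         return f.read()


# After the fixes: the `object` base and the redundant 'r' mode are gone.
class Learner:

  def load(self, path):
    with open(path, encoding='utf-8') as f:
      return f.read()
```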
