ruff-rules-for-pyupgrade #436

Merged (1 commit) on Feb 14, 2025
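
This pull request is a mechanical modernization of type annotations of the kind enforced by Ruff's pyupgrade (UP) rule family: typing.List/Dict/Tuple become the builtin generics of PEP 585, Optional and Union become PEP 604 "X | Y" unions, and abstract container types such as Callable, Iterable, and Sequence are imported from collections.abc rather than typing. A minimal sketch of the before/after pattern, using a hypothetical function rather than one taken from this repository:

# Before: the pre-PEP 585 / pre-PEP 604 style that the diff removes.
from typing import Dict, List, Optional

def summarize(counters: Dict[str, List[float]], top_n: Optional[int] = None) -> List[str]:
  # Format the first top_n counters by name; top_n=None keeps them all.
  return [f'{name}: {sum(counters[name]):.2f}' for name in sorted(counters)[:top_n]]

# After: the style the diff converts to (builtin generics and | unions).
def summarize_new(counters: dict[str, list[float]], top_n: int | None = None) -> list[str]:
  return [f'{name}: {sum(counters[name]):.2f}' for name in sorted(counters)[:top_n]]

Assuming the repository enables the UP rules in its Ruff configuration (that configuration is not shown in this diff view), running ruff check --fix applies these rewrites automatically.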
8 changes: 3 additions & 5 deletions compiler_opt/benchmark/benchmark_chromium.py
@@ -52,8 +52,6 @@
from compiler_opt.benchmark import gtest_executable_utils
from compiler_opt.benchmark import benchmarking_utils

-from typing import List, Dict, Union

FLAGS = flags.FLAGS

test_prefix = './compiler_opt/benchmark/chromium_test_descriptions/'
@@ -110,7 +108,7 @@

def build_chromium_tests(regalloc_advisor: str, chromium_build_path: str,
chromium_source_path: str, depot_tools_path: str,
-llvm_build_path: str, tests_to_build: List[str]):
+llvm_build_path: str, tests_to_build: list[str]):
"""Builds the chromium test suite
This function will build the specified chromium tests using the specified
@@ -170,9 +168,9 @@ def build_chromium_tests(regalloc_advisor: str, chromium_build_path: str,
ninja_compile_process.wait()


-def run_tests(tests_to_run: List[Dict[str, Union[str, List[str]]]],
+def run_tests(tests_to_run: list[dict[str, str | list[str]]],
chromium_absolute_build_path: str, num_threads: int,
-perf_counters: List[str]):
+perf_counters: list[str]):
"""A utility to run a set of chromium tests
This function takes in a list of test descriptions containing the
17 changes: 7 additions & 10 deletions compiler_opt/benchmark/benchmark_report.py
@@ -18,31 +18,28 @@
import statistics

from typing import Any
-from typing import Dict
-from typing import Iterable
-from typing import List
-from typing import Tuple
+from collections.abc import Iterable

from absl import logging

# For each benchmark, and for each counter, capture the recorded values.
-PerBenchmarkResults = Dict[str, Dict[str, List[float]]]
+PerBenchmarkResults = dict[str, dict[str, list[float]]]

# Benchmark data, as captured by the benchmark json output: a dictionary from
# benchmark names to a list of run results. Each run result is a dictionary of
# key-value pairs, e.g. counter name - value.
-BenchmarkRunResults = Dict[str, List[Dict[str, Any]]]
+BenchmarkRunResults = dict[str, list[dict[str, Any]]]

# A comparison per benchmark, per counter, capturing the geomean and the stdev
# of the base and experiment values.
-ABComparison = Dict[str, Dict[str, Tuple[float, float, float]]]
+ABComparison = dict[str, dict[str, tuple[float, float, float]]]


-def _geomean(data: List[float]):
+def _geomean(data: list[float]):
return math.exp(sum(math.log(x) for x in data) / len(data))


-def _stdev(data: List[float]):
+def _stdev(data: list[float]):
assert data
return 0.0 if len(data) == 1 else statistics.stdev(data)

@@ -70,7 +67,7 @@ def counters(self):
def raw_measurements(self):
return self._raw_measurements

-def counter_means(self, benchmark: str, counter: str) -> Tuple[float, float]:
+def counter_means(self, benchmark: str, counter: str) -> tuple[float, float]:
if counter not in self.counters():
raise ValueError('unknown counter')
if benchmark not in self.names():
2 changes: 1 addition & 1 deletion compiler_opt/benchmark/benchmark_report_converter.py
@@ -32,7 +32,7 @@
import csv
import json

-from typing import Sequence
+from collections.abc import Sequence

from absl import app
from absl import flags
6 changes: 2 additions & 4 deletions compiler_opt/benchmark/benchmarking_utils.py
@@ -20,11 +20,9 @@
import tensorflow
import json

-from typing import Optional, List


def build_llvm(model_path: str, use_existing_build: bool, llvm_build_path: str,
-llvm_source_path: Optional[str]):
+llvm_source_path: str | None):
"""Builds LLVM/clang with the specified model and the correct settings
This function invokes CMake with all the correct build flags specified
@@ -72,7 +70,7 @@ def build_llvm(model_path: str, use_existing_build: bool, llvm_build_path: str,
cmake_compile_process.wait()


-def run_microbenchmark(executable: str, perf_counters: List[str]):
+def run_microbenchmark(executable: str, perf_counters: list[str]):
"""Runs all the tests in a specific google benchmark binary
This function takes in an executable and performance counters according to the
13 changes: 6 additions & 7 deletions compiler_opt/benchmark/gtest_executable_utils.py
@@ -18,11 +18,10 @@
import re

from joblib import Parallel, delayed
-from typing import Tuple, List, Optional, Dict
from absl import logging


-def run_test(test_executable: str, test_name: str, perf_counters: List[str]):
+def run_test(test_executable: str, test_name: str, perf_counters: list[str]):
"""Runs a specific test
This function executes a specific test in a gtest executable using
@@ -55,7 +54,7 @@ def run_test(test_executable: str, test_name: str, perf_counters: List[str]):
return decoded_stderr


-def parse_perf_stat_output(perf_stat_output: str, perf_counters: List[str]):
+def parse_perf_stat_output(perf_stat_output: str, perf_counters: list[str]):
"""Parses raw output from perf stat
This function takes in the raw decoded output from perf stat
@@ -77,7 +76,7 @@ def parse_perf_stat_output(perf_stat_output: str, perf_counters: List[str]):
return counters_dict


-def run_and_parse(test_description: Tuple[str, str, List[str]]):
+def run_and_parse(test_description: tuple[str, str, list[str]]):
"""Runs a test and processes the output of an individual test
This function takes in a description of an individual test, runs the test
@@ -98,9 +97,9 @@ def run_and_parse(test_description: Tuple[str, str, List[str]]):
return None


-def run_test_suite(test_suite_description: Dict[str, List[str]],
-test_executable: str, perf_counters: List[str],
-num_threads: Optional[int]):
+def run_test_suite(test_suite_description: dict[str, list[str]],
+test_executable: str, perf_counters: list[str],
+num_threads: int | None):
"""Runs an entire test suite
This function takes in a test set description in the form of a path to a JSON
15 changes: 8 additions & 7 deletions compiler_opt/distributed/buffered_scheduler.py
@@ -18,17 +18,18 @@
import concurrent.futures
import threading

-from typing import Any, Callable, Iterable, List, Optional, Tuple, TypeVar
+from typing import Any, TypeVar
+from collections.abc import Callable, Iterable

from compiler_opt.distributed import worker

T = TypeVar('T')
W = TypeVar('W')


-def schedule(work: List[Callable[[T], worker.WorkerFuture]],
-workers: List[T],
-buffer=2) -> List[concurrent.futures.Future]:
+def schedule(work: list[Callable[[T], worker.WorkerFuture]],
+workers: list[T],
+buffer=2) -> list[concurrent.futures.Future]:
"""
Assigns work to workers once previous work of the worker are
completed.
@@ -86,8 +87,8 @@ def schedule_on_worker_pool(
action: Callable[[W, T], Any],
jobs: Iterable[T],
worker_pool: worker.WorkerPool,
-buffer_size: Optional[int] = None
-) -> Tuple[List[W], List[concurrent.futures.Future]]:
+buffer_size: int | None = None
+) -> tuple[list[W], list[concurrent.futures.Future]]:
"""
Schedule the given action on workers from the given worker pool.
Args:
@@ -111,7 +112,7 @@ def work(w: worker.Worker):
return work

work = [work_factory(job) for job in jobs]
-workers: List[W] = worker_pool.get_currently_active()
+workers: list[W] = worker_pool.get_currently_active()
return workers, schedule(work, workers,
(worker_pool.get_worker_concurrency()
if buffer_size is None else buffer_size))
13 changes: 7 additions & 6 deletions compiler_opt/distributed/local/local_worker_manager.py
@@ -40,7 +40,8 @@

from contextlib import AbstractContextManager
from multiprocessing import connection
-from typing import Any, Callable, Dict, List, Optional
+from typing import Any
+from collections.abc import Callable


@dataclasses.dataclass(frozen=True)
@@ -139,7 +140,7 @@ def __init__(self):
# lock for the msgid -> reply future map. The map will be set to None
# when we stop.
self._lock = threading.Lock()
-self._map: Dict[int, concurrent.futures.Future] = {}
+self._map: dict[int, concurrent.futures.Future] = {}

# thread draining the pipe
self._pump = threading.Thread(target=self._msg_pump)
@@ -164,7 +165,7 @@ def observer():

def _msg_pump(self):
while True:
-task_result: Optional[TaskResult] = self._pipe.recv()
+task_result: TaskResult | None = self._pipe.recv()
if task_result is None: # Poison pill fed by observer
break
with self._lock:
@@ -229,7 +230,7 @@ def set_nice(self, val: int):
"""
psutil.Process(self._process.pid).nice(val)

-def set_affinity(self, val: List[int]):
+def set_affinity(self, val: list[int]):
"""Sets the CPU affinity of the process, this modifies which cores the OS
schedules it on.
"""
@@ -247,7 +248,7 @@ def __dir__(self):


def create_local_worker_pool(worker_cls: 'type[worker.Worker]',
-count: Optional[int], *args,
+count: int | None, *args,
**kwargs) -> worker.FixedWorkerPool:
"""Create a local worker pool for worker_cls."""
if not count:
@@ -271,7 +272,7 @@ def close_local_worker_pool(pool: worker.FixedWorkerPool):
class LocalWorkerPoolManager(AbstractContextManager):
"""A pool of workers hosted on the local machines, each in its own process."""

-def __init__(self, worker_class: 'type[worker.Worker]', count: Optional[int],
+def __init__(self, worker_class: 'type[worker.Worker]', count: int | None,
*args, **kwargs):
self._pool = create_local_worker_pool(worker_class, count, *args, **kwargs)

9 changes: 5 additions & 4 deletions compiler_opt/distributed/worker.py
@@ -14,7 +14,8 @@
"""Common abstraction for a worker contract."""

import abc
-from typing import Any, List, Iterable, Optional, Protocol, TypeVar
+from typing import Any, Protocol, TypeVar
+from collections.abc import Iterable

import gin

@@ -35,7 +36,7 @@ class WorkerPool(metaclass=abc.ABCMeta):

# Issue #155 would strongly-type the return type.
@abc.abstractmethod
-def get_currently_active(self) -> List[Any]:
+def get_currently_active(self) -> list[Any]:
raise NotImplementedError()

@abc.abstractmethod
@@ -47,7 +48,7 @@ class FixedWorkerPool(WorkerPool):
"""A WorkerPool built from a fixed list of workers."""

# Issue #155 would strongly-type `workers`
-def __init__(self, workers: List[Any], worker_concurrency: int = 2):
+def __init__(self, workers: list[Any], worker_concurrency: int = 2):
self._workers = workers
self._worker_concurrency = worker_concurrency

@@ -80,7 +81,7 @@ def wait_for(futures: Iterable[WorkerFuture]):
pass


-def get_exception(worker_future: WorkerFuture) -> Optional[Exception]:
+def get_exception(worker_future: WorkerFuture) -> Exception | None:
assert worker_future.done()
try:
_ = worker_future.result()
17 changes: 8 additions & 9 deletions compiler_opt/es/blackbox_evaluator.py
@@ -15,7 +15,6 @@

import abc
import concurrent.futures
-from typing import List, Optional

from absl import logging
import gin
@@ -36,16 +35,16 @@ def __init__(self, train_corpus: corpus.Corpus):

@abc.abstractmethod
def get_results(
-self, pool: FixedWorkerPool, perturbations: List[policy_saver.Policy]
-) -> List[concurrent.futures.Future]:
+self, pool: FixedWorkerPool, perturbations: list[policy_saver.Policy]
+) -> list[concurrent.futures.Future]:
raise NotImplementedError()

@abc.abstractmethod
def set_baseline(self, pool: FixedWorkerPool) -> None:
raise NotImplementedError()

def get_rewards(
-self, results: List[concurrent.futures.Future]) -> List[Optional[float]]:
+self, results: list[concurrent.futures.Future]) -> list[float | None]:
rewards = [None] * len(results)

for i in range(len(results)):
@@ -74,8 +73,8 @@ def __init__(self, train_corpus: corpus.Corpus,
super().__init__(train_corpus)

def get_results(
-self, pool: FixedWorkerPool, perturbations: List[policy_saver.Policy]
-) -> List[concurrent.futures.Future]:
+self, pool: FixedWorkerPool, perturbations: list[policy_saver.Policy]
+) -> list[concurrent.futures.Future]:
if not self._samples:
for _ in range(self._total_num_perturbations):
sample = self._train_corpus.sample(self._num_ir_repeats_within_worker)
@@ -118,11 +117,11 @@ def __init__(self, train_corpus: corpus.Corpus,
self._bb_trace_path = bb_trace_path
self._function_index_path = function_index_path

-self._baseline: Optional[float] = None
+self._baseline: float | None = None

def get_results(
-self, pool: FixedWorkerPool, perturbations: List[policy_saver.Policy]
-) -> List[concurrent.futures.Future]:
+self, pool: FixedWorkerPool, perturbations: list[policy_saver.Policy]
+) -> list[concurrent.futures.Future]:
job_args = [{
'modules': self._train_corpus.module_specs,
'function_index_path': self._function_index_path,
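
One practical note on the converted annotations (an observation, not part of the diff): builtin generics such as list[str] and dict[str, float] need Python 3.9+ at runtime, and the X | None union syntax in annotations needs Python 3.10+ unless a module adds "from __future__ import annotations". Since the diff adds no such future imports, it presumably assumes a 3.10+ interpreter. A small self-contained illustration of the converted style, not code from the repository:

from collections.abc import Sequence

def geomean_or_none(values: Sequence[float]) -> float | None:
  # Illustrative only: returns None for an empty input instead of raising.
  if not values:
    return None
  product = 1.0
  for v in values:
    product *= v
  return product ** (1.0 / len(values))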