Commit 1012276

Style improvement

tbarbette committed Jul 25, 2024
1 parent ba793fb
Showing 4 changed files with 75 additions and 51 deletions.
22 changes: 15 additions & 7 deletions npf/regression.py
@@ -144,7 +144,13 @@ def compare(self,
build.writeversion(test, all_results, allow_overwrite = True)
return tests_passed, tests_total

def regress_all_tests(self, tests: List['Test'], options, history: int = 1, on_finish = None, iserie=0, nseries=1) -> Tuple[Build, List[Dataset]]:
def regress_all_tests( self,
tests: List['Test'],
options,
history: int = 1,
on_finish = None,
do_compare:bool = True,
iserie=0, nseries=1) -> Tuple[Build, List[Dataset], List[Dataset]]:
"""
Execute all tests passed as arguments on the last build of the regressor's associated repository
:param history: Start regression at last build + 1 - history
@@ -167,9 +173,11 @@ def regress_all_tests(tests: List['Test'], options, history: int = 1, on_f
for itest,test in enumerate(tests):
print(test)
if build.version != "local":
print("[%s] Running test %s on version %s..." % (repo.name, test.filename, build.version))
print(
f"[{repo.name}] Running test {test.filename} on version {build.version}..."
)
else:
print("[%s] Running test %s..." % (repo.name, test.filename))
print(f"[{repo.name}] Running test {test.filename}...")
regression = self
if repo.last_build:
try:
@@ -202,10 +210,10 @@ def early_results(all_data_results, all_time_results):
except ScriptInitException:
return None, None, None

variables_passed, variables_total = regression.compare(test, test.variables, all_results, build,
old_all_results,
repo.last_build,
init_done=init_done, allow_supplementary=options.allow_supplementary)
variables_passed, variables_total = regression.compare( test, test.variables, all_results, build,
old_all_results,
repo.last_build,
init_done=init_done, allow_supplementary=options.allow_supplementary)
if variables_passed == variables_total:
nok += 1
data_datasets.append(all_results)
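
For context, an editor's sketch (not part of this commit) of how a caller might use the extended regress_all_tests signature, with its new do_compare flag and three-element return; the repo, tests, and options objects are assumed to already exist:

    from npf.regression import Regression

    regression = Regression(repo)
    build, data_datasets, time_datasets = regression.regress_all_tests(
        tests=tests,
        options=options,
        history=1,
        do_compare=False,  # run the tests but skip the pass/fail comparison
    )
    if build is None:
        # regress_all_tests returns (None, None, None) on ScriptInitException
        print("Test initialization failed")
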
45 changes: 27 additions & 18 deletions npf/test.py
@@ -76,7 +76,7 @@ def _parallel_exec(param: RemoteParameters):
if wf[0].isdigit():
n=int(wf[0])
wf=wf[1:]
for i in range(n):
for _ in range(n):
param.event.listen(wf)

param.event.wait_for_termination(param.delay)
@@ -167,7 +167,7 @@ def __init__(self, test_path, options, tags=None, role=None, inline=None):
self.filename = os.path.basename(test_path)
self.path = os.path.dirname(os.path.abspath(test_path))
self.options = options
self.tags = tags if tags else []
self.tags = tags or []
self.role = role
self.pyexits = []

@@ -401,11 +401,20 @@ def test_tags(self):
return missings

def build_file_list(self, v, self_role=None, files=None) -> List[Tuple[str, str, str]]:
"""
Builds a list of files based on the provided variables and role.
:param v: A dictionary of variables to be used for file creation
:param self_role: The role associated with the current instance
:param files: A list of files to process
:returns: A list of tuples containing the filename, content, and role for each file
"""
create_list = []
if files is None:
files = self.files

for s in files:
role = s.get_role() if s.get_role() else self_role
role = s.get_role() or self_role
v["NPF_NODE_MAX"] = len(npf.nodes_for_role(role))
if not s.noparse:
s.filename = SectionVariable.replace_variables(v, s.filename, role, default_role_map = self.config.get_dict("default_role_map"))
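
Given the docstring added above, a hedged usage sketch (editor's illustration, not part of this commit; it assumes test is a loaded Test instance and uses hypothetical variable names):

    v = {"N": 4}  # variables available for file creation
    for filename, content, role in test.build_file_list(v, self_role="client"):
        print(f"{role or 'default'}: {filename} ({len(content)} bytes)")
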
@@ -425,12 +434,12 @@ def create_files(self, file_list, path_to_root):
unique_list = {}
for filename, p, role in file_list:
if filename in unique_list:
if unique_list[filename + (role if role else '')][1] != p:
if unique_list[filename + (role or '')][1] != p:
raise Exception(
"File name conflict ! Some of your scripts try to create some file with the same name but "
"different content (%s) !" % filename)
else:
unique_list[filename + (role if role else '')] = (filename, p, role)
unique_list[filename + (role or '')] = (filename, p, role)

for _, (filename, p, role) in unique_list.items():
if self.options.show_files:
Expand Down Expand Up @@ -498,8 +507,8 @@ def update_constants(self, v_internals : dict, build : Build, full_test_folder :
tp = os.path.relpath(self.path,abs_test_folder)

if node and node.executor.path:
bp = os.path.relpath(bp, npf.experiment_path() + '/testfolder/')
rp = os.path.relpath(rp, npf.experiment_path() + '/testfolder/')
bp = os.path.relpath(bp, f'{npf.experiment_path()}/testfolder/')
rp = os.path.relpath(rp, f'{npf.experiment_path()}/testfolder/')
v_internals.update({
'NPF_REPO':get_valid_filename(build.repo.name),
'NPF_REPO_PATH': rp,
@@ -523,9 +532,9 @@ def parse_results(self, regex_list: str, output: str, new_time_results: dict, ne
for nr in re.finditer(result_regex, output.strip(), re.IGNORECASE):
result_type = nr.group("type")

kind = nr.group("kind")
if kind is None:
kind = "time"
time_ns = nr.group("kind")
if time_ns is None:
time_ns = "time"
time_value = nr.group("time_value")
if result_type is None:
result_type = ''
@@ -559,22 +568,22 @@ def parse_results(self, regex_list: str, output: str, new_time_results: dict, ne
result_overwrite = self.config.get_bool_or_in("result_overwrite", result_type)
if time_value:
t = float(time_value)
if result_type in new_time_results.setdefault(kind,{}).setdefault(t, {}):
if result_type in new_time_results.setdefault(time_ns,{}).setdefault(t, {}):
#Result is already known
if result_add:
new_time_results[kind][t][result_type] += n
new_time_results[time_ns][t][result_type] += n
elif result_overwrite:
new_time_results[kind][t][result_type] = n
new_time_results[time_ns][t][result_type] = n
else:
if not result_append:
print(f"WARNING: There are multiple occurences of metric {result_type} for the same time {t}, please add the metric {result_type} in result_add, result_append or result_overwrite. result_appe d is selected by default, add `result_append={{{result_type}}}` to %config to silent this message.")

if type(new_time_results[kind][t][result_type]) is not list:
new_time_results[kind][t][result_type] = [new_time_results[kind][t][result_type]]
if type(new_time_results[time_ns][t][result_type]) is not list:
new_time_results[time_ns][t][result_type] = [new_time_results[time_ns][t][result_type]]

new_time_results[kind][t][result_type].append(n)
new_time_results[time_ns][t][result_type].append(n)
else:
new_time_results[kind][t][result_type] = n
new_time_results[time_ns][t][result_type] = n
else:
if result_append:
new_data_results.setdefault(result_type,[]).append(n)
@@ -689,7 +698,7 @@ def execute(self, build, run, v, n_runs=1, n_retry=0, allowed_types=SectionScrip

v["NPF_ROLE"] = role
for script in t.scripts:
srole = role if role else script.get_role()
srole = role or script.get_role()
nodes = npf.nodes_for_role(srole)

autokill = m.Value('i', 0) if npf.parseBool(script.params.get("autokill", t.config["autokill"])) else None
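
To make the merge policies in parse_results concrete, a standalone toy sketch (editor's illustration, not NPF code) of how a repeated metric at the same timestamp is summed (result_add), collected into a list (result_append, the default), or replaced (result_overwrite):

    results = {"time": {1.0: {"THROUGHPUT": 5.0}}}

    def merge(results, kind, t, metric, n, add=False, overwrite=False):
        # Mirrors the branching in parse_results above.
        slot = results.setdefault(kind, {}).setdefault(t, {})
        if metric in slot:
            if add:
                slot[metric] += n          # result_add: sum onto the known value
            elif overwrite:
                slot[metric] = n           # result_overwrite: keep the last value
            else:
                if not isinstance(slot[metric], list):
                    slot[metric] = [slot[metric]]
                slot[metric].append(n)     # result_append (default): keep all values
        else:
            slot[metric] = n

    merge(results, "time", 1.0, "THROUGHPUT", 7.0)         # append -> [5.0, 7.0]
    merge(results, "time", 1.0, "LATENCY", 3.0)            # first occurrence -> 3.0
    merge(results, "time", 1.0, "LATENCY", 2.0, add=True)  # result_add -> 5.0
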
48 changes: 26 additions & 22 deletions npf/test_driver.py
@@ -1,11 +1,11 @@
from typing import List
from typing import Dict, List, Tuple
from npf import npf
from npf.build import Build
from npf.regression import Grapher, OrderedDict, Regression, npf
from npf.repository import Repository
from npf.statistics import Statistics
from npf.test import Test
from npf.types.dataset import Dataset
from npf.types.dataset import Dataset, Run
from npf.types.series import Series


@@ -48,7 +48,7 @@ def run(self, test_name, options, tags:List, on_finish=None, do_regress=True):
return self.graphs_series, self.time_graphs_series


def group_series(filename: str, series: Series , time_series:Series, options):
def group_series(filename: str, series: Series , time_series:Series, options) -> Tuple[Dataset,Dict]:
"""
The function merges different series together, finding common variables
@@ -68,7 +68,7 @@ def group_series(filename: str, series: Series , time_series:Series, options):
#Group repo if asked to do so
if options.group_repo:
repo_series=OrderedDict()
for i, (test, build, dataset) in enumerate(series):
for test, build, dataset in series:
repo_series.setdefault(build.repo.reponame,(test,build,OrderedDict()))
for run, run_results in dataset.items():
run.write_variables()['SERIE'] = build.pretty_name()
@@ -105,7 +105,11 @@ def group_series(filename: str, series: Series , time_series:Series, options):
all_variables.append(v_list)

if options.statistics:
Statistics.run(build,dataset, test, max_depth=options.statistics_maxdepth, filename=options.statistics_filename if options.statistics_filename else npf.build_output_filename(options, [build.repo for t,build,d in series]))
Statistics.run(build,
dataset,
test,
max_depth=options.statistics_maxdepth,
filename=options.statistics_filename or npf.build_output_filename(options, [build.repo for t,build,d in series]))

common_variables = set.intersection(*map(set, all_variables))

@@ -116,7 +120,7 @@ def group_series(filename: str, series: Series , time_series:Series, options):
for variable in common_variables:
all_values = set()
all_alone=True
for i, (test, build, dataset) in enumerate(series):
for test, build, dataset in series:
serie_values = set()
for run, result_types in dataset.items():
if variable in run.read_variables():
@@ -153,15 +157,15 @@ def group_series(filename: str, series: Series , time_series:Series, options):

#Keep only the variables in Time Run that are useful, as defined above
if options.do_time:
n_time_series = OrderedDict()
for i, (test, build, time_dataset) in enumerate(time_series):
for kind, dataset in time_dataset.items():
new_dataset = OrderedDict()
n_time_series.setdefault(kind,[])
for run, results in dataset.items():
new_dataset[run.intersect(useful_variables + [kind])] = results
if new_dataset:
n_time_series[kind].append((test, build, new_dataset))
n_time_series = OrderedDict()
for test, build, time_dataset in time_series:
for kind, dataset in time_dataset.items():
new_dataset = OrderedDict()
n_time_series.setdefault(kind,[])
for run, results in dataset.items():
new_dataset[run.intersect(useful_variables + [kind])] = results
if new_dataset:
n_time_series[kind].append((test, build, new_dataset))

grapher = Grapher()
print("Generating graphs...")
@@ -171,11 +175,11 @@ def group_series(filename: str, series: Series , time_series:Series, options):
options=options,
title=options.graph_title)
if options.do_time:
for kind,series in n_time_series.items():
print("Generating graph for time serie '%s'..." % kind)
g = grapher.graph(series=series,
filename=filename,
fileprefix=kind,
options=options,
title=options.graph_title)
for time_ns,series in n_time_series.items():
print(f"Generating graph for time serie '{time_ns}'...")
g = grapher.graph( series=series,
filename=filename,
fileprefix=time_ns,
options=options,
title=options.graph_title)
return series, time_series
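
As a side note, the common-variable computation in group_series reduces to a set intersection over the variables of every series; a minimal standalone sketch (editor's illustration, variable names hypothetical):

    # Each inner list holds the variable names seen in one series.
    all_variables = [
        ["CPU", "LENGTH", "SERIE"],
        ["CPU", "LENGTH"],
        ["CPU", "LENGTH", "BURST"],
    ]
    common_variables = set.intersection(*map(set, all_variables))
    print(common_variables)  # {'CPU', 'LENGTH'}
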
11 changes: 7 additions & 4 deletions npf_compare.py
@@ -26,6 +26,9 @@
from npf.test_driver import group_series

def main():
"""
The main function for running the NPF cross-repository comparator.
"""
parser = argparse.ArgumentParser(description='NPF cross-repository comparator')

npf.add_verbosity_options(parser)
@@ -55,14 +58,14 @@ def main():
filename = npf.ensure_folder_exists(filename)

series, time_series = comparator.run(test_name=args.test_files,
tags=args.tags,
options=args,
on_finish=
tags=args.tags,
options=args,
on_finish=
lambda series,time_series:
group_series(filename,args,series,time_series,options=args) if args.iterative else None
)

do_graph(filename, args, series, time_series, options=args)
group_series(filename, series, time_series, options=args)

if __name__ == "__main__":
multiprocessing.set_start_method('forkserver')
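
The on_finish hook wired into comparator.run above enables iterative graphing: partial results are pushed to a callback after each finished serie. A minimal sketch of that pattern (editor's illustration; run_series and its arguments are hypothetical, not NPF API):

    def run_series(fn, n, on_finish=None):
        results = []
        for i in range(n):
            results.append(fn(i))
            if on_finish:
                on_finish(results)  # refresh partial output after each serie
        return results

    run_series(lambda i: i * i, 3, on_finish=lambda r: print("partial:", r))
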
