From e5ff75959a6f11efa828bab539b26d8597fc6ee5 Mon Sep 17 00:00:00 2001
From: MorellRAP
Date: Mon, 3 Jun 2024 22:41:59 +0200
Subject: [PATCH 1/7] Exit the exploration when a subgraph extraction loop is
 detected

---
 sxpat/xplore.py | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/sxpat/xplore.py b/sxpat/xplore.py
index e57782851..7b982b196 100644
--- a/sxpat/xplore.py
+++ b/sxpat/xplore.py
@@ -1,7 +1,7 @@
 import csv
 import time
 from typing import Iterable, Iterator, List, Union
-
+import networkx as nx
 from tabulate import tabulate
 import math
@@ -27,6 +27,7 @@
 def explore_grid(specs_obj: TemplateSpecs):
+    previous_subgraphs = []
     print(f'{specs_obj = }')
     labeling_time: float = -1
@@ -140,6 +141,8 @@ def explore_grid(specs_obj: TemplateSpecs):
         # extract subgraph
         t_start = time.time()
         subgraph_is_available = template_obj.current_graph.extract_subgraph(specs_obj)
+
+        previous_subgraphs.append(template_obj.current_graph.subgraph)
         subgraph_extraction_time = time.time() - t_start
         subgraph_number_inputs = template_obj.current_graph.subgraph_num_inputs
         subgraph_number_outputs = template_obj.current_graph.subgraph_num_outputs
@@ -372,7 +375,21 @@ def explore_grid(specs_obj: TemplateSpecs):
             if exists_an_area_zero(current_population):
                 break
-        display_the_tree(total)
+
+        loop_detected = False
+        for idx, s in enumerate(previous_subgraphs):
+            if idx == len(previous_subgraphs) - 1 and idx > 1:
+                if nx.utils.graphs_equal(previous_subgraphs[idx-2], previous_subgraphs[idx-1]) and \
+                   nx.utils.graphs_equal(previous_subgraphs[idx-2], previous_subgraphs[idx-3]):
+                    print(f'The last three subgraphs are equal')
+                    pprint.info3(f'The last three subgraphs are equal!')
+                    pprint.info3(f'Terminating the exploration!')
+                    loop_detected = True
+                    break
+        if loop_detected:
+            break
+
+        # display_the_tree(total)
     stats_obj.store_grid()
     return stats_obj

From eb59b9383ac67e2aad944cc66ba0c73ea7e395b1 Mon Sep 17 00:00:00 2001
From: MorellRAP
Date: Wed, 5 Jun 2024 08:30:02 +0200
Subject: [PATCH 2/7] Fix the infinite loop hit when every candidate reaches
 the UNSAT limit
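When every candidate's UNSAT counter reaches the per-iteration cell total, the
guard below only `continue`s, so nothing in the loop body can lower
`obtained_wce_exact` and the outer `while` spins forever. This patch latches
that condition in a `max_unsat_reached` flag and breaks out of the exploration
at the top of the next iteration (two-phase templates are exempt). A minimal
sketch of the new control flow; `error_budget_left()` and `solve(candidate)`
are hypothetical stand-ins for the real loop condition and the per-candidate
grid exploration body:

    max_unsat_reached = False
    while error_budget_left():
        # once every candidate has hit the UNSAT cap, stop exploring
        if max_unsat_reached and not template_obj.is_two_phase_kind:
            break
        for candidate in current_population:
            if pre_iter_unsats[candidate] == total_number_of_cells_per_iter \
                    and not specs_obj.keep_unsat_candidate:
                max_unsat_reached = True
                continue  # skip the exhausted candidate instead of re-solving it
            solve(candidate)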
---
 sxpat/stats.py  | 2 +-
 sxpat/xplore.py | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/sxpat/stats.py b/sxpat/stats.py
index 070898faf..8c4e44e91 100644
--- a/sxpat/stats.py
+++ b/sxpat/stats.py
@@ -566,7 +566,7 @@ def get_grid_name(self):
         technique_specific += f'fef{self.specs.full_error_function}_'
         technique_specific += f'sef{self.specs.sub_error_function}_'

-        tail = f'mode{self.specs.mode}_omax{self.specs.omax}_imax{self.specs.imax}_'
+        tail = f'mode{self.specs.mode}_ns{self.specs.num_subgraphs}_omax{self.specs.omax}_imax{self.specs.imax}_'
         tail += f'kuc{self.specs.keep_unsat_candidate}_'
         tail += f'{self.template_name}_time'

diff --git a/sxpat/xplore.py b/sxpat/xplore.py
index 7b982b196..f06a99443 100644
--- a/sxpat/xplore.py
+++ b/sxpat/xplore.py
@@ -82,6 +82,7 @@ def explore_grid(specs_obj: TemplateSpecs):
     i = 0
     prev_actual_error = 0
     prev_given_error = 0
+    max_unsat_reached = False
     while (obtained_wce_exact < available_error):
         i += 1
         if specs_obj.et_partitioning == 'asc':
@@ -107,11 +108,15 @@ def explore_grid(specs_obj: TemplateSpecs):
               if (specs_obj.subxpat or specs_obj.subxpat_v2) else
               f'Only one iteration with et {et}')

+        if max_unsat_reached and not template_obj.is_two_phase_kind:
+            break
+
         # for all candidates
         for candidate in current_population:
             # guard
             if pre_iter_unsats[candidate] == total_number_of_cells_per_iter and not specs_obj.keep_unsat_candidate:
                 pprint.info1(f'Number of UNSATs reached!')
+                max_unsat_reached = True
                 continue

             pprint.info1(f'candidate {candidate}')

From e618b6cb17b89d53d468671a3b95fd2db55e04b5 Mon Sep 17 00:00:00 2001
From: MorellRAP
Date: Tue, 25 Jun 2024 00:40:50 +0200
Subject: [PATCH 3/7] Add the analysis script and subgraph-extraction modes
 used for the experiments on the server

---
 analyze.py               | 597 +++++++++++++++++++++++++++
 sxpat/annotatedGraph.py  | 863 +++++++++++++++++++++++++++++++++++----
 sxpat/synthesis.py       |  15 +-
 sxpat/templateCreator.py |  28 +-
 sxpat/xplore.py          |  94 +++--
 5 files changed, 1469 insertions(+), 128 deletions(-)
 create mode 100644 analyze.py

diff --git a/analyze.py b/analyze.py
new file mode 100644
index 000000000..b7ceaa61b
--- /dev/null
+++ b/analyze.py
@@ -0,0 +1,597 @@
+import csv
+import os
+import pandas as pd
+from scipy.stats import spearmanr
+import matplotlib.pyplot as plt
+import re
+import itertools
+
+from typing import List, Tuple, Dict
+from sxpat.config.paths import *
+from sxpat.arguments import Arguments
+
+def main():
+
+    args = Arguments.parse()
+    print(f'{args.benchmark_name = }')
+    folder = f'experiments/mecals/{args.benchmark_name}'
+    mecals_area_error = get_mecals_area_error(args, folder)
+
+    folder = 'experiments/results'
+    rel_files = get_relevant_files(args, folder)
+
+    area_error_mode_per_grid_dict = get_area_error_per_mode_grid(rel_files, folder)
+    plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict, mecals_area_error)
+
+    error_cells_subgraph_inputs_dict_per_mode = get_subgraph_inputs_vs_sat_cells(rel_files, folder)
+    report_sat_cells_vs_subgraph_inputs(args, error_cells_subgraph_inputs_dict_per_mode)
+
+    # error_cells_dict_per_mode = get_sat_cells(rel_files, folder)
+    # plot_cells_per_mode(args, error_cells_dict_per_mode)
+
+    get_runtime_per_mode_decomposition(rel_files, folder)
+    sorted_runtime_error_per_mode = get_runtime_per_mode(rel_files, folder)
+    plot_runtime_error_per_mode(args, sorted_runtime_error_per_mode)
+
+    sorted_area_error = get_area_error(rel_files, folder)
+    plot_area_error(args, sorted_area_error, mecals_area_error)
+
+    # area_error_ns_dict = 
get_area_error_per_num_subgraphs(rel_files, folder)
+    # plot_area_error_per_ns(args, area_error_ns_dict)
+
+    area_error_mode_dict = get_area_error_per_mode(rel_files, folder)
+    plot_area_error_per_mode(args, area_error_mode_dict)
+
+    # sorted_aggregated_runtime_error = get_runtime_aggregated(rel_files, folder)
+    # plot_runtime_aggregated(args, sorted_aggregated_runtime_error)
+
+    # print(f'{sorted_area_error = }')
+    # gather the data:
+
+    # draw the figures we want:
+
+
+def get_relevant_files(args: Arguments, folder):
+    # search the folder:
+    all_files = [f for f in os.listdir(folder)]
+    relevant_files = []
+    for file in all_files:
+        if re.search(args.benchmark_name, file) and file.endswith('.csv'):
+            relevant_files.append(file)
+    return relevant_files
+
+
+def get_sat_cells(relevant_files, folder):
+    error_cell_dict_per_mode: Dict = {}
+    error_cell_dict: Dict = {}
+    for mode in range(20):
+        for rep in relevant_files:
+            if re.search(f'mode{mode}_', rep):
+                with open(f'{folder}/{rep}', 'r') as f:
+                    csvreader = csv.reader(f)
+                    for line in csvreader:
+                        if line[0].startswith('cell'):  # skip the first line
+                            continue
+                        else:
+                            if line[3].startswith('SAT'):
+                                cell = line[0]
+                                cell = cell.replace('(', '').replace(')', '').strip().split(',')
+
+                                lpp = re.search('(\d+)', cell[0]).group()[0]
+                                ppo = re.search('(\d+)', cell[1]).group()[0]
+
+                                error = 2 ** (int(line[1]) - 1)
+
+                                if error not in error_cell_dict.keys():
+                                    error_cell_dict[error] = []
+
+                                error_cell_dict[error].append((lpp, ppo))
+        if error_cell_dict:
+            error_cell_dict_per_mode[mode] = error_cell_dict
+            error_cell_dict = {}
+    return error_cell_dict_per_mode
+
+def get_subgraph_inputs_vs_sat_cells(relevant_files, folder):
+    error_cell_dict_per_mode: Dict = {}
+    error_cell_dict: Dict = {}
+    for mode in range(20):
+        for rep in relevant_files:
+            if re.search(f'mode{mode}_', rep):
+                with open(f'{folder}/{rep}', 'r') as f:
+                    csvreader = csv.reader(f)
+                    for line in csvreader:
+                        if line[0].startswith('cell'):  # skip the first line
+                            continue
+                        else:
+                            if line[3].startswith('SAT'):
+                                sub_in = int(line[11])
+                                cell = line[0]
+                                cell = cell.replace('(', '').replace(')', '').strip().split(',')
+
+                                lpp = re.search('(\d+)', cell[0]).group()[0]
+                                ppo = re.search('(\d+)', cell[1]).group()[0]
+
+                                error = 2 ** (int(line[1]) - 1)
+
+                                if error not in error_cell_dict.keys():
+                                    error_cell_dict[error] = []
+
+                                error_cell_dict[error].append((lpp, ppo, sub_in))
+        if error_cell_dict:
+            error_cell_dict_per_mode[mode] = error_cell_dict
+            error_cell_dict = {}
+    return error_cell_dict_per_mode
+
+def get_area_error(relevant_files, folder):
+    area_error: List[Tuple[float, int]] = []
+    for rep in relevant_files:
+        with open(f'{folder}/{rep}', 'r') as f:
+            csvreader = csv.reader(f)
+            for line in csvreader:
+                if line[0].startswith('cell'):  # skip the first line
+                    continue
+                else:
+                    if line[3].startswith('SAT'):
+                        area = float(line[5])
+                        error = int(line[8])
+                        area_error.append((area, error))
+
+    sorted_area_error = sorted(area_error, key=lambda x: x[1])
+
+    return sorted_area_error
+
+def get_mecals_area_error(args, folder):
+    area_error: List[Tuple[float, int]] = []
+    relevant_files = _get_mecals_rel_files(args, folder)
+
+    if relevant_files:
+        for rep in relevant_files:
+            with open(f'{folder}/{rep}', 'r') as f:
+                cur_area = float(f.readline())
+                cur_wce = int(re.search('wce(\d+).', rep).group(1))
+
+                area_error.append((cur_area, cur_wce))
+        sorted_area_error = sorted(area_error, key=lambda x: x[1])
+        return sorted_area_error
+    else:
+        return []
+def 
get_runtime_aggregated(relevant_files, folder): + runtime_error: List[Tuple[float, int]] = [] + runtime_dict = {} + for rep in relevant_files: + with open(f'{folder}/{rep}', 'r') as f: + csvreader = csv.reader(f) + for line in csvreader: + if line[0].startswith('cell'): # skip the first line + continue + else: + error = int(line[8]) + runtime = float(line[9]) + float(line[10]) + float(line[-1]) + if error not in runtime_dict: + runtime_dict[error] = runtime + else: + runtime_dict[error] += runtime + + runtime_error = [(runtime, error) for error, runtime in runtime_dict.items()] + + + sorted_runtime_error = sorted(runtime_error, key=lambda x: x[1]) + + + return sorted_runtime_error +def get_area_error_per_num_subgraphs(relevant_files, folder): + area_error_ns_dict: Dict[int: [float, int]] = {} + area_error: List[Tuple[float, int]] = [] + for ns in [1, 5, 10, 15]: + for rep in relevant_files: + if re.search(f'ns{ns}_', rep): + with open(f'{folder}/{rep}', 'r') as f: + csvreader = csv.reader(f) + for line in csvreader: + if line[0].startswith('cell'): # skip the first line + continue + else: + if line[3].startswith('SAT'): + area = float(line[5]) + error = int(line[8]) + + area_error.append((area, error)) + + sorted_area_error = sorted(area_error, key=lambda x: x[1]) + area_error_ns_dict[ns] = sorted_area_error + area_error = [] + sorted_area_error = [] + return area_error_ns_dict + +def get_area_error_per_mode(relevant_files, folder): + area_error_mode_dict: Dict[int: [float, int]] = {} + area_error: List[Tuple[float, int]] = [] + for mode in range(20): + for rep in relevant_files: + if re.search(f'mode{mode}_', rep): + with open(f'{folder}/{rep}', 'r') as f: + csvreader = csv.reader(f) + for line in csvreader: + if line[0].startswith('cell'): # skip the first line + continue + else: + if line[3].startswith('SAT'): + area = float(line[5]) + error = int(line[8]) + + area_error.append((area, error)) + + sorted_area_error = sorted(area_error, key=lambda x: x[1]) + if sorted_area_error: + area_error_mode_dict[mode] = sorted_area_error + area_error = [] + sorted_area_error = [] + + return area_error_mode_dict + + +def get_area_error_per_mode_grid(relevant_files, folder): + area_error_mode_per_grid_dict: Dict[int: [float, int]] = {} + area_error_per_grid = {} + grids = [] + for mode in range(200): + for rep in relevant_files: + if re.search(f'mode{mode}_', rep): + grid = re.search(f'(\d+X\d+)', rep).group() + if grid not in grids: + grids.append(grid) + + with open(f'{folder}/{rep}', 'r') as f: + csvreader = csv.reader(f) + for line in csvreader: + if line[0].startswith('cell'): # skip the first line + continue + else: + if line[3].startswith('SAT'): + area = float(line[5]) + error = int(line[8]) + + if grid not in area_error_per_grid.keys(): + area_error_per_grid[grid] = [] + area_error_per_grid[grid].append((area, error)) + + + if area_error_per_grid: + area_error_mode_per_grid_dict[mode] = area_error_per_grid + area_error_per_grid = {} + return area_error_mode_per_grid_dict + + + +def get_runtime_per_num_subgraphs(relevant_files, folder): + pass + +def get_runtime_per_mode(relevant_files, folder): + + runtime_error_dict: Dict[Tuple[int, float]] = {} + runtime_error_per_mode_dict = {} + for mode in range(20): + for rep in relevant_files: + if re.search(f'mode{mode}_', rep): + with open(f'{folder}/{rep}', 'r') as f: + csvreader = csv.reader(f) + for line in csvreader: + if line[0].startswith('cell'): # skip the first line + continue + else: + error = int(line[8]) + runtime = float(line[9]) + 
float(line[10]) + float(line[-1]) + + if error not in runtime_error_dict: + runtime_error_dict[error] = runtime + else: + runtime_error_dict[error] += runtime + + runtime_error = [(runtime, error) for error, runtime in runtime_error_dict.items()] + sorted_runtime_error = sorted(runtime_error, key=lambda x: x[1]) + if sorted_runtime_error: + runtime_error_per_mode_dict[mode] = sorted_runtime_error + runtime_error_dict ={} + return runtime_error_per_mode_dict + + +def get_runtime_per_mode_decomposition(relevant_files, folder): + + runtime_error_decomposed_dict: Dict[Tuple[int, float]] = {} + runtime_error_per_mode_dict = {} + for mode in range(20): + for rep in relevant_files: + if re.search(f'mode{mode}_', rep): + with open(f'{folder}/{rep}', 'r') as f: + csvreader = csv.reader(f) + for line in csvreader: + if line[0].startswith('cell'): # skip the first line + continue + else: + error = int(line[8]) + subgraph_extraction = float(line[9]) + labeling = float(line[10]) + exploration = float(line[-1]) + if error not in runtime_error_decomposed_dict: + runtime_error_decomposed_dict[error] = [] + runtime_error_decomposed_dict[error].append((subgraph_extraction, labeling, exploration)) + + + + # runtime_error = [(runtime, error) for error, runtime in runtime_error_dict.items()] + # sorted_runtime_error = sorted(runtime_error, key=lambda x: x[1]) + # if sorted_runtime_error: + # runtime_error_per_mode_dict[mode] = sorted_runtime_error + # runtime_error_dict ={} + # for el in runtime_error_decomposed_dict: + # print(f'{runtime_error_decomposed_dict[el] = }') + # print(f'{runtime_error_decomposed_dict = }') + return runtime_error_per_mode_dict + + +def report_sat_cells(args, error_cell_dict_per_mode: Dict): + records = [] + for mode, thresholds in error_cell_dict_per_mode.items(): + for threshold, tuples in thresholds.items(): + for tpl in tuples: + records.append((mode, threshold, tpl)) + + # Create a DataFrame from the records + df = pd.DataFrame(records, columns=['Mode', 'Error_Threshold', 'Tuple']) + + # Summarize the DataFrame by counting occurrences of each tuple for every mode and error threshold + summary = df.groupby(['Mode', 'Error_Threshold', 'Tuple']).size().reset_index(name='Count') + + # Calculate the total count for each mode + total_counts = summary.groupby('Mode')['Count'].transform('sum') + + # Calculate the percentage + summary['Percentage'] = round((summary['Count'] / total_counts) * 100, 2) + + folder, _ = OUTPUT_PATH['figure'] + # Save the summary DataFrame to a CSV file + summary.to_csv(f'{folder}/{args.benchmark_name}_error_sat_cells_distribution_per_mode.csv', index=False) + print(f'{summary}') + + +def report_sat_cells_vs_subgraph_inputs(args, error_cells_subgraph_inputs_dict_per_mode: Dict): + records = [] + for mode, thresholds in error_cells_subgraph_inputs_dict_per_mode.items(): + for threshold, tuples in thresholds.items(): + for tpl in tuples: + lpp, ppo, subgraph_input = tpl + records.append((mode, threshold, (lpp, ppo), subgraph_input)) + + df = pd.DataFrame(records, columns=['Mode', 'Error_Threshold', 'Tuple (lpp, ppo)', 'Subgraph_Input']) + + # Summarize the DataFrame + summary = df.groupby(['Mode', 'Error_Threshold', 'Tuple (lpp, ppo)'])['Subgraph_Input'].agg( + ['count', 'mean', 'std']).reset_index() + + # Rename columns for clarity + summary.columns = ['Mode', 'Error_Threshold', 'Tuple (lpp, ppo)', 'Count', 'Mean of Subgraph Inputs', + 'Std of Subgraph Inputs'] + + # Calculate the total count for each mode + total_counts = 
summary.groupby('Mode')['Count'].transform('sum') + + # Calculate the percentage + summary['Percentage'] = round((summary['Count'] / total_counts) * 100, 2) + + folder, _ = OUTPUT_PATH['figure'] + # Save the summary DataFrame to a CSV file + summary.to_csv(f'{folder}/{args.benchmark_name}_error_sat_cells_distribution_per_mode.csv', index=False) + print(f'{summary}') + + + +def plot_cells_per_mode(args, error_cell_dict_per_mode): + def tuple_score(tpl): + return sum(int(x) for x in tpl) + + # Calculate the total score for each mode + mode_scores = {} + for mode, thresholds in error_cell_dict_per_mode.items(): + total_score = 0 + for threshold, tuples in thresholds.items(): + for tpl in tuples: + total_score += tuple_score(tpl) + mode_scores[mode] = total_score + + # Convert the scores to a DataFrame + scores_df = pd.DataFrame(list(mode_scores.items()), columns=['Mode', 'Total_Score']) + + # Calculate Spearman rank correlation + corr, p_value = spearmanr(scores_df['Mode'], scores_df['Total_Score']) + + print(f"Spearman Correlation Coefficient: {corr}") + print(f"P-value: {p_value}") + + plt.figure(figsize=(8, 5)) + plt.bar(scores_df['Mode'], scores_df['Total_Score'], color=['blue', 'orange']) + plt.xlabel('Mode') + plt.ylabel('Total Sum of lpp and ppo for SAT cells') + plt.title('Total Scores') + plt.xticks(scores_df['Mode']) + + folder, _ = OUTPUT_PATH['figure'] + figname = f'{folder}/{args.benchmark_name}_cell_scores_per_mode.png' + plt.savefig(figname) + +def plot_area_error(args, sorted_area_error: List, mecals_area_error: List = None): + areas = [item[0] for item in sorted_area_error] + errors = [item[1] for item in sorted_area_error] + + # Plot the data + plt.figure(figsize=(10, 6)) + assert len(errors) == len(areas) + if mecals_area_error: + mecals_areas = [item[0] for item in mecals_area_error] + mecals_errors = [item[1] for item in mecals_area_error] + plt.plot(mecals_errors, mecals_areas, 's--', label='MECALS', color='black') + plt.legend() + + plt.plot(errors, areas, 'o', label='SubXPAT', color='blue') + plt.title(f'{args.benchmark_name}, ') + plt.xlabel('ET') + plt.ylabel('Area') + plt.grid(True) + folder, _ = OUTPUT_PATH['figure'] + figname = f'{folder}/{args.benchmark_name}_all.png' + plt.savefig(figname) + +def plot_area_error_per_ns(args, area_error_ns_dict: Dict): + plt.figure(figsize=(10, 6)) + plt.title(f'{args.benchmark_name} per number of subgraphs') + marker = itertools.cycle(('D', 'o', 'X', 's')) + for ns in area_error_ns_dict.keys(): + print(f'{ns = }') + + sorted_area_error = area_error_ns_dict[ns] + # print(f'{sorted_area_error = }') + areas = [item[0] for item in sorted_area_error] + errors = [item[1] for item in sorted_area_error] + + + plt.scatter(errors, areas, marker=next(marker), label=ns, alpha=0.5) + plt.legend() + plt.xlabel('ET') + plt.ylabel('Area') + plt.grid(True) + folder, _ = OUTPUT_PATH['figure'] + figname = f'{folder}/{args.benchmark_name}_per_num_subgraphs.png' + plt.savefig(figname) + +def plot_runtime_error_per_mode(args, runtime_error_per_mode_dict: Dict): + plt.figure(figsize=(10, 6)) + plt.title(f'{args.benchmark_name} per mode') + marker = itertools.cycle(('D', 'o', 'X', 's')) + for mode in runtime_error_per_mode_dict.keys(): + print(f'{mode = }') + + sorted_area_error = runtime_error_per_mode_dict[mode] + + runtimes = [item[0] for item in sorted_area_error] + errors = [item[1] for item in sorted_area_error] + + print(f'{round(sum(runtimes)/60, 2) = }') + cur_time = round(sum(runtimes)/60, 2) + plt.plot(errors, runtimes, marker=next(marker), 
label=f'{mode}, {cur_time}', alpha=0.5, linestyle='--') + + plt.legend() + plt.xlabel('ET') + plt.ylabel('Runtime') + plt.grid(True) + folder, _ = OUTPUT_PATH['figure'] + figname = f'{folder}/{args.benchmark_name}_runtime_per_mode.png' + plt.savefig(figname) + + + +def plot_area_error_per_mode(args, area_error_mode_dict: Dict): + plt.figure(figsize=(10, 6)) + plt.title(f'{args.benchmark_name} per mode') + marker = itertools.cycle(('D', 'o', 'X', 's')) + for mode in area_error_mode_dict.keys(): + print(f'{mode = }') + + sorted_area_error = area_error_mode_dict[mode] + # print(f'{sorted_area_error = }') + areas = [item[0] for item in sorted_area_error] + errors = [item[1] for item in sorted_area_error] + + + plt.scatter(errors, areas, marker=next(marker), label=mode, alpha=0.5) + plt.legend() + plt.xlabel('ET') + plt.ylabel('Runtime') + plt.grid(True) + folder, _ = OUTPUT_PATH['figure'] + figname = f'{folder}/{args.benchmark_name}_area_per_mode.png' + plt.savefig(figname) + + +def plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict: Dict, mecals_area_error = None): + plt.figure(figsize=(10, 6)) + plt.title(f'{args.benchmark_name} per mode and grid') + marker = itertools.cycle(('D', 'o', 'X', 's')) + for mode in area_error_mode_per_grid_dict.keys(): + + + sorted_area_error_per_mode = area_error_mode_per_grid_dict[mode] + for grid in sorted_area_error_per_mode.keys(): + sorted_area_error = sorted_area_error_per_mode[grid] + areas = [item[0] for item in sorted_area_error] + errors = [item[1] for item in sorted_area_error] + plt.scatter(errors, areas, marker=next(marker), label=f'mode{mode}, grid{grid}' , alpha=0.5) + + if mecals_area_error: + mecals_areas = [item[0] for item in mecals_area_error] + mecals_errors = [item[1] for item in mecals_area_error] + plt.plot(mecals_errors, mecals_areas, 's--', label='MECALS', color='black') + plt.legend() + + plt.legend() + plt.xlabel('ET') + plt.ylabel('Area') + plt.grid(True) + folder, _ = OUTPUT_PATH['figure'] + figname = f'{folder}/{args.benchmark_name}_area_per_mode_and_grid.png' + plt.savefig(figname) + + + + +def plot_runtime_aggregated(args, sorted_runtime_error): + areas = [item[0] for item in sorted_runtime_error] + errors = [item[1] for item in sorted_runtime_error] + + # Plot the data + plt.figure(figsize=(10, 6)) + assert len(errors) == len(areas) + + plt.plot(errors, areas, 'o-') + plt.title(f'{args.benchmark_name}, ') + plt.xlabel('ET') + plt.ylabel('Runtime') + plt.grid(True) + folder, _ = OUTPUT_PATH['figure'] + figname = f'{folder}/{args.benchmark_name}_aggregated_runtime.png' + plt.savefig(figname) + + + + +def _get_mecals_rel_files(args, folder): + # search the folder: + if os.path.exists(folder): + all_files = [f for f in os.listdir(folder)] + else: + return [] + relevant_files = [] + for file in all_files: + if re.search(args.benchmark_name, file) and file.endswith('.area'): + relevant_files.append(file) + return relevant_files + + +def get_muscat_area_error(): + pass + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/sxpat/annotatedGraph.py b/sxpat/annotatedGraph.py index 3686d7fa6..cac399094 100644 --- a/sxpat/annotatedGraph.py +++ b/sxpat/annotatedGraph.py @@ -352,6 +352,30 @@ def extract_subgraph(self, specs_obj: TemplateSpecs): for gate_idx in self.gate_dict: if self.subgraph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] == 1: cnt_nodes += 1 + elif mode == 121: + if self.subgraph_candidates: + pprint.info2( + f"Selecting the next subgraph candidate") + self.subgraph = 
self.form_subgraph_from_partition() + else: + pprint.info2( + f"Partition with omax={specs_obj.omax} and soft feasibility constraints on subgraph outputs, and limited inputs. Looking for largest partition") + self.subgraph = self.find_subgraph_feasible_soft_outputs_limited_inputs( + specs_obj) # Critian's subgraph extraction + cnt_nodes = 0 + for gate_idx in self.gate_dict: + if self.subgraph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] == 1: + cnt_nodes += 1 + elif mode == 51: + pprint.info2( + f"Partition with omax={specs_obj.omax} and hard feasibility constraints on inputs and outputs. Looking for largest partition") + self.subgraph = self.find_subgraph_feasible_hard_limited_inputs(specs_obj) # Critian's subgraph extraction + cnt_nodes = 0 + for gate_idx in self.gate_dict: + if self.subgraph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] == 1: + cnt_nodes += 1 + + pprint.success(f" (#ofNodes={cnt_nodes})") else: raise Exception('invalid mode!') else: @@ -1873,16 +1897,25 @@ def find_subgraph_feasible_hard(self, specs_obj: TemplateSpecs): tmp_graph.nodes[self.gate_dict[gate_idx]][COLOR] = WHITE return tmp_graph - def find_subgraph_feasible_soft(self, specs_obj: TemplateSpecs): + + def find_subgraph_feasible_hard_limited_inputs(self, specs_obj: TemplateSpecs): """ extracts a colored subgraph from the original non-partitioned graph object :return: an annotated graph in which the extracted subgraph is colored """ omax = specs_obj.omax feasibility_treshold = specs_obj.et + # print(f'{feasibility_treshold = }') - tmp_graph = self.graph.copy(as_view=False) + # pprint.info2(f'finding a subgraph (imax={imax}, omax={omax}) for {self.name}... ') + # Todo: + # 1) First, the number of outputs or outgoing edges of the subgraph + # Potential Fitness function = #of nodes/ (#ofInputs + #ofOutputs) + # print(f'Extracting subgraph...') + tmp_graph = self.graph.copy(as_view=False) + # print(f'{tmp_graph.nodes = }') + # Data structures containing the literals input_literals = {} # literals associated to the input nodes gate_literals = {} # literals associated to the gates in the circuit output_literals = {} # literals associated to the output nodes @@ -2071,21 +2104,23 @@ def find_subgraph_feasible_soft(self, specs_obj: TemplateSpecs): opt.add(output_literals[output_node_id] == False) # Add constraints on the number of output edges + opt.add(Sum(partition_input_edges) <= specs_obj.imax) opt.add(Sum(partition_output_edges) <= omax) + feasibility_constraints = [] for s in edge_w: - if gate_weight[s] <= feasibility_treshold and gate_weight[s] != -1: - print(s, "is feasible", gate_weight[s]) + if gate_weight[s] <= feasibility_treshold: + # print(s, "is feasible", gate_weight[s]) feasibility_constraints.append(edge_constraint[s]) - opt.add(Sum(feasibility_constraints) >= 1) + opt.add(Sum(feasibility_constraints) == Sum(partition_output_edges)) # Generate function to maximize for gate_id in gate_literals: max_func.append(gate_literals[gate_id]) - # Add function to maximize to the solver + # Add function to maximize to the solver opt.maximize(Sum(max_func)) # =========================== Skipping the nodes that are not labeled ================================ @@ -2104,94 +2139,55 @@ def find_subgraph_feasible_soft(self, specs_obj: TemplateSpecs): skipped_nodes.append(Bool(node_literal)) skipped_nodes_constraints = [node_literal == False for node_literal in skipped_nodes] opt.add(skipped_nodes_constraints) - # ==================================================================================================== - # 
=========================== Coming up with a penalty for each subgraph ============================= - penalty = Int('penalty') - - output_individual_penalty = [] - penalty_coefficient = 1 - for s in edge_w: - if gate_weight[s] > feasibility_treshold: - output_individual_penalty.append(If(gate_literals[s], - penalty_coefficient * (gate_weight[s] - feasibility_treshold), - 0)) - opt.add(penalty == Sum(output_individual_penalty)) - opt.add_soft(Sum(output_individual_penalty) <= 2 * feasibility_treshold, weight=1) - - # ======================================================== - - # opt.add(Sum(max_func) > 1) - # ======================== Check for multiple subgraphs ======================================= - all_partitions = {} - count = specs_obj.num_subgraphs - while count > 0: - node_partition = [] - pprint.info1(f'Attempt {count}: ', end='') - c = opt.check() - if c == sat: - pprint.success(f"subgraph found -> SAT") - # print(opt.model()) - m = opt.model() - # print(f'{m = }') - for t in m.decls(): - if 'penalty' in str(t): - print(f'{t} = {m[t]}') - if 'g' not in str(t): # Look only the literals associate to the gates - continue - if is_true(m[t]): - gate_id = int(str(t)[2:]) - node_partition.append(gate_id) # Gates inside the partition - - else: - pprint.warning("subgraph not found -> UNSAT") - count = 0 - # Check partition convexity - for i in range(len(node_partition) - 1): - for j in range(i + 1, len(node_partition)): - u = node_partition[i] - v = node_partition[j] - try: - path = nx.shortest_path(G, source=u, target=v) - all_nodes_in_partition = True - - # Check that all the nodes in the shortest path are in the partition - for n in path: - if n not in node_partition: - all_nodes_in_partition = False + node_partition = [] + if opt.check() == sat: + pprint.success("subgraph found -> SAT", end='') + # print(opt.model()) + m = opt.model() + for t in m.decls(): + if 'g' not in str(t): # Look only the literals associate to the gates + continue + if is_true(m[t]): + gate_id = int(str(t)[2:]) + node_partition.append(gate_id) # Gates inside the partition + else: + pprint.warning("subgraph not found -> UNSAT") - if not all_nodes_in_partition: - print("Partition is not convex") - exit(0) + # Check partition convexity + for i in range(len(node_partition) - 1): + for j in range(i + 1, len(node_partition)): + u = node_partition[i] + v = node_partition[j] + try: + # print(f'{u = }') + # print(f'{v = }') + # print(f'{G.nodes = }') + # print(f'{G.edges = }') + # print(f'{gate_literals = }') + # print(f'{node_partition = }') + path = nx.shortest_path(G, source=u, target=v) + all_nodes_in_partition = True - except nx.exception.NetworkXNoPath: - pass + # Check that all the nodes in the shortest path are in the partition + for n in path: + if n not in node_partition: + all_nodes_in_partition = False - # ======================================================================== - if c == sat: - block_clause = [d() == True if m[d] else d() == False for d in m.decls() if 'g' in d.name()] - opt.add(Not(And(block_clause))) - current_penalty = m[penalty].as_long() - print(f'{current_penalty}, {node_partition}') - all_partitions[count] = (current_penalty, node_partition) - count -= 1 - # ================================================================ - # =======================Pick the Subgraph with the lowest penalty ============================== - if all_partitions: - sorted_partitions = dict( - sorted( - all_partitions.items(), - key=lambda item: (-len(item[1][1]), item[1][0]) - ) - ) + if not 
all_nodes_in_partition: + print("Partition is not convex") + exit(0) - for par in sorted_partitions: - print(f'{sorted_partitions[par] = }') - penalty, node_partition = next(iter(sorted_partitions.values())) - print(f'{penalty, node_partition}') + except nx.exception.NetworkXNoPath: + # print('Here') + # except: + # pprint.error(f'Node {u} or {v} do not belong to the graph G {G.nodes}') + # raise nx.exception.NetworkXNoPath + # No path between u and v - # ================================================================ + # print("No path", u, v) + pass for gate_idx in self.gate_dict: # print(f'{gate_idx = }') @@ -2204,7 +2200,697 @@ def find_subgraph_feasible_soft(self, specs_obj: TemplateSpecs): tmp_graph.nodes[self.gate_dict[gate_idx]][COLOR] = WHITE return tmp_graph - def find_subgraph_feasible_soft_outputs(self, specs_obj: TemplateSpecs): + + def find_subgraph_feasible_soft(self, specs_obj: TemplateSpecs): + """ + extracts a colored subgraph from the original non-partitioned graph object + :return: an annotated graph in which the extracted subgraph is colored + """ + omax = specs_obj.omax + feasibility_treshold = specs_obj.et + + tmp_graph = self.graph.copy(as_view=False) + + input_literals = {} # literals associated to the input nodes + gate_literals = {} # literals associated to the gates in the circuit + output_literals = {} # literals associated to the output nodes + + # Data structures containing the edges + input_edges = {} # key = input node id, value = array of id. Contains id of gates in the circuit connected with the input node (childs) + gate_edges = {} # key = gate id, value = array of id. Contains the successors gate (childs) + output_edges = {} # key = output node id, value = array of id. Contains id of gates in the circuit connected with the output node (parents) + + # Optimizer + opt = Optimize() + + # Function to maximize + max_func = [] + + # List of all the partition edges + partition_input_edges = [] # list of all the input edges ([S'D_1 + S'D_2 + ..., ...]) + partition_output_edges = [] # list of all the output edges ([S_1D' + S_2D' + ..., ...]) + + # Generate all literals + for e in tmp_graph.edges: + if 'in' in e[0]: # Generate literal for each input node + in_id = int(e[0][2:]) + if in_id not in input_literals: + input_literals[in_id] = Bool("in_%s" % str(in_id)) + if 'g' in e[0]: # Generate literal for each gate in the circuit + g_id = int(e[0][1:]) + if g_id not in gate_literals and g_id not in self.constant_dict: # Not in constant_dict since we don't care about constants + gate_literals[g_id] = Bool("g_%s" % str(g_id)) + + if 'out' in e[1]: # Generate literal for each output node + out_id = int(e[1][3:]) + if out_id not in output_literals: + output_literals[out_id] = Bool("out_%s" % str(out_id)) + + # Generate structures holding edge information + for e in tmp_graph.edges: + if 'in' in e[0]: # Populate input_edges structure + in_id = int(e[0][2:]) + + if in_id not in input_edges: + input_edges[in_id] = [] + # input_edges[in_id].append(int(e[1][1:])) # this is a bug for a case where e = (in1, out1) + # Morteza added ============== + try: + input_edges[in_id].append(int(e[1][1:])) + except: + if re.search('g(\d+)', e[1]): + my_id = int(re.search('g(\d+)', e[1]).group(1)) + input_edges[in_id].append(my_id) + # ============================= + + if 'g' in e[0] and 'g' in e[1]: # Populate gate_edges structure + ns_id = int(e[0][1:]) + nd_id = int(e[1][1:]) + + if ns_id in self.constant_dict: + print("ERROR: Constants should only be connected to output nodes") + 
exit(0) + if ns_id not in gate_edges: + gate_edges[ns_id] = [] + # try: + gate_edges[ns_id].append(nd_id) + + if 'out' in e[1]: # Populate output_edges structure + out_id = int(e[1][3:]) + if out_id not in output_edges: + output_edges[out_id] = [] + # output_edges[out_id].append(int(e[0][1:])) + # Morteza added ============== + try: + output_edges[out_id].append(int(e[0][1:])) + except: + my_id = int(re.search('(\d+)', e[0]).group(1)) + output_edges[out_id].append(my_id) + + # ============================= + + # Define input edges + for source in input_edges: + edge_in_holder = [] + edge_out_holder = [] + + for destination in input_edges[source]: + e_in = And(Not(input_literals[source]), gate_literals[destination]) + + edge_in_holder.append(e_in) + + partition_input_edges.append(Or(edge_in_holder)) + + # Define gate edges and data structures containing the edge weights + edge_w = {} + edge_constraint = {} + + for source in gate_edges: + edge_in_holder = [] + edge_out_holder = [] + + for destination in gate_edges[source]: + e_in = And(Not(gate_literals[source]), gate_literals[destination]) + e_out = And(gate_literals[source], Not(gate_literals[destination])) + + edge_in_holder.append(e_in) + edge_out_holder.append(e_out) + + partition_input_edges.append(Or(edge_in_holder)) + if source not in edge_w: + edge_w[source] = tmp_graph.nodes[self.gate_dict[source]][WEIGHT] + + if source not in edge_constraint: + edge_constraint[source] = Or(edge_out_holder) + partition_output_edges.append(Or(edge_out_holder)) + + # Define output edges + for output_id in output_edges: + predecessor = output_edges[output_id][ + 0] # Output nodes have only one predecessor (it could be a gate or it could be an input) + if predecessor not in gate_literals: # This handle cases where input and output are directly connected + continue + e_out = And(gate_literals[predecessor], Not(output_literals[output_id])) + if predecessor not in edge_w: + edge_w[predecessor] = tmp_graph.nodes[self.gate_dict[predecessor]][WEIGHT] + if predecessor not in edge_constraint: + edge_constraint[predecessor] = e_out + partition_output_edges.append(e_out) + + # Create graph of the cicuit without input and output nodes + G = nx.DiGraph() + # print(f'{tmp_graph.edges = }') + for e in tmp_graph.edges: + if 'g' in str(e[0]) and 'g' in str(e[1]): + source = int(e[0][1:]) + destination = int(e[1][1:]) + + G.add_edge(source, destination) + # Morteza added ===================== + for e in tmp_graph.edges: + if 'g' in str(e[0]): + source = int(e[0][1:]) + if source in self.constant_dict: + continue + G.add_node(source) + # =================================== + + # Generate structure with gate weights + # for n in self.graph.nodes: + # print(f'{self.graph.nodes[n][WEIGHT] = }, {n =}') + # print(f'{self.gate_dict = }') + gate_weight = {} + for gate_idx in G.nodes: + + if gate_idx not in gate_weight: + gate_weight[gate_idx] = tmp_graph.nodes[self.gate_dict[gate_idx]][WEIGHT] + # print("Gate", gate_idx, " value ", gate_weight[gate_idx]) + + descendants = {} + ancestors = {} + for n in G: + if n not in descendants: + descendants[n] = list(nx.descendants(G, n)) + if n not in ancestors: + ancestors[n] = list(nx.ancestors(G, n)) + + # Generate convexity constraints + for source in gate_edges: + for destination in gate_edges[source]: + if len(descendants[destination]) > 0: # Constraints on output edges + not_descendants = [Not(gate_literals[l]) for l in descendants[destination]] + not_descendants.append(Not(gate_literals[destination])) + descendat_condition = 
Implies(And(gate_literals[source], Not(gate_literals[destination])), + And(not_descendants)) + opt.add(descendat_condition) + if len(ancestors[source]) > 0: # Constraints on input edges + not_ancestors = [Not(gate_literals[l]) for l in ancestors[source]] + not_ancestors.append(Not(gate_literals[source])) + ancestor_condition = Implies(And(Not(gate_literals[source]), gate_literals[destination]), + And(not_ancestors)) + opt.add(ancestor_condition) + + # Set input nodes to False + for input_node_id in input_literals: + opt.add(input_literals[input_node_id] == False) + + # Set output nodes to False + for output_node_id in output_literals: + opt.add(output_literals[output_node_id] == False) + + # Add constraints on the number of output edges + opt.add(Sum(partition_output_edges) <= omax) + feasibility_constraints = [] + for s in edge_w: + + if gate_weight[s] <= feasibility_treshold and gate_weight[s] != -1: + print(s, "is feasible", gate_weight[s]) + feasibility_constraints.append(edge_constraint[s]) + + opt.add(Sum(feasibility_constraints) >= 1) + + # Generate function to maximize + for gate_id in gate_literals: + max_func.append(gate_literals[gate_id]) + # Add function to maximize to the solver + + opt.maximize(Sum(max_func)) + + # =========================== Skipping the nodes that are not labeled ================================ + skipped_nodes = [] + for node in self.graph.nodes: + if self.graph.nodes[node][WEIGHT] == -1: + if node.startswith('g'): + node_literal = f'{node[0:1]}_{node[1:]}' + elif node.startswith('in'): + node_literal = f'{node[0:2]}_{node[2:]}' + elif node.startswith('out'): + node_literal = f'{node[0:3]}_{node[3:]}' + else: + print(f'Node is neither input, output, nor gate') + raise + skipped_nodes.append(Bool(node_literal)) + skipped_nodes_constraints = [node_literal == False for node_literal in skipped_nodes] + opt.add(skipped_nodes_constraints) + + # ==================================================================================================== + + # =========================== Coming up with a penalty for each subgraph ============================= + penalty = Int('penalty') + + output_individual_penalty = [] + penalty_coefficient = 1 + for s in edge_w: + if gate_weight[s] > feasibility_treshold: + output_individual_penalty.append(If(gate_literals[s], + penalty_coefficient * (gate_weight[s] - feasibility_treshold), + 0)) + opt.add(penalty == Sum(output_individual_penalty)) + opt.add_soft(Sum(output_individual_penalty) <= 2 * feasibility_treshold, weight=1) + + # ======================================================== + + # opt.add(Sum(max_func) > 1) + # ======================== Check for multiple subgraphs ======================================= + all_partitions = {} + count = specs_obj.num_subgraphs + while count > 0: + node_partition = [] + pprint.info1(f'Attempt {count}: ', end='') + c = opt.check() + if c == sat: + pprint.success(f"subgraph found -> SAT") + # print(opt.model()) + m = opt.model() + # print(f'{m = }') + for t in m.decls(): + if 'penalty' in str(t): + print(f'{t} = {m[t]}') + if 'g' not in str(t): # Look only the literals associate to the gates + continue + if is_true(m[t]): + gate_id = int(str(t)[2:]) + node_partition.append(gate_id) # Gates inside the partition + + else: + pprint.warning("subgraph not found -> UNSAT") + count = 0 + # Check partition convexity + for i in range(len(node_partition) - 1): + for j in range(i + 1, len(node_partition)): + u = node_partition[i] + v = node_partition[j] + try: + path = nx.shortest_path(G, source=u, 
target=v) + all_nodes_in_partition = True + + # Check that all the nodes in the shortest path are in the partition + for n in path: + if n not in node_partition: + all_nodes_in_partition = False + + if not all_nodes_in_partition: + print("Partition is not convex") + exit(0) + + except nx.exception.NetworkXNoPath: + pass + + # ======================================================================== + if c == sat: + block_clause = [d() == True if m[d] else d() == False for d in m.decls() if 'g' in d.name()] + opt.add(Not(And(block_clause))) + current_penalty = m[penalty].as_long() + print(f'{current_penalty}, {node_partition}') + all_partitions[count] = (current_penalty, node_partition) + count -= 1 + # ================================================================ + # =======================Pick the Subgraph with the lowest penalty ============================== + if all_partitions: + sorted_partitions = dict( + sorted( + all_partitions.items(), + key=lambda item: (-len(item[1][1]), item[1][0]) + ) + ) + + for par in sorted_partitions: + print(f'{sorted_partitions[par] = }') + penalty, node_partition = next(iter(sorted_partitions.values())) + print(f'{penalty, node_partition}') + + # ================================================================ + + for gate_idx in self.gate_dict: + # print(f'{gate_idx = }') + if gate_idx in node_partition: + # print(f'{gate_idx} is in the node_partition') + tmp_graph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] = 1 + tmp_graph.nodes[self.gate_dict[gate_idx]][COLOR] = RED + else: + tmp_graph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] = 0 + tmp_graph.nodes[self.gate_dict[gate_idx]][COLOR] = WHITE + return tmp_graph + + def find_subgraph_feasible_soft_outputs(self, specs_obj: TemplateSpecs): + """ + extracts a colored subgraph from the original non-partitioned graph object + :return: an annotated graph in which the extracted subgraph is colored + """ + omax = specs_obj.omax + feasibility_treshold = specs_obj.et + + tmp_graph = self.graph.copy(as_view=False) + + input_literals = {} # literals associated to the input nodes + gate_literals = {} # literals associated to the gates in the circuit + output_literals = {} # literals associated to the output nodes + + # Data structures containing the edges + input_edges = {} # key = input node id, value = array of id. Contains id of gates in the circuit connected with the input node (childs) + gate_edges = {} # key = gate id, value = array of id. Contains the successors gate (childs) + output_edges = {} # key = output node id, value = array of id. 
Contains id of gates in the circuit connected with the output node (parents) + + # Optimizer + opt = Optimize() + + # Function to maximize + max_func = [] + + # List of all the partition edges + partition_input_edges = [] # list of all the input edges ([S'D_1 + S'D_2 + ..., ...]) + partition_output_edges = [] # list of all the output edges ([S_1D' + S_2D' + ..., ...]) + partition_output_edges_penalty = [] + + # Generate all literals + for e in tmp_graph.edges: + if 'in' in e[0]: # Generate literal for each input node + in_id = int(e[0][2:]) + if in_id not in input_literals: + input_literals[in_id] = Bool("in_%s" % str(in_id)) + if 'g' in e[0]: # Generate literal for each gate in the circuit + g_id = int(e[0][1:]) + if g_id not in gate_literals and g_id not in self.constant_dict: # Not in constant_dict since we don't care about constants + gate_literals[g_id] = Bool("g_%s" % str(g_id)) + + if 'out' in e[1]: # Generate literal for each output node + out_id = int(e[1][3:]) + if out_id not in output_literals: + output_literals[out_id] = Bool("out_%s" % str(out_id)) + + # Generate structures holding edge information + for e in tmp_graph.edges: + if 'in' in e[0]: # Populate input_edges structure + in_id = int(e[0][2:]) + + if in_id not in input_edges: + input_edges[in_id] = [] + # input_edges[in_id].append(int(e[1][1:])) # this is a bug for a case where e = (in1, out1) + # Morteza added ============== + try: + input_edges[in_id].append(int(e[1][1:])) + except: + if re.search('g(\d+)', e[1]): + my_id = int(re.search('g(\d+)', e[1]).group(1)) + input_edges[in_id].append(my_id) + # ============================= + + if 'g' in e[0] and 'g' in e[1]: # Populate gate_edges structure + ns_id = int(e[0][1:]) + nd_id = int(e[1][1:]) + + if ns_id in self.constant_dict: + print("ERROR: Constants should only be connected to output nodes") + exit(0) + if ns_id not in gate_edges: + gate_edges[ns_id] = [] + # try: + gate_edges[ns_id].append(nd_id) + + if 'out' in e[1]: # Populate output_edges structure + out_id = int(e[1][3:]) + if out_id not in output_edges: + output_edges[out_id] = [] + # output_edges[out_id].append(int(e[0][1:])) + # Morteza added ============== + try: + output_edges[out_id].append(int(e[0][1:])) + except: + my_id = int(re.search('(\d+)', e[0]).group(1)) + output_edges[out_id].append(my_id) + + # ============================= + + # Define input edges + for source in input_edges: + edge_in_holder = [] + edge_out_holder = [] + + for destination in input_edges[source]: + e_in = And(Not(input_literals[source]), gate_literals[destination]) + + edge_in_holder.append(e_in) + + partition_input_edges.append(Or(edge_in_holder)) + + # Define gate edges and data structures containing the edge weights + edge_w = {} + edge_constraint = {} + + for source in gate_edges: + # print(f'{gate_literals[source] = }') + # print(f'{tmp_graph.nodes[self.gate_dict[source]][WEIGHT] = }') + edge_in_holder = [] + edge_out_holder = [] + + for destination in gate_edges[source]: + e_in = And(Not(gate_literals[source]), gate_literals[destination]) + e_out = And(gate_literals[source], Not(gate_literals[destination])) + + edge_in_holder.append(e_in) + edge_out_holder.append(e_out) + + partition_input_edges.append(Or(edge_in_holder)) + if source not in edge_w: + edge_w[source] = tmp_graph.nodes[self.gate_dict[source]][WEIGHT] + + if source not in edge_constraint: + edge_constraint[source] = Or(edge_out_holder) + partition_output_edges.append(Or(edge_out_holder)) + if tmp_graph.nodes[self.gate_dict[source]][WEIGHT] > 
feasibility_treshold: + this_output_penalty = tmp_graph.nodes[self.gate_dict[source]][WEIGHT] - feasibility_treshold + partition_output_edges_penalty.append(Or(edge_out_holder) * this_output_penalty) + # else: + # partition_output_edges_penalty.append(Or(edge_out_holder) * IntVal(0)) + + # Define output edges + for output_id in output_edges: + predecessor = output_edges[output_id][ + 0] # Output nodes have only one predecessor (it could be a gate or it could be an input) + + if predecessor not in gate_literals: # This handle cases where input and output are directly connected + continue + e_out = And(gate_literals[predecessor], Not(output_literals[output_id])) + if predecessor not in edge_w: + edge_w[predecessor] = tmp_graph.nodes[self.gate_dict[predecessor]][WEIGHT] + if predecessor not in edge_constraint: + edge_constraint[predecessor] = e_out + + partition_output_edges.append(e_out) + + if tmp_graph.nodes[self.gate_dict[predecessor]][WEIGHT] > feasibility_treshold: + this_output_penalty = tmp_graph.nodes[self.gate_dict[predecessor]][WEIGHT] - feasibility_treshold + partition_output_edges_penalty.append(e_out * this_output_penalty) + # else: + # partition_output_edges_penalty.append(e_out * IntVal(0)) + + # Create graph of the cicuit without input and output nodes + G = nx.DiGraph() + # print(f'{tmp_graph.edges = }') + for e in tmp_graph.edges: + if 'g' in str(e[0]) and 'g' in str(e[1]): + source = int(e[0][1:]) + destination = int(e[1][1:]) + + G.add_edge(source, destination) + # Morteza added ===================== + for e in tmp_graph.edges: + if 'g' in str(e[0]): + source = int(e[0][1:]) + if source in self.constant_dict: + continue + G.add_node(source) + # =================================== + + # Generate structure with gate weights + # for n in self.graph.nodes: + # print(f'{self.graph.nodes[n][WEIGHT] = }, {n =}') + # print(f'{self.gate_dict = }') + gate_weight = {} + for gate_idx in G.nodes: + + if gate_idx not in gate_weight: + gate_weight[gate_idx] = tmp_graph.nodes[self.gate_dict[gate_idx]][WEIGHT] + # print("Gate", gate_idx, " value ", gate_weight[gate_idx]) + + descendants = {} + ancestors = {} + for n in G: + if n not in descendants: + descendants[n] = list(nx.descendants(G, n)) + if n not in ancestors: + ancestors[n] = list(nx.ancestors(G, n)) + + # Generate convexity constraints + for source in gate_edges: + for destination in gate_edges[source]: + if len(descendants[destination]) > 0: # Constraints on output edges + not_descendants = [Not(gate_literals[l]) for l in descendants[destination]] + not_descendants.append(Not(gate_literals[destination])) + descendat_condition = Implies(And(gate_literals[source], Not(gate_literals[destination])), + And(not_descendants)) + opt.add(descendat_condition) + if len(ancestors[source]) > 0: # Constraints on input edges + not_ancestors = [Not(gate_literals[l]) for l in ancestors[source]] + not_ancestors.append(Not(gate_literals[source])) + ancestor_condition = Implies(And(Not(gate_literals[source]), gate_literals[destination]), + And(not_ancestors)) + opt.add(ancestor_condition) + + # Set input nodes to False + for input_node_id in input_literals: + opt.add(input_literals[input_node_id] == False) + + # Set output nodes to False + for output_node_id in output_literals: + opt.add(output_literals[output_node_id] == False) + + # Add constraints on the number of output edges + opt.add(Sum(partition_output_edges) <= omax) + + feasibility_constraints = [] + for s in edge_w: + + if gate_weight[s] <= feasibility_treshold and gate_weight[s] != -1: 
+ # print(s, "is feasible", gate_weight[s]) + feasibility_constraints.append(edge_constraint[s]) + + opt.add(Sum(feasibility_constraints) >= 1) + + # Generate function to maximize + for gate_id in gate_literals: + max_func.append(gate_literals[gate_id]) + # Add function to maximize to the solver + + opt.maximize(Sum(max_func)) + + # =========================== Skipping the nodes that are not labeled ================================ + skipped_nodes = [] + for node in self.graph.nodes: + if self.graph.nodes[node][WEIGHT] == -1: + if node.startswith('g'): + node_literal = f'{node[0:1]}_{node[1:]}' + elif node.startswith('in'): + node_literal = f'{node[0:2]}_{node[2:]}' + elif node.startswith('out'): + node_literal = f'{node[0:3]}_{node[3:]}' + else: + print(f'Node is neither input, output, nor gate') + raise + skipped_nodes.append(Bool(node_literal)) + skipped_nodes_constraints = [node_literal == False for node_literal in skipped_nodes] + opt.add(skipped_nodes_constraints) + + # ==================================================================================================== + + # =========================== Coming up with a penalty for each subgraph ============================= + penalty_output = Int('penalty_output') + penalty_gate = Int('penalty_gate') + + output_individual_penalty = [] + penalty_coefficient = 1 + for s in edge_w: + if gate_weight[s] > feasibility_treshold: + output_individual_penalty.append(If(gate_literals[s], + penalty_coefficient * (gate_weight[s] - feasibility_treshold), + 0)) + + opt.add(penalty_output == Sum(partition_output_edges_penalty)) + # Why IntVal(1)? => Because sometimes the Sum results into an integer "Python number (e.g., int)", but we need a "Z3 number (e.g., ArithRef)" + opt.add_soft( IntVal(1)* Sum(partition_output_edges_penalty) <= omax * feasibility_treshold, weight = 100) + opt.add(penalty_gate == Sum(output_individual_penalty)) + opt.add_soft(IntVal(1)* Sum(output_individual_penalty) <= omax * feasibility_treshold, weight=1) + + # ======================================================== + # ======================== Check for multiple subgraphs ======================================= + all_partitions = {} + count = specs_obj.num_subgraphs + while count > 0: + node_partition = [] + pprint.info1(f'Attempt {specs_obj.num_subgraphs - count + 1}: ', end='') + c = opt.check() + if c == sat: + pprint.success(f"subgraph found -> SAT") + # print(opt.model()) + m = opt.model() + # print(f'{m = }') + for t in m.decls(): + if 'penalty_output' in str(t): + # print(f'{t} = {m[t]}') + penalty_output = m[t].as_long() + pass + if 'penalty_gate' in str(t): + # print(f'{t} = {m[t]}') + penalty_gate = m[t].as_long() + if 'g' not in str(t): # Look only the literals associate to the gates + continue + if is_true(m[t]): + gate_id = int(str(t)[2:]) + node_partition.append(gate_id) # Gates inside the partition + + else: + pprint.warning("subgraph not found -> UNSAT") + count = 0 + # Check partition convexity + for i in range(len(node_partition) - 1): + for j in range(i + 1, len(node_partition)): + u = node_partition[i] + v = node_partition[j] + try: + path = nx.shortest_path(G, source=u, target=v) + all_nodes_in_partition = True + + # Check that all the nodes in the shortest path are in the partition + for n in path: + if n not in node_partition: + all_nodes_in_partition = False + + if not all_nodes_in_partition: + print("Partition is not convex") + exit(0) + + except nx.exception.NetworkXNoPath: + pass + + # 
======================================================================== + if c == sat: + block_clause = [d() == True if m[d] else d() == False for d in m.decls() if 'g_' in d.name()] + opt.add(Not(And(block_clause))) + + all_partitions[count] = (penalty_output, penalty_gate, node_partition) + count -= 1 + # ================================================================ + # =======================Pick the Subgraph with the lowest penalty ==============================2 + sorted_partitions = {} + if all_partitions: + sorted_partitions = dict( + sorted( + all_partitions.items(), + key=lambda item: (-len(item[1][2]), item[1][0], item[1][1]) + ) + ) + + for par in sorted_partitions: + print(f'{sorted_partitions[par] = }') + + first_key = next(iter(sorted_partitions)) + penalty_output, penalty_gate, node_partition = sorted_partitions.pop(first_key) + + # ================================================================ + self.subgraph_candidates = sorted_partitions + + for gate_idx in self.gate_dict: + # print(f'{gate_idx = }') + if gate_idx in node_partition: + # print(f'{gate_idx} is in the node_partition') + tmp_graph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] = 1 + tmp_graph.nodes[self.gate_dict[gate_idx]][COLOR] = RED + else: + tmp_graph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] = 0 + tmp_graph.nodes[self.gate_dict[gate_idx]][COLOR] = WHITE + return tmp_graph + + def find_subgraph_feasible_soft_outputs_limited_inputs(self, specs_obj: TemplateSpecs): """ extracts a colored subgraph from the original non-partitioned graph object :return: an annotated graph in which the extracted subgraph is colored @@ -2418,6 +3104,7 @@ def find_subgraph_feasible_soft_outputs(self, specs_obj: TemplateSpecs): opt.add(output_literals[output_node_id] == False) # Add constraints on the number of output edges + opt.add(Sum(partition_input_edges) <= specs_obj.imax) opt.add(Sum(partition_output_edges) <= omax) feasibility_constraints = [] diff --git a/sxpat/synthesis.py b/sxpat/synthesis.py index 277561cd9..b5afe649e 100644 --- a/sxpat/synthesis.py +++ b/sxpat/synthesis.py @@ -344,6 +344,11 @@ def __subgraph_inputs_assigns_shared(self): s_inputs_assigns += self.__get_fanin_cone(n) return s_inputs_assigns + + def key_funciton_for_sorting(self, el): + idx = int(re.search(r'(\d+)$', el).group(1)) + return idx + def __fix_order(self): subpgraph_input_list = list(self.graph.subgraph_input_dict.values()) subpgraph_input_list_ordered = [] @@ -357,7 +362,9 @@ def __fix_order(self): else: g_list.append(node) - pi_list.sort(key=lambda x: int(re.search('\d+', x).group())) + # pi_list.sort(key=lambda x: int(re.search('\d+', x).group())) + pi_list = sorted(pi_list, key=self.key_funciton_for_sorting) + g_list = sorted(g_list, key=self.key_funciton_for_sorting) for el in pi_list: subpgraph_input_list_ordered.append(el) for el in g_list: @@ -366,14 +373,14 @@ def __fix_order(self): def __subgraph_to_json_input_mapping(self): sub_to_json = f'//mapping subgraph inputs to json inputs\n' - + # print(f'{sub_to_json = }') subgraph_input_list = list(self.graph.subgraph_input_dict.values()) subgraph_input_list = self.__fix_order() - + # print(f'{ subgraph_input_list = }') for idx in range(self.graph.subgraph_num_inputs): sub_to_json += f'{sxpatconfig.VER_ASSIGN} {sxpatconfig.VER_JSON_WIRE_PREFIX}{sxpatconfig.VER_INPUT_PREFIX}{idx} = ' \ f'{sxpatconfig.VER_WIRE_PREFIX}{subgraph_input_list[idx]};\n' - + # print(f'{sub_to_json = }') return sub_to_json def __json_model_lpp_and_subgraph_output_assigns(self, idx: int = 0): diff --git 
a/sxpat/templateCreator.py b/sxpat/templateCreator.py index 616c158bf..57cb5805b 100644 --- a/sxpat/templateCreator.py +++ b/sxpat/templateCreator.py @@ -734,6 +734,11 @@ def __z3_get_subgraph_input_list(self): idx] = f"{self.__z3_get_approximate_label(inp)}({', '.join(list(self.current_graph.input_dict.values()))})" return input_list_tmp + + def key_funciton_for_sorting(self, el): + idx = int(re.search(r'(\d+)$', el).group(1)) + return idx + def __fix_order(self): subpgraph_input_list = list(self.current_graph.subgraph_input_dict.values()) subpgraph_input_list_ordered = [] @@ -742,18 +747,24 @@ def __fix_order(self): for node in subpgraph_input_list: if re.search('in(\d+)', node): - idx = int(re.search('in(\d+)', node).group(1)) pi_list.append(node) else: g_list.append(node) - pi_list.sort(key=lambda x: re.search('\d+', x).group()) + # pi_list.sort(key=key_funciton_for_sorting) + # print(f'{pi_list = }') + # print(f'{g_list = }') + pi_list = sorted(pi_list, key=self.key_funciton_for_sorting) + g_list = sorted(g_list, key=self.key_funciton_for_sorting) + # pi_list.sort(key=lambda x: re.search('\d+', x).group()) + # print(f'{pi_list = }') + # print(f'{g_list = }') for el in pi_list: subpgraph_input_list_ordered.append(el) for el in g_list: subpgraph_input_list_ordered.append(el) - + # print(f'{subpgraph_input_list_ordered = }') return subpgraph_input_list_ordered def z3_express_node_as_wire_constraints_subxpat(self, node: str): @@ -1768,22 +1779,26 @@ def z3_generate_stats(self): return stats def z3_generate_config(self): + print(f'{self.current_graph.num_outputs = }') config = '' config += f'ET = int(sys.argv[1])\n' \ + f"print(f'{{ET = }}')\n" \ f'wanted_models: int = 1 if len(sys.argv) < 3 else int(sys.argv[2])\n' \ f'timeout: float = float(sys.maxsize if len(sys.argv) < 4 else sys.argv[3])\n' \ - f'max_possible_ET: int = 2 ** 3 - 1\n' \ + f'max_possible_ET: int = 2 ** {self.current_graph.num_outputs} - 1\n' \ f'\n' return config def z3_generate_config_bitvec(self): + # print(f'{self.current_graph.num_outputs = }') config = '' config += f'ET = int(sys.argv[1])\n' \ + f"print(f'{{ET = }}')\n" \ f'ET_BV = BitVecVal(ET, {self.exact_graph.num_outputs})\n' \ f'wanted_models: int = 1 if len(sys.argv) < 3 else int(sys.argv[2])\n' \ f'timeout: float = float(sys.maxsize if len(sys.argv) < 4 else sys.argv[3])\n' \ - f'max_possible_ET: int = 2 ** 3 - 1\n' \ + f'max_possible_ET: int = 2 ** {self.exact_graph.num_outputs} - 1\n' \ f'\n' return config @@ -3525,11 +3540,12 @@ def z3_generate_stats(self): # NM def z3_generate_config(self): + print(f'{self.current_graph.num_outputs = }') config = '' config += f'ET = int(sys.argv[1])\n' \ f'wanted_models: int = 1 if len(sys.argv) < 3 else int(sys.argv[2])\n' \ f'timeout: float = float(sys.maxsize if len(sys.argv) < 4 else sys.argv[3])\n' \ - f'max_possible_ET: int = 2 ** 3 - 1\n' \ + f'max_possible_ET: int = 2 ** {self.current_graph.num_outputs} - 1\n' \ f'\n' return config diff --git a/sxpat/xplore.py b/sxpat/xplore.py index f06a99443..697a29099 100644 --- a/sxpat/xplore.py +++ b/sxpat/xplore.py @@ -82,7 +82,13 @@ def explore_grid(specs_obj: TemplateSpecs): i = 0 prev_actual_error = 0 prev_given_error = 0 - max_unsat_reached = False + + max_et = specs_obj.et + error_exceeded = False + loop_detected = False + persistance_limit = 3 + persistance = 1 + prev_et = -1 while (obtained_wce_exact < available_error): i += 1 if specs_obj.et_partitioning == 'asc': @@ -92,32 +98,48 @@ def explore_grid(specs_obj: TemplateSpecs): log2 = int(math.log2(specs_obj.et)) et = 
2**(log2 - i - 2) elif specs_obj.et_partitioning == 'smart_asc': - et = 1 if i == 1 else prev_given_error + (1 if prev_actual_error == 0 else 0) - if available_error < et*2: - et = available_error - prev_given_error = et + if max_et >= 8: + et = max_et // 8 if i == 1 else et + (max_et // 8 if (loop_detected or prev_actual_error == 0 or persistance == persistance_limit) else 0 ) + if prev_et == et: + persistance += 1 + else: + persistance = 0 + prev_et = et + else: + et = 1 if i == 1 else et + (1 if (loop_detected or prev_actual_error == 0 or persistance == persistance_limit) else 0 ) + if prev_et == et: + persistance += 1 + else: + persistance = 0 + prev_et = et elif specs_obj.et_partitioning == 'smart_desc': - et = available_error if i == 1 else prev_given_error - (1 if prev_actual_error == 0 else 0) - if et > available_error: - et = available_error - prev_given_error = et + et = available_error if i == 1 else et // (2 if prev_actual_error == 0 else 1) + # if et > available_error: + # et = available_error + # prev_given_error = et else: raise NotImplementedError('invalid status') + if et > available_error or et < 0: + break + + + pprint.info1(f'iteration {i} with et {et}, available error {available_error}' if (specs_obj.subxpat or specs_obj.subxpat_v2) else f'Only one iteration with et {et}') - if max_unsat_reached and not template_obj.is_two_phase_kind: - break + max_unsat_reached = False # for all candidates + # print(f'{current_population = }') for candidate in current_population: # guard - if pre_iter_unsats[candidate] == total_number_of_cells_per_iter and not specs_obj.keep_unsat_candidate: - pprint.info1(f'Number of UNSATs reached!') - max_unsat_reached = True - continue + # if pre_iter_unsats[candidate] == total_number_of_cells_per_iter and not specs_obj.keep_unsat_candidate: + # pprint.info1(f'Number of UNSATs reached!') + # max_unsat_reached = True + # pre_iter_unsats[candidate] = 0 + # continue pprint.info1(f'candidate {candidate}') if candidate.endswith('.v'): @@ -137,7 +159,7 @@ def explore_grid(specs_obj: TemplateSpecs): # label graph t_start = time.time() - et_coefficient = 8 + et_coefficient = 2 template_obj.label_graph(min_labeling=specs_obj.min_labeling, partial=specs_obj.partial_labeling, et=et*et_coefficient, parallel=specs_obj.parallel) labeling_time = time.time() - t_start @@ -147,7 +169,8 @@ def explore_grid(specs_obj: TemplateSpecs): t_start = time.time() subgraph_is_available = template_obj.current_graph.extract_subgraph(specs_obj) - previous_subgraphs.append(template_obj.current_graph.subgraph) + + subgraph_extraction_time = time.time() - t_start subgraph_number_inputs = template_obj.current_graph.subgraph_num_inputs subgraph_number_outputs = template_obj.current_graph.subgraph_num_outputs @@ -169,7 +192,7 @@ def explore_grid(specs_obj: TemplateSpecs): if et == available_error: available_error = 0 continue - + previous_subgraphs.append(template_obj.current_graph.subgraph) # run grid phase if template_obj.is_two_phase_kind: # todo:wip:marco @@ -215,7 +238,8 @@ def explore_grid(specs_obj: TemplateSpecs): # run script template_obj.z3_generate_z3pyscript() v1_start = time.time() - template_obj.run_z3pyscript(ET=specs_obj.et, num_models=specs_obj.num_of_models, timeout=specs_obj.timeout) + assert specs_obj.et == et, pprint.error(f'{et} != {specs_obj.et}') + template_obj.run_z3pyscript(ET=et, num_models=specs_obj.num_of_models, timeout=specs_obj.timeout) v1_end = time.time() subxpat_v1_time = v1_end - v1_start @@ -314,13 +338,19 @@ def explore_grid(specs_obj: 
TemplateSpecs):
         for candidate in cur_model_results:
             approximate_benchmark = candidate[:-2]
-            obtained_wce_exact = erroreval_verification_wce(exact_file_name, approximate_benchmark, template_obj.et)
-            if specs_obj.subxpat_v2:
-                obtained_wce_prev = erroreval_verification_wce(specs_obj.exact_benchmark, approximate_benchmark, template_obj.et)
-                prev_actual_error = obtained_wce_prev
-
+            obtained_wce_exact = erroreval_verification_wce(exact_file_name, approximate_benchmark, specs_obj.et)
             if obtained_wce_exact > template_obj.et:
-                raise Exception(color.error('ErrorEval Verification: FAILED!'))
+                pprint.error(f'ErrorEval Verification: FAILED! {obtained_wce_exact = } > {template_obj.et = }')
+                file_name = f'output/report/FAILED_{stats_obj.get_grid_name()[:-4]}_{obtained_wce_exact}>{template_obj.et}.txt'
+                with open(f'{file_name}', 'w'):
+                    pass
+                exit()
+            pprint.info3(f'Evaluating the local WCE between the input and the output of this iteration...')
+            obtained_wce_prev = erroreval_verification_wce(specs_obj.exact_benchmark, approximate_benchmark, template_obj.et)
+            prev_actual_error = obtained_wce_prev
+            pprint.info3(f'Local WCE = {obtained_wce_prev}')
+
             this_model = Model(id=0,
                                status=cur_status.upper(),
                                cell=(lpp, ppo),
                                et=obtained_wce_exact,
                                iteration=i,
                                area=cur_model_results[synth_obj.ver_out_name][0],
@@ -334,7 +364,7 @@ def explore_grid(specs_obj: TemplateSpecs):
                                subxpat_phase2_time=subxpat_phase2_time,
                                subxpat_v1_time=subxpat_v1_time)
             stats_obj.grid.cells[lpp][ppo].store_model_info(this_model)
-            print(f'{subxpat_v1_time = }')
+
             prev_string = ''
             if specs_obj.subxpat_v2:
                 sum_wce_actual += obtained_wce_prev
@@ -384,15 +414,19 @@ def explore_grid(specs_obj: TemplateSpecs):
         loop_detected = False
         for idx, s in enumerate(previous_subgraphs):
             if idx == len(previous_subgraphs) - 1 and idx > 1:
-                if nx.utils.graphs_equal(previous_subgraphs[idx-2], previous_subgraphs[idx-1]) and \
-                   nx.utils.graphs_equal(previous_subgraphs[idx-2], previous_subgraphs[idx-3]):
+                if nx.utils.graphs_equal(previous_subgraphs[idx-1], previous_subgraphs[idx]) and \
+                   nx.utils.graphs_equal(previous_subgraphs[idx], previous_subgraphs[idx-2]):
                     print(f'The last three subgraphs are equal')
                     pprint.info3(f'The last three subgraphs are equal!')
                     pprint.info3(f'Terminating the exploration!')
                     loop_detected = True
                     break
-        if loop_detected:
-            break
+        # if loop_detected:
+        #     break
+
+        # if max_unsat_reached and (et >= max_et or et <= 0) and not template_obj.is_two_phase_kind:
+        #     max_unsat_reached = False
+        #     break

     # display_the_tree(total)

From ede5c4e739abcc13626e9fd03f472ab48abd654f Mon Sep 17 00:00:00 2001
From: MorellRAP
Date: Thu, 27 Jun 2024 12:56:27 +0200
Subject: [PATCH 4/7] linear-exponential et partitioning added - naming bug fixed!
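The new schedule walks k evenly spaced base thresholds and refines each one
with power-of-two offsets smaller than the step; the exploration then advances
et through this array whenever a loop is detected, the previous iteration
closed with zero actual error, or the same et has persisted for
persistance_limit rounds. Below is a condensed, self-contained sketch of the
schedule that the generate_et_array helper (added in sxpat/xplore.py in this
patch) produces; the name et_schedule and the example values are illustrative
only:

    import math

    def et_schedule(et: int, k: int) -> list:
        # Budgets too small for k distinct segments fall back to a linear sweep.
        step = et // k
        if step <= 1:
            return list(range(1, et + 1))
        # Powers of two strictly below the step refine each linear base point.
        offsets = [2 ** i for i in range(int(math.log2(step)) + 1) if 2 ** i < step]
        points = []
        for i in range(k):
            base = i * step
            if base > 0:
                points.append(base)
            points.extend(base + off for off in offsets)
        points.append(et)  # the schedule always closes at the full error budget
        return points

    # et_schedule(64, 8) gives 1, 2, 4, 8, 9, 10, 12, 16, 17, 18, 20, ...,
    # 56, 57, 58, 60, 64: a linear base every step=8, refined exponentially.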
--- analyze.py | 103 +++++++++++++++++++++++++++++++--------- sxpat/annotatedGraph.py | 59 ++++++++++++++++++----- sxpat/xplore.py | 69 ++++++++++++++++++++++----- 3 files changed, 183 insertions(+), 48 deletions(-) diff --git a/analyze.py b/analyze.py index b7ceaa61b..7b8f52d5b 100644 --- a/analyze.py +++ b/analyze.py @@ -20,23 +20,30 @@ def main(): folder = 'experiments/results' rel_files = get_relevant_files(args, folder) - area_error_mode_per_grid_dict = get_area_error_per_mode_grid(rel_files, folder) - plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict, mecals_area_error) - - - error_cells_subgraph_inputs_dict_per_mode = get_subgraph_inputs_vs_sat_cells(rel_files, folder) - report_sat_cells_vs_subgraph_inputs(args, error_cells_subgraph_inputs_dict_per_mode) - - + # area_error_mode_per_grid_dict = get_area_error_per_mode_grid(rel_files, folder) + # plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict, area_error_mode_last_points_dict=None, + # mecals_area_error=mecals_area_error) + # area_error_mode_last_points_dict = get_area_error_per_mode_per_grid_last_points(rel_files, folder) + # plot_area_error_mode_per_grid_dict(args, area_error_mode_last_points_dict, area_error_mode_last_points_dict=None, mecals_area_error=mecals_area_error) + # + # + # + # + # + # error_cells_subgraph_inputs_dict_per_mode = get_subgraph_inputs_vs_sat_cells(rel_files, folder) + # report_sat_cells_vs_subgraph_inputs(args, error_cells_subgraph_inputs_dict_per_mode) + # + # + # # error_cells_dict_per_mode = get_sat_cells(rel_files, folder) # plot_cells_per_mode(args, error_cells_dict_per_mode) - - - - get_runtime_per_mode_decomposition(rel_files, folder) - sorted_runtime_error_per_mode = get_runtime_per_mode(rel_files, folder) - plot_runtime_error_per_mode(args, sorted_runtime_error_per_mode) + # + # + # # + # get_runtime_per_mode_decomposition(rel_files, folder) + # sorted_runtime_error_per_mode = get_runtime_per_mode(rel_files, folder) + # plot_runtime_error_per_mode(args, sorted_runtime_error_per_mode) @@ -46,8 +53,8 @@ def main(): # area_error_ns_dict = get_area_error_per_num_subgraphs(rel_files, folder) # plot_area_error_per_ns(args, area_error_ns_dict) - area_error_mode_dict = get_area_error_per_mode(rel_files, folder) - plot_area_error_per_mode(args, area_error_mode_dict) + # area_error_mode_dict = get_area_error_per_mode(rel_files, folder) + # plot_area_error_per_mode(args, area_error_mode_dict) # sorted_aggregated_runtime_error = get_runtime_aggregated(rel_files, folder) # plot_runtime_aggregated(args, sorted_aggregated_runtime_error) @@ -79,7 +86,7 @@ def get_relevant_files(args: Arguments, folder): def get_sat_cells(relevant_files, folder): error_cell_dict_per_mode: Dict = {} error_cell_dict: Dict = {} - for mode in range(20): + for mode in range(200): for rep in relevant_files: if re.search(f'mode{mode}_', rep): with open(f'{folder}/{rep}', 'r') as f: @@ -109,7 +116,7 @@ def get_sat_cells(relevant_files, folder): def get_subgraph_inputs_vs_sat_cells(relevant_files, folder): error_cell_dict_per_mode: Dict = {} error_cell_dict: Dict = {} - for mode in range(20): + for mode in range(200): for rep in relevant_files: if re.search(f'mode{mode}_', rep): with open(f'{folder}/{rep}', 'r') as f: @@ -245,17 +252,55 @@ def get_area_error_per_mode(relevant_files, folder): return area_error_mode_dict -def get_area_error_per_mode_grid(relevant_files, folder): +def get_area_error_per_mode_per_grid_last_points(relevant_files, folder): area_error_mode_per_grid_dict: Dict[int: 
[float, int]] = {} area_error_per_grid = {} grids = [] for mode in range(200): for rep in relevant_files: + last_point = False + min_area = float('inf') + max_error = 0 if re.search(f'mode{mode}_', rep): + et = int(re.search(f'et(\d+)_', rep).group(1)) grid = re.search(f'(\d+X\d+)', rep).group() if grid not in grids: grids.append(grid) + with open(f'{folder}/{rep}', 'r') as f: + csvreader = csv.reader(f) + for line in csvreader: + if line[0].startswith('cell'): # skip the first line + continue + else: + if line[3].startswith('SAT'): + area = float(line[5]) + # error = int(line[8]) + + if area < min_area: + min_area = area + last_point = (min_area, et) + if grid not in area_error_per_grid.keys(): + area_error_per_grid[grid] = [] + area_error_per_grid[grid].append(last_point) + if area_error_per_grid: + area_error_mode_per_grid_dict[mode] = area_error_per_grid + area_error_per_grid = {} + return area_error_mode_per_grid_dict + +def get_area_error_per_mode_grid(relevant_files, folder): + area_error_mode_per_grid_dict: Dict[int: [float, int]] = {} + area_error_per_grid = {} + grids = [] + print(f'{relevant_files = }') + print(f'{len(relevant_files) = }') + + for mode in range(200): + for rep in relevant_files: + if re.search(f'mode{mode}_', rep): + grid = re.search(f'(\d+X\d+)', rep).group() + if grid not in grids: + grids.append(grid) with open(f'{folder}/{rep}', 'r') as f: csvreader = csv.reader(f) for line in csvreader: @@ -270,7 +315,6 @@ def get_area_error_per_mode_grid(relevant_files, folder): area_error_per_grid[grid] = [] area_error_per_grid[grid].append((area, error)) - if area_error_per_grid: area_error_mode_per_grid_dict[mode] = area_error_per_grid area_error_per_grid = {} @@ -285,7 +329,7 @@ def get_runtime_per_mode(relevant_files, folder): runtime_error_dict: Dict[Tuple[int, float]] = {} runtime_error_per_mode_dict = {} - for mode in range(20): + for mode in range(200): for rep in relevant_files: if re.search(f'mode{mode}_', rep): with open(f'{folder}/{rep}', 'r') as f: @@ -443,6 +487,7 @@ def plot_area_error(args, sorted_area_error: List, mecals_area_error: List = Non if mecals_area_error: mecals_areas = [item[0] for item in mecals_area_error] mecals_errors = [item[1] for item in mecals_area_error] + print(f'{mecals_errors = }') plt.plot(mecals_errors, mecals_areas, 's--', label='MECALS', color='black') plt.legend() @@ -526,7 +571,7 @@ def plot_area_error_per_mode(args, area_error_mode_dict: Dict): plt.savefig(figname) -def plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict: Dict, mecals_area_error = None): +def plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict: Dict, area_error_mode_last_points_dict: Dict = None, mecals_area_error = None): plt.figure(figsize=(10, 6)) plt.title(f'{args.benchmark_name} per mode and grid') marker = itertools.cycle(('D', 'o', 'X', 's')) @@ -546,12 +591,24 @@ def plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict: Dict plt.plot(mecals_errors, mecals_areas, 's--', label='MECALS', color='black') plt.legend() + if area_error_mode_last_points_dict: + for mode in area_error_mode_last_points_dict.keys(): + sorted_area_error_per_mode = area_error_mode_last_points_dict[mode] + for grid in sorted_area_error_per_mode.keys(): + sorted_area_error = sorted_area_error_per_mode[grid] + areas = [item[0] for item in sorted_area_error] + errors = [item[1] for item in sorted_area_error] + plt.scatter(errors, areas, marker='s', label=f'mode{mode}, grid{grid}', alpha=0.5) + plt.legend() plt.xlabel('ET') 
plt.ylabel('Area') plt.grid(True) folder, _ = OUTPUT_PATH['figure'] - figname = f'{folder}/{args.benchmark_name}_area_per_mode_and_grid.png' + if area_error_mode_last_points_dict: + figname = f'{folder}/{args.benchmark_name}_area_per_mode_and_grid_last.png' + else: + figname = f'{folder}/{args.benchmark_name}_area_per_mode_and_grid.png' plt.savefig(figname) diff --git a/sxpat/annotatedGraph.py b/sxpat/annotatedGraph.py index cac399094..e4d2f50db 100644 --- a/sxpat/annotatedGraph.py +++ b/sxpat/annotatedGraph.py @@ -1,3 +1,4 @@ +import re from typing import Dict, List, Callable from sklearn.cluster import SpectralClustering from colorama import Fore, Style @@ -3264,21 +3265,55 @@ def export_annotated_graph(self, filename: str = None): exports the subgraph (annotated graph) to a GV (GraphViz) file :return: """ - # print(f'exporting the annotated subgraph!') - # print(f'{self.subgraph_out_path = }') - with open(filename or self.subgraph_out_path, 'w') as f: - f.write(f"{STRICT} {DIGRAPH} \"{self.name}\" {{\n") - f.write(f"{NODE} [{STYLE} = {FILLED}, {FILLCOLOR} = {WHITE}]\n") - for n in self.subgraph.nodes: - self.export_node(n, f) - for e in self.subgraph.edges: - self.export_edge(e, f) - f.write(f"}}\n") + try: + with open(filename or self.subgraph_out_path, 'w') as f: + f.write(f"{STRICT} {DIGRAPH} \"{self.name}\" {{\n") + f.write(f"{NODE} [{STYLE} = {FILLED}, {FILLCOLOR} = {WHITE}]\n") + for n in self.subgraph.nodes: + self.export_node(n, f) + for e in self.subgraph.edges: + self.export_edge(e, f) + f.write(f"}}\n") + except OSError as exc: + if exc.errno == 36: + pprint.red(f'File name too long! Shortening it..') + + self.shorten_file_name() + pprint.info3(f'{self.subgraph_out_path = }') + with open(filename or self.subgraph_out_path, 'w') as f: + f.write(f"{STRICT} {DIGRAPH} \"{self.name}\" {{\n") + f.write(f"{NODE} [{STYLE} = {FILLED}, {FILLCOLOR} = {WHITE}]\n") + for n in self.subgraph.nodes: + self.export_node(n, f) + for e in self.subgraph.edges: + self.export_edge(e, f) + f.write(f"}}\n") + else: + raise folder, extension = OUTPUT_PATH[GV] - # print(f'{self.subgraph_out_path = }') - # print(f'{self.name = }') subprocess.run(f'dot -Tpng {self.subgraph_out_path} > {folder}/{self.name}_subgraph.png', shell=True) + + def shorten_file_name(self): + name = self.subgraph_out_path + print(f'{name = }') + pattern = r"^(.*?)(_0)" + base_part = re.search(pattern, name).group(1) + print(f'{base_part = }') + tail_pattern = r"(.*_0)(.*)$" + tail = re.search(tail_pattern, name).group(2) + print(f'{tail = }') + + pattern = '_short(\d+)' + match = re.search(pattern, base_part) + if match: + short_number = int(match.group(1)) + base_part = re.sub(pattern, '', base_part) + new_name = f'{base_part}_short{short_number+1}{tail}' + else: + new_name = f'{base_part}_short1{tail}' + + self.subgraph_out_path = new_name # TODO:for external modifications def evaluate_subgraph_error(self) -> float: """ diff --git a/sxpat/xplore.py b/sxpat/xplore.py index 697a29099..5782112dd 100644 --- a/sxpat/xplore.py +++ b/sxpat/xplore.py @@ -24,6 +24,36 @@ from z_marco.utils import pprint, color +def generate_et_array(et, k): + step = et // k + result = [] + offset = [] + + + if step <= 1: + result = list(range(1, et+1)) + else: + log2 = int(math.log2(step)) + # print(f'{log2 = }') + for i in range(log2 + 1): + if 2**i < step: + offset.append(2**i) + # print(f'{offset = }') + for i in range(k): + + base = i * step + # print(f'{base = }') + if base > 0: + result.append(base) + for off in offset: + result.append(base + off) + 
next_base = (i + 1) * step if (i + 1) < k else et + if next_base == et: + result.append(et) + + + return result + def explore_grid(specs_obj: TemplateSpecs): @@ -89,6 +119,12 @@ def explore_grid(specs_obj: TemplateSpecs): persistance_limit = 3 persistance = 1 prev_et = -1 + + et_iterator = iter(generate_et_array(max_et, 8)) + print(f'{list(et_iterator) = }') + et_iterator = iter(generate_et_array(max_et, 8)) + # print(f'{type(et_iterator) = }') + # exit() while (obtained_wce_exact < available_error): i += 1 if specs_obj.et_partitioning == 'asc': @@ -98,20 +134,23 @@ def explore_grid(specs_obj: TemplateSpecs): log2 = int(math.log2(specs_obj.et)) et = 2**(log2 - i - 2) elif specs_obj.et_partitioning == 'smart_asc': - if max_et >= 8: - et = max_et // 8 if i == 1 else et + (max_et // 8 if (loop_detected or prev_actual_error == 0 or persistance == persistance_limit) else 0 ) - if prev_et == et: - persistance += 1 - else: - persistance = 0 - prev_et = et + + # et = max_et // 8 if i == 1 else et + (max_et // 8 if (loop_detected or prev_actual_error == 0 or persistance == persistance_limit) else 0 ) + if i == 1: + et = next(et_iterator) else: - et = 1 if i == 1 else et + (1 if (loop_detected or prev_actual_error == 0 or persistance == persistance_limit) else 0 ) - if prev_et == et: - persistance += 1 - else: - persistance = 0 - prev_et = et + if (loop_detected or prev_actual_error == 0 or persistance == persistance_limit) and (et < available_error): + et = next(et_iterator) + + if prev_et == et: + persistance += 1 + else: + persistance = 0 + prev_et = et + + if prev_et == available_error and persistance == persistance_limit: + break + elif specs_obj.et_partitioning == 'smart_desc': et = available_error if i == 1 else et // (2 if prev_actual_error == 0 else 1) # if et > available_error: @@ -119,12 +158,16 @@ def explore_grid(specs_obj: TemplateSpecs): # prev_given_error = et else: raise NotImplementedError('invalid status') + + + if et > available_error or et < 0: break + pprint.info1(f'iteration {i} with et {et}, available error {available_error}' if (specs_obj.subxpat or specs_obj.subxpat_v2) else f'Only one iteration with et {et}') From ed5d617bc887b05b307505503ebb3aa8f122ae0e Mon Sep 17 00:00:00 2001 From: MorellRAP Date: Sat, 29 Jun 2024 23:16:24 +0200 Subject: [PATCH 5/7] subgraph extraction mode 5 improved (faster now)! 
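Where the claimed speed-up comes from, in brief: the new mode 53 in
sxpat/annotatedGraph.py encodes node weights as fixed-width bit-vector
constants (BitVecVal) instead of unbounded integers, and hands the
skipped-node constraints to opt.check() as assumptions instead of asserting
them into the Optimize instance, so they are discharged per query rather than
added permanently to the solver state. A minimal runnable sketch of that
solving pattern (a toy three-gate instance with illustrative names, not the
project's real encoding):

    from z3 import Bool, Implies, If, Sum, Optimize, is_true, sat

    g = [Bool(f'g_{i}') for i in range(3)]        # gate selection literals
    opt = Optimize()
    opt.add(Implies(g[2], g[1]))                  # toy convexity-style constraint
    opt.maximize(Sum([If(x, 1, 0) for x in g]))   # prefer the largest partition
    skipped = [g[0] == False]                     # unlabeled gate must stay outside
    if opt.check(skipped) == sat:                 # assumptions, not assertions
        m = opt.model()
        partition = [i for i, x in enumerate(g) if is_true(m[x])]
        print(partition)                          # -> [1, 2]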
--- analyze.py | 95 +++++- analyze_subgraph.py | 78 +++++ main.py | 1 + sxpat/annotatedGraph.py | 619 +++++++++++++++++++++++++++++++++++++++- sxpat/xplore.py | 6 +- test.py | 76 +++++ 6 files changed, 864 insertions(+), 11 deletions(-) create mode 100644 analyze_subgraph.py create mode 100644 test.py diff --git a/analyze.py b/analyze.py index 7b8f52d5b..13043b550 100644 --- a/analyze.py +++ b/analyze.py @@ -21,12 +21,12 @@ def main(): rel_files = get_relevant_files(args, folder) - # area_error_mode_per_grid_dict = get_area_error_per_mode_grid(rel_files, folder) - # plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict, area_error_mode_last_points_dict=None, - # mecals_area_error=mecals_area_error) - # area_error_mode_last_points_dict = get_area_error_per_mode_per_grid_last_points(rel_files, folder) - # plot_area_error_mode_per_grid_dict(args, area_error_mode_last_points_dict, area_error_mode_last_points_dict=None, mecals_area_error=mecals_area_error) - # + area_error_mode_per_grid_dict = get_area_error_per_mode_grid(rel_files, folder) + plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict, area_error_mode_last_points_dict=None, + mecals_area_error=mecals_area_error) + area_error_mode_last_points_dict = get_area_error_per_mode_per_grid_last_points(rel_files, folder) + plot_area_error_mode_per_grid_dict(args, area_error_mode_last_points_dict, area_error_mode_last_points_dict=None, mecals_area_error=mecals_area_error) + # # # @@ -49,7 +49,8 @@ def main(): sorted_area_error = get_area_error(rel_files, folder) plot_area_error(args, sorted_area_error, mecals_area_error) - + plot_area_error_best(args, sorted_area_error, mecals_area_error) + plot_area_error_pareto(args, sorted_area_error, mecals_area_error) # area_error_ns_dict = get_area_error_per_num_subgraphs(rel_files, folder) # plot_area_error_per_ns(args, area_error_ns_dict) @@ -500,6 +501,86 @@ def plot_area_error(args, sorted_area_error: List, mecals_area_error: List = Non figname = f'{folder}/{args.benchmark_name}_all.png' plt.savefig(figname) + +def plot_area_error_best(args, sorted_area_error: List, mecals_area_error: List = None): + + + min_area_by_error = {} + for area, error in sorted_area_error: + if error in min_area_by_error: + min_area_by_error[error] = min(min_area_by_error[error], area) + else: + min_area_by_error[error] = area + areas = [item[1] for item in min_area_by_error.items()] + errors = [item[0] for item in min_area_by_error.items()] + # Plot the data + plt.figure(figsize=(10, 6)) + assert len(errors) == len(areas) + if mecals_area_error: + mecals_areas = [item[0] for item in mecals_area_error] + mecals_errors = [item[1] for item in mecals_area_error] + + plt.plot(mecals_errors, mecals_areas, 's--', label='MECALS', color='black') + plt.legend() + + plt.plot(errors, areas, 'o', label='SubXPAT', color='blue') + plt.title(f'{args.benchmark_name}, ') + plt.xlabel('ET') + plt.ylabel('Area') + plt.grid(True) + folder, _ = OUTPUT_PATH['figure'] + figname = f'{folder}/{args.benchmark_name}_best.png' + plt.savefig(figname) + +def is_dominated(current_point, points): + """ Check if the current point is dominated by any point in the list """ + for point in points: + if point[0] < current_point[0] and point[1] < current_point[1] and point != current_point: + return True + return False + +def find_pareto_points(points): + """ Find and return the list of Pareto optimal points """ + pareto_points = [] + for point in points: + if not is_dominated(point, points): + pareto_points.append(point) + 
return pareto_points + +def plot_area_error_pareto(args, sorted_area_error: List, mecals_area_error: List = None): + + + # min_area_by_error = {} + # for area, error in sorted_area_error: + # if error in min_area_by_error: + # min_area_by_error[error] = min(min_area_by_error[error], area) + # else: + # min_area_by_error[error] = area + # areas = [item[1] for item in min_area_by_error.items()] + # errors = [item[0] for item in min_area_by_error.items()] + + pareto_points = find_pareto_points(sorted_area_error) + areas = [item[0] for item in pareto_points] + errors = [item[1] for item in pareto_points] + # Plot the data + plt.figure(figsize=(10, 6)) + assert len(errors) == len(areas) + if mecals_area_error: + mecals_areas = [item[0] for item in mecals_area_error] + mecals_errors = [item[1] for item in mecals_area_error] + + plt.plot(mecals_errors, mecals_areas, 's--', label='MECALS', color='black') + plt.legend() + + plt.plot(errors, areas, 'o', label='SubXPAT', color='blue') + plt.title(f'{args.benchmark_name}, ') + plt.xlabel('ET') + plt.ylabel('Area') + plt.grid(True) + folder, _ = OUTPUT_PATH['figure'] + figname = f'{folder}/{args.benchmark_name}_pareto.png' + plt.savefig(figname) + def plot_area_error_per_ns(args, area_error_ns_dict: Dict): plt.figure(figsize=(10, 6)) plt.title(f'{args.benchmark_name} per number of subgraphs') diff --git a/analyze_subgraph.py b/analyze_subgraph.py new file mode 100644 index 000000000..5f5bcd89c --- /dev/null +++ b/analyze_subgraph.py @@ -0,0 +1,78 @@ +import os +import csv +import pandas as pd +import re +import matplotlib.pyplot as plt + +def main(): + directory = 'new_subgraph_extraction/reports/' + + # List to hold DataFrames + dataframes = [] + dataframes_dict = {} + for filename in os.listdir(directory): + if filename.endswith('.csv'): + + df = pd.read_csv(os.path.join(directory, filename), sep=',') + dataframes_dict[filename] = df + + benchmarks = [] + modes = [] + ets = [] + + all_dict = {} + for el in dataframes_dict.keys(): + + benchmark = re.search('(.*)_mode5.*', el).group(1) + mode = int(re.search('mode(\d+)_', el).group(1)) + et = int(re.search('et(\d+)_', el).group(1)) + solver_time = dataframes_dict[el]['solver time'][0] + total = dataframes_dict[el]['total'][0] + if benchmark not in all_dict.keys(): + all_dict[benchmark] = {} + if mode not in all_dict[benchmark].keys(): + all_dict[benchmark][mode] = {} + if et not in all_dict[benchmark][mode].keys(): + all_dict[benchmark][mode][et] = {} + all_dict[benchmark][mode][et] = (solver_time, total) + + plot_dict(all_dict) + + + +def plot_dict(all_dict): + color_map = { + 51: 'blue', + 53: 'red' + # Add more modes and their respective colors as needed + } + for benchmark, modes in all_dict.items(): + plt.figure(figsize=(10, 6)) + for mode, ets_dict in modes.items(): + ets_sorted = sorted(ets_dict.keys()) + solver_times = [ets_dict[et][0] for et in ets_sorted] + total_times = [ets_dict[et][1] for et in ets_sorted] + color = color_map.get(mode, 'black') + plt.plot(ets_sorted, solver_times, label=f'Mode {mode}', marker='o', color=color) + plt.plot(ets_sorted, total_times, linestyle='--', label=f'Total Time (Mode {mode})', color=color) + + if 51 in modes and 53 in modes: + for ets in ets_sorted: + if ets in modes[51] and ets in modes[53]: + solver_time_51 = modes[51][ets][0] + solver_time_53 = modes[53][ets][0] + percentage_faster = ((solver_time_51 - solver_time_53) / solver_time_51) * 100 + plt.annotate(f'{percentage_faster:.1f}%', (ets, (solver_time_51 + solver_time_53) / 2), + textcoords="offset 
points", xytext=(0, 10), ha='center', color='green', fontsize=9) + + plt.xlabel('ET') + plt.ylabel('Solver Time') + plt.title(f'Solver Time vs ET for {benchmark}') + plt.legend() + plt.xscale('log') + plt.grid(True) + figname = f'{benchmark}.png' + plt.savefig(figname) + +if __name__ == "__main__": + main() diff --git a/main.py b/main.py index 815579de6..76b008120 100644 --- a/main.py +++ b/main.py @@ -63,6 +63,7 @@ def main(): if specs_obj.grid: stats_obj = explore_grid(specs_obj) + else: # todo:question: What should happen here? raise NotImplementedError('WIP: for now --grid must be passed') diff --git a/sxpat/annotatedGraph.py b/sxpat/annotatedGraph.py index e4d2f50db..fa15af4d4 100644 --- a/sxpat/annotatedGraph.py +++ b/sxpat/annotatedGraph.py @@ -1,5 +1,8 @@ +import csv import re from typing import Dict, List, Callable + +import z3 from sklearn.cluster import SpectralClustering from colorama import Fore, Style import time @@ -377,6 +380,25 @@ def extract_subgraph(self, specs_obj: TemplateSpecs): cnt_nodes += 1 pprint.success(f" (#ofNodes={cnt_nodes})") + elif mode == 52: + pprint.info2( + f"Partition with omax={specs_obj.omax} and hard feasibility constraints on inputs and outputs. Looking for largest partition") + self.subgraph = self.find_subgraph_feasible_hard_limited_inputs_refined( + specs_obj) # Critian's subgraph extraction + cnt_nodes = 0 + for gate_idx in self.gate_dict: + if self.subgraph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] == 1: + cnt_nodes += 1 + elif mode == 53: + pprint.info2( + f"Partition with omax={specs_obj.omax} and hard constraints, imax, omax, assumptions, and BitVec. Looking for largest partition") + self.subgraph = self.find_subgraph_feasible_hard_limited_inputs_refined_bitvec( + specs_obj) # Critian's subgraph extraction + cnt_nodes = 0 + for gate_idx in self.gate_dict: + if self.subgraph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] == 1: + cnt_nodes += 1 + pprint.success(f" (#ofNodes={cnt_nodes})") else: raise Exception('invalid mode!') else: @@ -1904,6 +1926,7 @@ def find_subgraph_feasible_hard_limited_inputs(self, specs_obj: TemplateSpecs): extracts a colored subgraph from the original non-partitioned graph object :return: an annotated graph in which the extracted subgraph is colored """ + total_s = time.time() omax = specs_obj.omax feasibility_treshold = specs_obj.et # print(f'{feasibility_treshold = }') @@ -2141,7 +2164,7 @@ def find_subgraph_feasible_hard_limited_inputs(self, specs_obj: TemplateSpecs): skipped_nodes_constraints = [node_literal == False for node_literal in skipped_nodes] opt.add(skipped_nodes_constraints) # ==================================================================================================== - + sat_time_s = time.time() node_partition = [] if opt.check() == sat: pprint.success("subgraph found -> SAT", end='') @@ -2155,7 +2178,8 @@ def find_subgraph_feasible_hard_limited_inputs(self, specs_obj: TemplateSpecs): node_partition.append(gate_id) # Gates inside the partition else: pprint.warning("subgraph not found -> UNSAT") - + sat_time_e = time.time() + print(f'\nSolver time = {round(sat_time_e - sat_time_s, 2)}') # Check partition convexity for i in range(len(node_partition) - 1): for j in range(i + 1, len(node_partition)): @@ -2199,6 +2223,597 @@ def find_subgraph_feasible_hard_limited_inputs(self, specs_obj: TemplateSpecs): else: tmp_graph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] = 0 tmp_graph.nodes[self.gate_dict[gate_idx]][COLOR] = WHITE + + total_e = time.time() + current_time = datetime.datetime.now() + # with open( + 
# f'{specs_obj.benchmark_name}_mode{specs_obj.mode}_et{specs_obj.et}_{current_time.strftime("%Y%m%d:%H%M%S")}.csv', + # 'w') as f: + # csvwriter = csv.writer(f) + # header = ['solver time', 'total'] + # csvwriter.writerow(header) + # csvwriter.writerow([round(sat_time_e - sat_time_s, 4), round(total_e - total_s, 4)]) + + return tmp_graph + + + + def _generate_input_gate_output_literals(self, tmp_graph): + input_literals = {} # literals associated to the input nodes + gate_literals = {} # literals associated to the gates in the circuit + output_literals = {} # literals associated to the output nodes + for e in tmp_graph.edges: + if 'in' in e[0]: # Generate literal for each input node + in_id = int(e[0][2:]) + if in_id not in input_literals: + input_literals[in_id] = Bool("in_%s" % str(in_id)) + if 'g' in e[0]: # Generate literal for each gate in the circuit + g_id = int(e[0][1:]) + if g_id not in gate_literals and g_id not in self.constant_dict: # Not in constant_dict since we don't care about constants + gate_literals[g_id] = Bool("g_%s" % str(g_id)) + + if 'out' in e[1]: # Generate literal for each output node + out_id = int(e[1][3:]) + if out_id not in output_literals: + output_literals[out_id] = Bool("out_%s" % str(out_id)) + + return input_literals, gate_literals, output_literals + + + def _generate_input_gate_output_edges(self, tmp_graph): + input_edges = {} # key = input node id, value = array of id. Contains id of gates in the circuit connected with the input node (childs) + gate_edges = {} # key = gate id, value = array of id. Contains the successors gate (childs) + output_edges = {} # key = output node id, value = array of id. Contains id of gates in the circuit connected with the output node (parents) + + for e in tmp_graph.edges: + if 'in' in e[0]: # Populate input_edges structure + in_id = int(e[0][2:]) + + if in_id not in input_edges: + input_edges[in_id] = [] + # input_edges[in_id].append(int(e[1][1:])) # this is a bug for a case where e = (in1, out1) + # Morteza added ============== + try: + input_edges[in_id].append(int(e[1][1:])) + except: + if re.search('g(\d+)', e[1]): + my_id = int(re.search('g(\d+)', e[1]).group(1)) + input_edges[in_id].append(my_id) + # ============================= + + if 'g' in e[0] and 'g' in e[1]: # Populate gate_edges structure + ns_id = int(e[0][1:]) + nd_id = int(e[1][1:]) + + if ns_id in self.constant_dict: + print("ERROR: Constants should only be connected to output nodes") + exit(0) + if ns_id not in gate_edges: + gate_edges[ns_id] = [] + # try: + gate_edges[ns_id].append(nd_id) + + if 'out' in e[1]: # Populate output_edges structure + out_id = int(e[1][3:]) + if out_id not in output_edges: + output_edges[out_id] = [] + # output_edges[out_id].append(int(e[0][1:])) + # Morteza added ============== + try: + output_edges[out_id].append(int(e[0][1:])) + except: + my_id = int(re.search('(\d+)', e[0]).group(1)) + output_edges[out_id].append(my_id) + + # ============================= + return input_edges, gate_edges, output_edges + + + def _get_skipped_nodes(self, tmp_graph): + skipped_nodes = [] + for node in self.graph.nodes: + if self.graph.nodes[node][WEIGHT] == -1: + if node.startswith('g'): + node_literal = f'{node[0:1]}_{node[1:]}' + elif node.startswith('in'): + node_literal = f'{node[0:2]}_{node[2:]}' + elif node.startswith('out'): + node_literal = f'{node[0:3]}_{node[3:]}' + else: + print(f'Node is neither input, output, nor gate') + raise + skipped_nodes.append(Bool(node_literal)) + return skipped_nodes + + + + def 
find_subgraph_feasible_hard_limited_inputs_refined(self, specs_obj: TemplateSpecs): + """ + extracts a colored subgraph from the original non-partitioned graph object + :return: an annotated graph in which the extracted subgraph is colored + """ + omax = specs_obj.omax + imax = specs_obj.imax + feasibility_treshold = specs_obj.et + copy_s = time.time() + tmp_graph = self.graph.copy(as_view=False) + print(f'copy_s = {time.time() - copy_s } ') + # Data structures containing the literals + ds = time.time() + input_literals, gate_literals, output_literals = self._generate_input_gate_output_literals(tmp_graph) + # + # print(f'{input_literals = }') + # print(f'{gate_literals = }') + # print(f'{output_literals = }') + + # Data structures containing the edges + input_edges, gate_edges, output_edges = self._generate_input_gate_output_edges(tmp_graph) + # Skipped nodes: the nodes without a weight (actually a weight of -1 which is meaningless) + skipped_nodes = self._get_skipped_nodes(tmp_graph) + skipped_nodes_constraints = [node_literal == False for node_literal in skipped_nodes] + # print(f'{skipped_nodes = }') + # print(f'{skipped_nodes_constraints = }') + + print(f'ds = {time.time() - ds }') + + + # Optimizer + opt = Optimize() + + + opt.add(skipped_nodes_constraints) + + max_func = [] + + # List of all the partition edges + partition_input_edges = [] # list of all the input edges ([S'D_1 + S'D_2 + ..., ...]) + partition_output_edges = [] # list of all the output edges ([S_1D' + S_2D' + ..., ...]) + # Define input edges + for source in input_edges: + edge_in_holder = [] + edge_out_holder = [] + + for destination in input_edges[source]: + e_in = And(Not(input_literals[source]), gate_literals[destination]) + + edge_in_holder.append(e_in) + + partition_input_edges.append(Or(edge_in_holder)) + + # Define gate edges and data structures containing the edge weights + edge_w = {} + edge_constraint = {} + + for source in gate_edges: + edge_in_holder = [] + edge_out_holder = [] + + for destination in gate_edges[source]: + e_in = And(Not(gate_literals[source]), gate_literals[destination]) + e_out = And(gate_literals[source], Not(gate_literals[destination])) + + edge_in_holder.append(e_in) + edge_out_holder.append(e_out) + + partition_input_edges.append(Or(edge_in_holder)) + if source not in edge_w: + edge_w[source] = tmp_graph.nodes[self.gate_dict[source]][WEIGHT] + + if source not in edge_constraint: + edge_constraint[source] = Or(edge_out_holder) + partition_output_edges.append(Or(edge_out_holder)) + + # Define output edges + for output_id in output_edges: + predecessor = output_edges[output_id][ + 0] # Output nodes have only one predecessor (it could be a gate or it could be an input) + if predecessor not in gate_literals: # This handle cases where input and output are directly connected + continue + e_out = And(gate_literals[predecessor], Not(output_literals[output_id])) + if predecessor not in edge_w: + edge_w[predecessor] = tmp_graph.nodes[self.gate_dict[predecessor]][WEIGHT] + if predecessor not in edge_constraint: + edge_constraint[predecessor] = e_out + partition_output_edges.append(e_out) + + # Create graph of the cicuit without input and output nodes + new_G = time.time() + G = nx.DiGraph() + # print(f'{tmp_graph.edges = }') + for e in tmp_graph.edges: + if 'g' in str(e[0]) and 'g' in str(e[1]): + source = int(e[0][1:]) + destination = int(e[1][1:]) + + G.add_edge(source, destination) + # Morteza added ===================== + for e in tmp_graph.edges: + if 'g' in str(e[0]): + source = 
int(e[0][1:]) + if source in self.constant_dict: + continue + G.add_node(source) + # =================================== + gate_weight = {} + for gate_idx in G.nodes: + + if gate_idx not in gate_weight: + gate_weight[gate_idx] = tmp_graph.nodes[self.gate_dict[gate_idx]][WEIGHT] + # print("Gate", gate_idx, " value ", gate_weight[gate_idx]) + + descendants = {} + ancestors = {} + for n in G: + if n not in descendants: + descendants[n] = list(nx.descendants(G, n)) + if n not in ancestors: + ancestors[n] = list(nx.ancestors(G, n)) + + # Generate convexity constraints + for source in gate_edges: + for destination in gate_edges[source]: + if len(descendants[destination]) > 0: # Constraints on output edges + not_descendants = [Not(gate_literals[l]) for l in descendants[destination]] + not_descendants.append(Not(gate_literals[destination])) + descendat_condition = Implies(And(gate_literals[source], Not(gate_literals[destination])), + And(not_descendants)) + opt.add(descendat_condition) + if len(ancestors[source]) > 0: # Constraints on input edges + not_ancestors = [Not(gate_literals[l]) for l in ancestors[source]] + not_ancestors.append(Not(gate_literals[source])) + ancestor_condition = Implies(And(Not(gate_literals[source]), gate_literals[destination]), + And(not_ancestors)) + opt.add(ancestor_condition) + + # Set input nodes to False + for input_node_id in input_literals: + opt.add(input_literals[input_node_id] == False) + + # Set output nodes to False + for output_node_id in output_literals: + opt.add(output_literals[output_node_id] == False) + + # Add constraints on the number of output edges + opt.add(Sum(partition_input_edges) <= imax) + opt.add(Sum(partition_output_edges) <= omax) + + feasibility_constraints = [] + for s in edge_w: + + if gate_weight[s] <= feasibility_treshold: + # print(s, "is feasible", gate_weight[s]) + feasibility_constraints.append(edge_constraint[s]) + + opt.add(Sum(feasibility_constraints) == Sum(partition_output_edges)) + + # Generate function to maximize + for gate_id in gate_literals: + max_func.append(gate_literals[gate_id]) + + + + opt.maximize(Sum(max_func)) + print(f'new_G = {time.time() - new_G}') + + print(f'{len(opt.assertions()) = }') + + node_partition = [] + sat_time_s = time.time() + if opt.check() == sat: + pprint.success("subgraph found -> SAT", end='') + # print(opt.model()) + m = opt.model() + for t in m.decls(): + if 'g' not in str(t): # Look only the literals associate to the gates + continue + if is_true(m[t]): + gate_id = int(str(t)[2:]) + node_partition.append(gate_id) # Gates inside the partition + else: + pprint.warning("subgraph not found -> UNSAT") + + sat_time_e = time.time() + + print(f'\nSolver time = {round( sat_time_e-sat_time_s , 2)}') + + convex_check = time.time() + # Check partition convexity + for i in range(len(node_partition) - 1): + for j in range(i + 1, len(node_partition)): + u = node_partition[i] + v = node_partition[j] + try: + # print(f'{u = }') + # print(f'{v = }') + # print(f'{G.nodes = }') + # print(f'{G.edges = }') + # print(f'{gate_literals = }') + # print(f'{node_partition = }') + path = nx.shortest_path(G, source=u, target=v) + all_nodes_in_partition = True + + # Check that all the nodes in the shortest path are in the partition + for n in path: + if n not in node_partition: + all_nodes_in_partition = False + + if not all_nodes_in_partition: + print("Partition is not convex") + exit(0) + + except nx.exception.NetworkXNoPath: + # print('Here') + # except: + # pprint.error(f'Node {u} or {v} do not belong to the graph 
G {G.nodes}') + # raise nx.exception.NetworkXNoPath + # No path between u and v + + # print("No path", u, v) + pass + print(f'Convex_check = {time.time() - convex_check}') + + coloring = time.time() + for gate_idx in self.gate_dict: + # print(f'{gate_idx = }') + if gate_idx in node_partition: + # print(f'{gate_idx} is in the node_partition') + tmp_graph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] = 1 + tmp_graph.nodes[self.gate_dict[gate_idx]][COLOR] = RED + else: + tmp_graph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] = 0 + tmp_graph.nodes[self.gate_dict[gate_idx]][COLOR] = WHITE + print(f'coloring = {time.time() - coloring}') + return tmp_graph + + + + def find_subgraph_feasible_hard_limited_inputs_refined_bitvec(self, specs_obj: TemplateSpecs): + """ + extracts a colored subgraph from the original non-partitioned graph object + :return: an annotated graph in which the extracted subgraph is colored + """ + total_s = time.time() + BIT_SIZE = 2 ** self.num_outputs + + omax = specs_obj.omax + imax = specs_obj.imax + feasibility_treshold = specs_obj.et + # print(f'{feasibility_treshold = }') + copy_s = time.time() + tmp_graph = self.graph.copy(as_view=False) + # print(f'copy_s = {time.time() - copy_s } ') + # Data structures containing the literals + ds = time.time() + input_literals, gate_literals, output_literals = self._generate_input_gate_output_literals(tmp_graph) + # Data structures containing the edges + input_edges, gate_edges, output_edges = self._generate_input_gate_output_edges(tmp_graph) + # Skipped nodes: the nodes without a weight (actually a weight of -1 which is meaningless) + skipped_nodes = self._get_skipped_nodes(tmp_graph) + skipped_nodes_constraints = [node_literal == False for node_literal in skipped_nodes] + # print(f'ds = {time.time() - ds }') + + # Optimizer + # opt = Optimize() + opt = Optimize() + # opt.add(skipped_nodes_constraints) + + max_func = [] + + # List of all the partition edges + partition_input_edges = [] # list of all the input edges ([S'D_1 + S'D_2 + ..., ...]) + partition_output_edges = [] # list of all the output edges ([S_1D' + S_2D' + ..., ...]) + # Define input edges + for source in input_edges: + edge_in_holder = [] + for destination in input_edges[source]: + e_in = And(Not(input_literals[source]), gate_literals[destination]) + edge_in_holder.append(e_in) + partition_input_edges.append(Or(edge_in_holder)) + + # Define gate edges and data structures containing the edge weights + edge_w = {} + edge_constraint = {} + gate_weight = {} # Use BitVec for weights + + for source in gate_edges: + edge_in_holder = [] + edge_out_holder = [] + for destination in gate_edges[source]: + e_in = And(Not(gate_literals[source]), gate_literals[destination]) + e_out = And(gate_literals[source], Not(gate_literals[destination])) + edge_in_holder.append(e_in) + edge_out_holder.append(e_out) + + partition_input_edges.append(Or(edge_in_holder)) + if source not in edge_w: + weight_value = tmp_graph.nodes[self.gate_dict[source]][WEIGHT] + gate_weight[source] = BitVec(f"weight_{source}", BIT_SIZE) + + opt.add(gate_weight[source] == BitVecVal(weight_value, BIT_SIZE)) + edge_w[source] = weight_value # Adding back the weight for edge_w + if source not in edge_constraint: + edge_constraint[source] = Or(edge_out_holder) + partition_output_edges.append(Or(edge_out_holder)) + + # Define output edges + for output_id in output_edges: + predecessor = output_edges[output_id][0] # Output nodes have only one predecessor (it could be a gate or it could be an input) + if predecessor not in 
gate_literals: # This handle cases where input and output are directly connected + continue + e_out = And(gate_literals[predecessor], Not(output_literals[output_id])) + if predecessor not in edge_w: + weight_value = tmp_graph.nodes[self.gate_dict[predecessor]][WEIGHT] + gate_weight[predecessor] = BitVec(f"weight_{predecessor}", BIT_SIZE) + opt.add(gate_weight[predecessor] == BitVecVal(weight_value, BIT_SIZE)) + edge_w[predecessor] = weight_value # Adding back the weight for edge_w + if predecessor not in edge_constraint: + edge_constraint[predecessor] = e_out + partition_output_edges.append(e_out) + + # Create graph of the cicuit without input and output nodes + new_G = time.time() + G = nx.DiGraph() + # print(f'{tmp_graph.edges = }') + for e in tmp_graph.edges: + if 'g' in str(e[0]) and 'g' in str(e[1]): + source = int(e[0][1:]) + destination = int(e[1][1:]) + + G.add_edge(source, destination) + # Morteza added ===================== + for e in tmp_graph.edges: + if 'g' in str(e[0]): + source = int(e[0][1:]) + if source in self.constant_dict: + continue + G.add_node(source) + # =================================== + # gate_weight = {} + # for gate_idx in G.nodes: + # + # if gate_idx not in gate_weight: + # gate_weight[gate_idx] = tmp_graph.nodes[self.gate_dict[gate_idx]][WEIGHT] + # print("Gate", gate_idx, " value ", gate_weight[gate_idx]) + + descendants = {} + ancestors = {} + for n in G: + if n not in descendants: + descendants[n] = list(nx.descendants(G, n)) + if n not in ancestors: + ancestors[n] = list(nx.ancestors(G, n)) + + # Generate convexity constraints + for source in gate_edges: + for destination in gate_edges[source]: + if len(descendants[destination]) > 0: # Constraints on output edges + not_descendants = [Not(gate_literals[l]) for l in descendants[destination]] + not_descendants.append(Not(gate_literals[destination])) + descendat_condition = Implies(And(gate_literals[source], Not(gate_literals[destination])), + And(not_descendants)) + opt.add(descendat_condition) + if len(ancestors[source]) > 0: # Constraints on input edges + not_ancestors = [Not(gate_literals[l]) for l in ancestors[source]] + not_ancestors.append(Not(gate_literals[source])) + ancestor_condition = Implies(And(Not(gate_literals[source]), gate_literals[destination]), + And(not_ancestors)) + opt.add(ancestor_condition) + + # Set input nodes to False + for input_node_id in input_literals: + opt.add(input_literals[input_node_id] == False) + + # Set output nodes to False + for output_node_id in output_literals: + opt.add(output_literals[output_node_id] == False) + + # Add constraints on the number of output edges + opt.add(Sum(partition_input_edges) <= imax) + opt.add(Sum(partition_output_edges) <= omax) + + feasibility_constraints = [] + feasibility_treshold_bv = BitVecVal(feasibility_treshold, BIT_SIZE) # BitVec for feasibility threshold + + + + for s in edge_w: + if edge_w[s] <= feasibility_treshold: + feasibility_constraints.append(edge_constraint[s]) + + opt.add(Sum(feasibility_constraints) == Sum(partition_output_edges)) + + # for s in edge_w: + # + # if gate_weight[s] <= feasibility_treshold: + # # print(s, "is feasible", gate_weight[s]) + # feasibility_constraints.append(edge_constraint[s]) + # + # opt.add(Sum(feasibility_constraints) == Sum(partition_output_edges)) + + # Generate function to maximize + for gate_id in gate_literals: + max_func.append(gate_literals[gate_id]) + opt.maximize(Sum(max_func)) + # print(f'new_G = {time.time() - new_G}') + + # print(f'{len(opt.assertions()) = }') + + 
node_partition = [] + sat_time_s = time.time() + if opt.check(skipped_nodes_constraints) == sat: + pprint.success("subgraph found -> SAT", end='') + # print(opt.model()) + m = opt.model() + for t in m.decls(): + if 'g' not in str(t): # Look only the literals associate to the gates + continue + if is_true(m[t]): + gate_id = int(str(t)[2:]) + node_partition.append(gate_id) # Gates inside the partition + else: + pprint.warning("subgraph not found -> UNSAT") + + sat_time_e = time.time() + + # print(f'\nSolver time = {round( sat_time_e-sat_time_s , 2)}') + + convex_check = time.time() + # Check partition convexity + for i in range(len(node_partition) - 1): + for j in range(i + 1, len(node_partition)): + u = node_partition[i] + v = node_partition[j] + try: + # print(f'{u = }') + # print(f'{v = }') + # print(f'{G.nodes = }') + # print(f'{G.edges = }') + # print(f'{gate_literals = }') + # print(f'{node_partition = }') + path = nx.shortest_path(G, source=u, target=v) + all_nodes_in_partition = True + + # Check that all the nodes in the shortest path are in the partition + for n in path: + if n not in node_partition: + all_nodes_in_partition = False + + if not all_nodes_in_partition: + print("Partition is not convex") + exit(0) + + except nx.exception.NetworkXNoPath: + # print('Here') + # except: + # pprint.error(f'Node {u} or {v} do not belong to the graph G {G.nodes}') + # raise nx.exception.NetworkXNoPath + # No path between u and v + + # print("No path", u, v) + pass + # print(f'Convex_check = {time.time() - convex_check}') + + coloring = time.time() + for gate_idx in self.gate_dict: + # print(f'{gate_idx = }') + if gate_idx in node_partition: + # print(f'{gate_idx} is in the node_partition') + tmp_graph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] = 1 + tmp_graph.nodes[self.gate_dict[gate_idx]][COLOR] = RED + else: + tmp_graph.nodes[self.gate_dict[gate_idx]][SUBGRAPH] = 0 + tmp_graph.nodes[self.gate_dict[gate_idx]][COLOR] = WHITE + # print(f'coloring = {time.time() - coloring}') + + total_e = time.time() + current_time = datetime.datetime.now() + # with open(f'{specs_obj.benchmark_name}_mode{specs_obj.mode}_et{specs_obj.et}_{current_time.strftime("%Y%m%d:%H%M%S")}.csv', 'w') as f: + # csvwriter = csv.writer(f) + # header = ['solver time', 'total'] + # csvwriter.writerow(header) + # csvwriter.writerow([round(sat_time_e-sat_time_s , 4), round(total_e-total_s , 4)]) + + return tmp_graph diff --git a/sxpat/xplore.py b/sxpat/xplore.py index 5782112dd..d91472905 100644 --- a/sxpat/xplore.py +++ b/sxpat/xplore.py @@ -121,7 +121,7 @@ def explore_grid(specs_obj: TemplateSpecs): prev_et = -1 et_iterator = iter(generate_et_array(max_et, 8)) - print(f'{list(et_iterator) = }') + # print(f'{list(et_iterator) = }') et_iterator = iter(generate_et_array(max_et, 8)) # print(f'{type(et_iterator) = }') # exit() @@ -191,7 +191,7 @@ def explore_grid(specs_obj: TemplateSpecs): # > grid step settings # initialize context - specs_obj.et = et + specs_obj.et = et template_obj.set_new_context(specs_obj) # import the graph @@ -221,6 +221,8 @@ def explore_grid(specs_obj: TemplateSpecs): print(f'subgraph_number_inputs = {subgraph_number_inputs}') print(f'subgraph_number_outputs = {subgraph_number_outputs}') + + # todo:wip:marco: export subgraph folder = 'output/gv/subgraphs' graph_path = f'{folder}/{specs_obj.benchmark_name}_lpp{specs_obj.lpp}_ppo{specs_obj.ppo}_et{specs_obj.et}_mode{specs_obj.mode}_omax{specs_obj.omax}_serr{specs_obj.sub_error_function}.gv' diff --git a/test.py b/test.py new file mode 100644 index 
000000000..92b2aa964 --- /dev/null +++ b/test.py @@ -0,0 +1,76 @@ +from Z3Log.utils import setup_folder_structure +from Z3Log.config import path as z3logpath + +from sxpat.templateSpecs import TemplateSpecs +from sxpat.config import paths as sxpatpaths + +from sxpat.arguments import Arguments + +from sxpat.xplore import explore_grid +from sxpat.stats import Stats + +from z_marco.utils import pprint +from sxpat.utils.filesystem import FS +from sxpat.templateCreator import Template_SOP1 +import time + +def main(): + args = Arguments.parse() + print(f'{args = }') + + if args.clean: + pprint.info2('cleaning...') + clean_all() + + setup_folder_structure() + for (directory, _) in sxpatpaths.OUTPUT_PATH.values(): + FS.mkdir(directory) + + # todo:later: update how we create the templatespecs (more than 2 names, ecc.) + specs_obj = TemplateSpecs(name='Sop1' if not args.shared else 'SharedLogic', exact=args.benchmark_name, + literals_per_product=args.lpp, products_per_output=args.ppo, + benchmark_name=args.approximate_benchmark, num_of_models=args.num_models, + subxpat=args.subxpat, subxpat_v2=args.subxpat_v2, + full_error_function=args.full_error_function, sub_error_function=args.sub_error_function, + et=args.et, et_partitioning=args.et_partitioning, + partitioning_percentage=args.partitioning_percentage, iterations=args.iterations, + grid=args.grid, imax=args.imax, omax=args.omax, sensitivity=args.sensitivity, + timeout=args.timeout, subgraph_size=args.subgraph_size, mode=args.mode, population=args.population, + min_labeling=args.min_labeling, manual_nodes=args.manual_nodes, + shared=args.shared, products_in_total=args.pit, parallel=args.parallel, encoding=args.encoding, + partial_labeling=args.partial_labeling, num_subgraphs=args.num_subgraphs) + + pprint.info1(f'Creating the template...') + template_obj = Template_SOP1(specs_obj) + template_obj.current_graph = template_obj.import_graph() + + labeling_start = time.time() + et_coefficient = 2 + pprint.info1(f'Labeling...') + template_obj.label_graph(min_labeling=specs_obj.min_labeling, partial=specs_obj.partial_labeling, et=specs_obj.et * et_coefficient, parallel=specs_obj.parallel) + labeling_time = time.time() - labeling_start + print(f'labeling_time = {labeling_time}') + t_start = time.time() + subgraph_is_available = template_obj.current_graph.extract_subgraph(specs_obj) + subgraph_extraction_time = time.time() - t_start + print(f'{subgraph_extraction_time = }') + +def clean_all(): + for (directory, _) in [ + z3logpath.OUTPUT_PATH['ver'], + z3logpath.OUTPUT_PATH['gv'], + z3logpath.OUTPUT_PATH['aig'], + z3logpath.OUTPUT_PATH['z3'], + z3logpath.OUTPUT_PATH['report'], + z3logpath.OUTPUT_PATH['figure'], + z3logpath.TEST_PATH['tb'], + sxpatpaths.OUTPUT_PATH['area'], + sxpatpaths.OUTPUT_PATH['power'], + sxpatpaths.OUTPUT_PATH['delay'], + sxpatpaths.OUTPUT_PATH['json'] + ]: + FS.cleandir(directory) + + +if __name__ == "__main__": + main() \ No newline at end of file From 8a06e3d1c910e6c77ccb25c2487b3beecaa3f85b Mon Sep 17 00:00:00 2001 From: MorellRAP Date: Wed, 3 Jul 2024 22:11:43 +0200 Subject: [PATCH 6/7] latest updates: mode 53 (improved subgraph extraction) --- analyze.py | 122 ++++++++++++++++++++++++++++++---------- sxpat/annotatedGraph.py | 4 +- 2 files changed, 95 insertions(+), 31 deletions(-) diff --git a/analyze.py b/analyze.py index 13043b550..bec78ac35 100644 --- a/analyze.py +++ b/analyze.py @@ -21,11 +21,11 @@ def main(): rel_files = get_relevant_files(args, folder) - area_error_mode_per_grid_dict = 
From 8a06e3d1c910e6c77ccb25c2487b3beecaa3f85b Mon Sep 17 00:00:00 2001
From: MorellRAP
Date: Wed, 3 Jul 2024 22:11:43 +0200
Subject: [PATCH 6/7] latest updates: mode 53 (improved subgraph extraction)

---
 analyze.py              | 122 ++++++++++++++++++++++++++++++----------
 sxpat/annotatedGraph.py |   4 +-
 2 files changed, 95 insertions(+), 31 deletions(-)

diff --git a/analyze.py b/analyze.py
index 13043b550..bec78ac35 100644
--- a/analyze.py
+++ b/analyze.py
@@ -21,11 +21,11 @@ def main():
     rel_files = get_relevant_files(args, folder)

-    area_error_mode_per_grid_dict = get_area_error_per_mode_grid(rel_files, folder)
-    plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict, mecals_area_error)
+    # area_error_mode_per_grid_dict = get_area_error_per_mode_grid(rel_files, folder)
+    # plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict, area_error_mode_last_points_dict=None,
+    #                                    mecals_area_error=mecals_area_error)
     area_error_mode_last_points_dict = get_area_error_per_mode_per_grid_last_points(rel_files, folder)
-    plot_area_error_mode_per_grid_dict(args, area_error_mode_last_points_dict, area_error_mode_last_points_dict=None, mecals_area_error=mecals_area_error)
+    # plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict, area_error_mode_last_points_dict=area_error_mode_last_points_dict, mecals_area_error=mecals_area_error)

     #
     #
@@ -48,9 +48,9 @@ def main():


     sorted_area_error = get_area_error(rel_files, folder)
-    plot_area_error(args, sorted_area_error, mecals_area_error)
-    plot_area_error_best(args, sorted_area_error, mecals_area_error)
-    plot_area_error_pareto(args, sorted_area_error, mecals_area_error)
+    plot_area_error(args, sorted_area_error, area_error_mode_last_points_dict=area_error_mode_last_points_dict, mecals_area_error=mecals_area_error)
+    # plot_area_error_best(args, sorted_area_error, area_error_mode_last_points_dict=area_error_mode_last_points_dict, mecals_area_error=mecals_area_error)
+    plot_area_error_pareto(args, sorted_area_error, area_error_mode_last_points_dict=area_error_mode_last_points_dict, mecals_area_error=mecals_area_error)

     # area_error_ns_dict = get_area_error_per_num_subgraphs(rel_files, folder)
     # plot_area_error_per_ns(args, area_error_ns_dict)
@@ -260,8 +260,9 @@ def get_area_error_per_mode_per_grid_last_points(relevant_files, folder):
     for mode in range(200):
         for rep in relevant_files:
             last_point = False
-            min_area = float('inf')
-            max_error = 0
+            last_area = float('inf')
+            last_et = 0
+            last_iteration = 0
             if re.search(f'mode{mode}_', rep):
                 et = int(re.search(f'et(\d+)_', rep).group(1))
                 grid = re.search(f'(\d+X\d+)', rep).group()
@@ -275,12 +276,15 @@ def get_area_error_per_mode_per_grid_last_points(relevant_files, folder):
                     else:
                         if line[3].startswith('SAT'):
                             area = float(line[5])
-                            # error = int(line[8])
+                            error = int(line[8])
+                            iteration = int(line[1])
+                            if iteration > last_iteration:
+                                last_iteration = iteration
+                                last_area = area
+                                last_et = error

-                            if area < min_area:
-                                min_area = area
-                last_point = (min_area, et)
+                last_point = (last_area, last_et)
                 if grid not in area_error_per_grid.keys():
                     area_error_per_grid[grid] = []
                 area_error_per_grid[grid].append(last_point)
@@ -293,8 +297,8 @@ def get_area_error_per_mode_grid(relevant_files, folder):
     area_error_mode_per_grid_dict: Dict[int: [float, int]] = {}
     area_error_per_grid = {}
     grids = []
-    print(f'{relevant_files = }')
-    print(f'{len(relevant_files) = }')
+    # print(f'{relevant_files = }')
+    # print(f'{len(relevant_files) = }')

     for mode in range(200):
         for rep in relevant_files:
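The reworked collector above keeps, per report file, the SAT row with the highest iteration number instead of the smallest area, so the plotted point is where the exploration actually stopped. A reduced sketch of that selection rule (the `rows` tuples are a hypothetical stand-in for the parsed CSV columns):

    def last_sat_point(rows):
        # rows: iterable of (iteration, area, error) tuples, SAT rows only.
        last_iteration, last_area, last_et = 0, float('inf'), 0
        for iteration, area, error in rows:
            if iteration > last_iteration:
                last_iteration, last_area, last_et = iteration, area, error
        return last_area, last_et

    # Iteration 3 wins even though iteration 1 produced the smaller area.
    assert last_sat_point([(1, 9.0, 4), (2, 9.5, 8), (3, 9.8, 16)]) == (9.8, 16)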
@@ -478,13 +482,24 @@ def tuple_score(tpl):
     figname = f'{folder}/{args.benchmark_name}_cell_scores_per_mode.png'
     plt.savefig(figname)

-def plot_area_error(args, sorted_area_error: List, mecals_area_error: List = None):
-    areas = [item[0] for item in sorted_area_error]
-    errors = [item[1] for item in sorted_area_error]
+def plot_area_error(args, sorted_area_error: List, area_error_mode_last_points_dict: Dict = None, mecals_area_error: List = None):
+    color_map = {
+        51: 'red',
+        53: 'orange',
+        12: 'cyan',
+        121: 'green'
+        # Add more modes and their respective colors as needed
+    }

-    # Plot the data
     plt.figure(figsize=(10, 6))
+
+    areas = [item[0] for item in sorted_area_error]
+    errors = [item[1] for item in sorted_area_error]
     assert len(errors) == len(areas)
+    plt.scatter(errors, areas, marker='D', label='SubXPAT', color='blue', alpha=0.2, s=50)
+    # Plot the data
+
     if mecals_area_error:
         mecals_areas = [item[0] for item in mecals_area_error]
         mecals_errors = [item[1] for item in mecals_area_error]
@@ -492,17 +507,29 @@ def plot_area_error(args, sorted_area_error: List, mecals_area_error: List = None):
         plt.plot(mecals_errors, mecals_areas, 's--', label='MECALS', color='black')
         plt.legend()

-    plt.plot(errors, areas, 'o', label='SubXPAT', color='blue')
+    if area_error_mode_last_points_dict:
+        for mode in area_error_mode_last_points_dict.keys():
+            sorted_area_error_per_mode = area_error_mode_last_points_dict[mode]
+            for grid in sorted_area_error_per_mode.keys():
+                sorted_area_error = sorted_area_error_per_mode[grid]
+                areas = [item[0] for item in sorted_area_error]
+                errors = [item[1] for item in sorted_area_error]
+                color = color_map.get(mode, 'black')
+                plt.scatter(errors, areas, marker='s', label=f'mode{mode} (final-points), grid{grid}', alpha=1,
+                            color=color)
+
     plt.title(f'{args.benchmark_name}, ')
     plt.xlabel('ET')
     plt.ylabel('Area')
     plt.grid(True)
+    plt.legend()
     folder, _ = OUTPUT_PATH['figure']

     figname = f'{folder}/{args.benchmark_name}_all.png'
     plt.savefig(figname)

-def plot_area_error_best(args, sorted_area_error: List, mecals_area_error: List = None):
+def plot_area_error_best(args, sorted_area_error: List, area_error_mode_last_points_dict: Dict = None, mecals_area_error: List = None):

     min_area_by_error = {}

@@ -515,6 +542,7 @@ def plot_area_error_best(args, sorted_area_error: List, mecals_area_error: List = None):
     errors = [item[0] for item in min_area_by_error.items()]
     # Plot the data
     plt.figure(figsize=(10, 6))
+    plt.scatter(errors, areas, marker='D', label='SubXPAT', color='blue', alpha=0.5, s=50)
     assert len(errors) == len(areas)
     if mecals_area_error:
         mecals_areas = [item[0] for item in mecals_area_error]
@@ -523,7 +551,18 @@ def plot_area_error_best(args, sorted_area_error: List, mecals_area_error: List = None):
         plt.plot(mecals_errors, mecals_areas, 's--', label='MECALS', color='black')
         plt.legend()

-    plt.plot(errors, areas, 'o', label='SubXPAT', color='blue')
+
+
+    if area_error_mode_last_points_dict:
+        for mode in area_error_mode_last_points_dict.keys():
+            sorted_area_error_per_mode = area_error_mode_last_points_dict[mode]
+            for grid in sorted_area_error_per_mode.keys():
+                sorted_area_error = sorted_area_error_per_mode[grid]
+                areas = [item[0] for item in sorted_area_error]
+                errors = [item[1] for item in sorted_area_error]
+                plt.scatter(errors, areas, marker='D', label=f'mode{mode}, grid{grid}', alpha=1, color='orange')
+
     plt.title(f'{args.benchmark_name}, ')
     plt.xlabel('ET')
     plt.ylabel('Area')
@@ -535,8 +574,13 @@ def is_dominated(current_point, points):
     """ Check if the current point is dominated by any point in the list """
     for point in points:
-        if point[0] < current_point[0] and point[1] < current_point[1] and point != current_point:
-            return True
+        # if point[0] < current_point[0] and point[1] < current_point[1] and point != current_point:
+        if point != current_point:
+            if point[0] <= current_point[0] and point[1] <= current_point[1]:
+                return True
+
+
     return False

 def find_pareto_points(points):
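With the dominance test loosened from `<` to `<=`, a point that ties on one coordinate and improves (or ties) on the other now counts as dominating, so weakly dominated points are pruned from the front as well. A small usage sketch of the two helpers, on made-up (area, error) pairs:

    points = [(10.0, 2), (9.0, 2), (9.0, 4), (7.5, 8), (8.0, 8)]
    # (10.0, 2) is dominated by (9.0, 2): equal error, smaller area.
    # (9.0, 4) is dominated by (9.0, 2): equal area, smaller error.
    # (8.0, 8) is dominated by (7.5, 8).
    print(find_pareto_points(points))  # [(9.0, 2), (7.5, 8)]

One side effect of the `point != current_point` guard: exact duplicates compare equal, so duplicated points never dominate each other and every copy survives the filter.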
@@ -547,8 +591,15 @@ def find_pareto_points(points):
             pareto_points.append(point)
     return pareto_points

-def plot_area_error_pareto(args, sorted_area_error: List, mecals_area_error: List = None):
+def plot_area_error_pareto(args, sorted_area_error: List, area_error_mode_last_points_dict: Dict = None, mecals_area_error: List = None):
+    color_map = {
+        51: 'red',
+        53: 'orange',
+        12: 'cyan',
+        121: 'green'
+        # Add more modes and their respective colors as needed
+    }

     # min_area_by_error = {}
     # for area, error in sorted_area_error:
@@ -558,25 +609,38 @@ def plot_area_error_pareto(args, sorted_area_error: List, mecals_area_error: List = None):
     #         min_area_by_error[error] = area
     # areas = [item[1] for item in min_area_by_error.items()]
     # errors = [item[0] for item in min_area_by_error.items()]
-
+    # print(f'{sorted_area_error= }')
     pareto_points = find_pareto_points(sorted_area_error)
+    # print(f'{pareto_points= }')
     areas = [item[0] for item in pareto_points]
     errors = [item[1] for item in pareto_points]
     # Plot the data
     plt.figure(figsize=(10, 6))
+    plt.scatter(errors, areas, marker='D', label='SubXPAT', color='blue', alpha=1, s=50)
     assert len(errors) == len(areas)
     if mecals_area_error:
         mecals_areas = [item[0] for item in mecals_area_error]
         mecals_errors = [item[1] for item in mecals_area_error]
         plt.plot(mecals_errors, mecals_areas, 's--', label='MECALS', color='black')
-        plt.legend()
-    plt.plot(errors, areas, 'o', label='SubXPAT', color='blue')
+
+
+    if area_error_mode_last_points_dict:
+        for mode in area_error_mode_last_points_dict.keys():
+            sorted_area_error_per_mode = area_error_mode_last_points_dict[mode]
+            for grid in sorted_area_error_per_mode.keys():
+                sorted_area_error = sorted_area_error_per_mode[grid]
+                areas = [item[0] for item in sorted_area_error]
+                errors = [item[1] for item in sorted_area_error]
+                color = color_map.get(mode, 'black')
+                plt.scatter(errors, areas, marker='s', label=f'mode{mode} (final-points), grid{grid}', alpha=1, color=color)
+
     plt.title(f'{args.benchmark_name}, ')
     plt.xlabel('ET')
     plt.ylabel('Area')
     plt.grid(True)
+    plt.legend()
     folder, _ = OUTPUT_PATH['figure']
     figname = f'{folder}/{args.benchmark_name}_pareto.png'
     plt.savefig(figname)
@@ -679,7 +743,7 @@ def plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict: Dict, mecals_area_error=None):
             sorted_area_error = sorted_area_error_per_mode[grid]
             areas = [item[0] for item in sorted_area_error]
             errors = [item[1] for item in sorted_area_error]
-            plt.scatter(errors, areas, marker='s', label=f'mode{mode}, grid{grid}', alpha=0.5)
+            plt.scatter(errors, areas, marker='D', label=f'mode{mode}, grid{grid}', alpha=1)

     plt.legend()
     plt.xlabel('ET')
diff --git a/sxpat/annotatedGraph.py b/sxpat/annotatedGraph.py
index fa15af4d4..ee4af2fe9 100644
--- a/sxpat/annotatedGraph.py
+++ b/sxpat/annotatedGraph.py
@@ -2596,7 +2596,7 @@ def find_subgraph_feasible_hard_limited_inputs_refined_bitvec(self, specs_obj: TemplateSpecs):
         # opt = Optimize()
         opt = Optimize()
         # opt.add(skipped_nodes_constraints)
-
+        set_param("parallel.enable", True)
         max_func = []

         # List of all the partition edges
@@ -2755,7 +2755,7 @@ def find_subgraph_feasible_hard_limited_inputs_refined_bitvec(self, specs_obj: TemplateSpecs):
         sat_time_e = time.time()

-        # print(f'\nSolver time = {round( sat_time_e-sat_time_s , 2)}')
+        print(f'\nSolver time = {round( sat_time_e-sat_time_s , 2)}')

         convex_check = time.time()
         # Check partition convexity
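Patch 6 also flips on Z3's parallel mode before building the Optimize instance. `set_param("parallel.enable", True)` is a process-global switch rather than a per-solver option, so one call affects every solver created afterwards; how much it helps a given Optimize query depends on the tactics Z3 ends up running. A minimal sketch of the pattern (the constraints are placeholders):

    from z3 import Optimize, Int, set_param, sat

    set_param("parallel.enable", True)  # global: allow multi-threaded solving where supported

    opt = Optimize()
    x = Int('x')
    opt.add(x > 3, x < 10)
    opt.minimize(x)
    if opt.check() == sat:
        print(opt.model())  # e.g. [x = 4]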
From c806e5c934cbff37806d7c3c4ce3c82840dd5d61 Mon Sep 17 00:00:00 2001
From: MorellRAP
Date: Fri, 5 Jul 2024 13:35:41 +0200
Subject: [PATCH 7/7] minor update

---
 analyze.py | 220 ++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 202 insertions(+), 18 deletions(-)

diff --git a/analyze.py b/analyze.py
index bec78ac35..d901cab01 100644
--- a/analyze.py
+++ b/analyze.py
@@ -5,6 +5,8 @@ import csv
 import matplotlib.pyplot as plt
 import re
 import itertools
+import matplotlib.cm as cm
+import numpy as np

 from typing import List, Tuple, Dict
 from sxpat.config.paths import *
@@ -20,11 +22,23 @@ def main():
     folder = 'experiments/results'
     rel_files = get_relevant_files(args, folder)

+    # area_error_mode_et = get_area_error_per_et(rel_files, folder)
+    # plot_area_error_per_et(args, area_error_mode_et)
+
+    area_error_per_omax_dict = get_area_error_per_omax(rel_files, folder)
+    plot_area_error_per_omax(args, area_error_per_omax_dict)
+
+    runtime_error_per_omax_per_mode = get_runtime_per_mode_and_omax(rel_files, folder)
+
+    plot_runtime_per_mode_and_omax(args, runtime_error_per_omax_per_mode)
+    exit()
+
     # area_error_mode_per_grid_dict = get_area_error_per_mode_grid(rel_files, folder)
     # plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict, area_error_mode_last_points_dict=None,
     #                                    mecals_area_error=mecals_area_error)
-    area_error_mode_last_points_dict = get_area_error_per_mode_per_grid_last_points(rel_files, folder)
+    # area_error_mode_last_points_dict = get_area_error_per_mode_per_grid_last_points(rel_files, folder)
     # plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict, area_error_mode_last_points_dict=area_error_mode_last_points_dict, mecals_area_error=mecals_area_error)

     #
@@ -41,16 +55,17 @@ def main():
     #
     #
-    get_runtime_per_mode_decomposition(rel_files, folder)
+    get_runtime_per_mode_decomposition(rel_files, folder)
     # sorted_runtime_error_per_mode = get_runtime_per_mode(rel_files, folder)
     # plot_runtime_error_per_mode(args, sorted_runtime_error_per_mode)


-    sorted_area_error = get_area_error(rel_files, folder)
-    plot_area_error(args, sorted_area_error, area_error_mode_last_points_dict=area_error_mode_last_points_dict, mecals_area_error=mecals_area_error)
-    # plot_area_error_best(args, sorted_area_error, area_error_mode_last_points_dict=area_error_mode_last_points_dict, mecals_area_error=mecals_area_error)
-    plot_area_error_pareto(args, sorted_area_error, area_error_mode_last_points_dict=area_error_mode_last_points_dict, mecals_area_error=mecals_area_error)
+    # sorted_area_error = get_area_error(rel_files, folder)
+    # plot_area_error(args, sorted_area_error, area_error_mode_last_points_dict=None, mecals_area_error=mecals_area_error)
+    # # plot_area_error(args, sorted_area_error, area_error_mode_last_points_dict=area_error_mode_last_points_dict, mecals_area_error=mecals_area_error)
+    # # plot_area_error_best(args, sorted_area_error, area_error_mode_last_points_dict=area_error_mode_last_points_dict, mecals_area_error=mecals_area_error)
+    # plot_area_error_pareto(args, sorted_area_error, area_error_mode_last_points_dict=area_error_mode_last_points_dict, mecals_area_error=mecals_area_error)

     # area_error_ns_dict = get_area_error_per_num_subgraphs(rel_files, folder)
     # plot_area_error_per_ns(args, area_error_ns_dict)
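All of these collectors recover experiment parameters by running a separate re.search per field over the report filename. A compact illustration of the convention (the filename below is invented for illustration):

    import re

    rep = 'adder_grid4X10_mode53_omax3_et16_time.csv'  # hypothetical report name

    mode = int(re.search(r'mode(\d+)_', rep).group(1))  # 53
    omax = int(re.search(r'omax(\d+)_', rep).group(1))  # 3
    et = int(re.search(r'et(\d+)_', rep).group(1))      # 16
    grid = re.search(r'(\d+X\d+)', rep).group()         # '4X10'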
@@ -163,6 +178,34 @@ def get_area_error(relevant_files, folder):
     return sorted_area_error


+def get_area_error_per_et(relevant_files, folder):
+    area_error_mode_et_dict: Dict = {}
+    area_error_et: Dict = {}
+    for mode in range(200):
+        for rep in relevant_files:
+            area_error: List = []
+            if re.search(f'mode{mode}_', rep):
+                et = int(re.search('et(\d+)_', rep).group(1))
+                with open(f'{folder}/{rep}', 'r') as f:
+                    csvreader = csv.reader(f)
+                    for line in csvreader:
+                        if line[0].startswith('cell'):  # skip the first line
+                            continue
+                        else:
+                            if line[3].startswith('SAT'):
+                                area = float(line[5])
+                                error = int(line[8])
+                                area_error.append((area, error))
+                sorted_area_error = sorted(area_error, key=lambda x: x[1])
+                area_error_et[et] = sorted_area_error
+        if area_error_et:
+            area_error_mode_et_dict[mode] = area_error_et
+            area_error_et = {}
+    return area_error_mode_et_dict
+
+
+
 def get_mecals_area_error(args, folder):
     area_error: List[Tuple[float, int]] = []
     relevant_files = _get_mecals_rel_files(args, folder)
@@ -325,7 +368,34 @@ def get_area_error_per_mode_grid(relevant_files, folder):
             area_error_per_grid = {}
     return area_error_mode_per_grid_dict

+def get_area_error_per_omax(relevant_files, folder):
+    area_error_mode_per_grid_dict: Dict[int: [float, int]] = {}
+    area_error_per_grid = {}
+    grids = []
+    for omax in range(10):
+        for rep in relevant_files:
+            if re.search(f'omax{omax}_', rep):
+                grid = re.search(f'(\d+X\d+)', rep).group()
+                if grid not in grids:
+                    grids.append(grid)
+                with open(f'{folder}/{rep}', 'r') as f:
+                    csvreader = csv.reader(f)
+                    for line in csvreader:
+                        if line[0].startswith('cell'):  # skip the first line
+                            continue
+                        else:
+                            if line[3].startswith('SAT'):
+                                area = float(line[5])
+                                error = int(line[8])
+
+                                if grid not in area_error_per_grid.keys():
+                                    area_error_per_grid[grid] = []
+                                area_error_per_grid[grid].append((area, error))
+        if area_error_per_grid:
+            area_error_mode_per_grid_dict[omax] = area_error_per_grid
+            area_error_per_grid = {}
+    return area_error_mode_per_grid_dict

 def get_runtime_per_num_subgraphs(relevant_files, folder):
     pass
@@ -359,6 +429,43 @@ def get_runtime_per_mode(relevant_files, folder):
     return runtime_error_per_mode_dict


+def get_runtime_per_mode_and_omax(relevant_files, folder):
+    runtime_error_per_omax_per_mode: Dict = {}
+    runtime_error_per_omax: Dict = {}
+    runtime_error = []
+    for mode in range(200):
+        for omax in range(20):
+            for rep in relevant_files:
+                runtime = 0
+                if re.search(f'mode{mode}_', rep) and re.search(f'omax{omax}_', rep):
+                    et = int(re.search(f'et(\d+)_', rep).group(1))
+                    with open(f'{folder}/{rep}', 'r') as f:
+                        csvreader = csv.reader(f)
+                        for line in csvreader:
+                            if line[0].startswith('cell'):  # skip the first line
+                                continue
+                            else:
+                                subgraph_extraction = float(line[9])
+                                labeling = float(line[10])
+                                exploration = float(line[-1])
+                                cur_runtime = subgraph_extraction + labeling + exploration
+                                runtime += cur_runtime
+                    runtime_error.append((et, runtime))
+
+            if runtime_error:
+                sorted_runtime_error = sorted(runtime_error, key=lambda x: x[0])
+                runtime_error_per_omax[omax] = sorted_runtime_error
+                runtime_error = []
+        if runtime_error_per_omax:
+            runtime_error_per_omax_per_mode[mode] = runtime_error_per_omax
+            runtime_error_per_omax = {}  # reset to a fresh dict; resetting to a list here would break the next omax assignment
+
+    return runtime_error_per_omax_per_mode
+
+
+
 def get_runtime_per_mode_decomposition(relevant_files, folder):
     runtime_error_decomposed_dict: Dict[Tuple[int, float]] = {}
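The accumulator above resets its per-mode and per-omax temporaries by hand, which is easy to get wrong (the reset of `runtime_error_per_omax` must be a dict, not a list). As a sketch only, the same mode -> omax -> [(et, runtime)] nesting built with defaultdict, over hypothetical pre-parsed records:

    from collections import defaultdict

    def runtimes_by_mode_and_omax(records):
        # records: iterable of (mode, omax, et, runtime) tuples.
        nested = defaultdict(lambda: defaultdict(list))
        for mode, omax, et, runtime in records:
            nested[mode][omax].append((et, runtime))
        # Sort each series by et, as the sorted(..., key=lambda x: x[0]) above does.
        return {m: {o: sorted(v) for o, v in d.items()} for m, d in nested.items()}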
@@ -379,17 +486,6 @@ def get_runtime_per_mode_decomposition(relevant_files, folder):
                             if error not in runtime_error_decomposed_dict:
                                 runtime_error_decomposed_dict[error] = []
                             runtime_error_decomposed_dict[error].append((subgraph_extraction, labeling, exploration))
-
-
-
-    # runtime_error = [(runtime, error) for error, runtime in runtime_error_dict.items()]
-    # sorted_runtime_error = sorted(runtime_error, key=lambda x: x[1])
-    # if sorted_runtime_error:
-    #     runtime_error_per_mode_dict[mode] = sorted_runtime_error
-    #     runtime_error_dict = {}
-    # for el in runtime_error_decomposed_dict:
-    #     print(f'{runtime_error_decomposed_dict[el] = }')
-    # print(f'{runtime_error_decomposed_dict = }')
     return runtime_error_per_mode_dict
@@ -529,6 +625,34 @@ def plot_area_error(args, sorted_area_error: List, area_error_mode_last_points_dict: Dict = None, mecals_area_error: List = None):
     plt.savefig(figname)


+def plot_area_error_per_et(args, area_error_mode_et):
+    plt.figure(figsize=(10, 6))
+    color_idx = 0
+    distinct_colors = ['red', 'blue', 'green', 'orange', 'purple', 'brown', 'pink', 'gray']
+    modes = list(area_error_mode_et.keys())
+    mode_markers = ['o', 'x', '^']
+    mode_idx = 0
+    for mode in area_error_mode_et.keys():
+        et_arrays = list(area_error_mode_et[mode].keys())
+        et_arrays.sort()
+
+        for et in et_arrays:
+            areas = [item[0] for item in area_error_mode_et[mode][et]]
+            errors = [item[1] for item in area_error_mode_et[mode][et]]
+            plt.scatter(errors, areas, alpha=0.4, label=f'mode{mode}_et{et}',
+                        color=distinct_colors[color_idx % 8], marker=mode_markers[mode_idx])
+            color_idx += 1
+        mode_idx += 1  # advance the marker once per mode, not per et
+
+    plt.title(f'{args.benchmark_name}, ')
+    plt.xlabel('ET')
+    plt.ylabel('Area')
+    plt.grid(True)
+    plt.xticks()
+    plt.legend()
+    folder, _ = OUTPUT_PATH['figure']
+    figname = f'{folder}/{args.benchmark_name}_per_et.png'
+    plt.savefig(figname)
+

 def plot_area_error_best(args, sorted_area_error: List, area_error_mode_last_points_dict: Dict = None, mecals_area_error: List = None):

     min_area_by_error = {}
@@ -758,6 +882,66 @@ def plot_area_error_mode_per_grid_dict(args, area_error_mode_per_grid_dict: Dict, mecals_area_error=None):


+def plot_area_error_per_omax(args, area_error_mode_per_grid_dict: Dict, mecals_area_error=None):
+    plt.figure(figsize=(10, 6))
+    plt.title(f'{args.benchmark_name} per mode and grid')
+    marker = itertools.cycle(('D', 'o', 'X', 's'))
+    for omax in area_error_mode_per_grid_dict.keys():
+
+        sorted_area_error_per_mode = area_error_mode_per_grid_dict[omax]
+        for grid in sorted_area_error_per_mode.keys():
+            sorted_area_error = sorted_area_error_per_mode[grid]
+            areas = [item[0] for item in sorted_area_error]
+            errors = [item[1] for item in sorted_area_error]
+            plt.scatter(errors, areas, marker=next(marker), label=f'omax{omax}, grid{grid}', alpha=0.5)
+
+    if mecals_area_error:
+        mecals_areas = [item[0] for item in mecals_area_error]
+        mecals_errors = [item[1] for item in mecals_area_error]
+        plt.plot(mecals_errors, mecals_areas, 's--', label='MECALS', color='black')
+        plt.legend()
+
+    # if area_error_mode_last_points_dict:
+    #     for mode in area_error_mode_last_points_dict.keys():
+    #         sorted_area_error_per_mode = area_error_mode_last_points_dict[omax]
+    #         for grid in sorted_area_error_per_mode.keys():
+    #             sorted_area_error = sorted_area_error_per_mode[grid]
+    #             areas = [item[0] for item in sorted_area_error]
+    #             errors = [item[1] for item in sorted_area_error]
+    #             plt.scatter(errors, areas, marker='D', label=f'mode{mode}, grid{grid}', alpha=1)
+
+    plt.legend()
+    plt.xlabel('ET')
+    plt.ylabel('Area')
+    plt.grid(True)
+    folder, _ = OUTPUT_PATH['figure']
+
+    figname = f'{folder}/{args.benchmark_name}_omax.png'
+    plt.savefig(figname)
+
+
+def plot_runtime_per_mode_and_omax(args, runtime_error_per_omax_per_mode):
+    plt.figure(figsize=(10, 6))
+    plt.title(f'{args.benchmark_name} runtime')
+    marker = itertools.cycle(('D', 'o', 'X', 's'))
+
+    for mode in runtime_error_per_omax_per_mode.keys():
+        for omax in runtime_error_per_omax_per_mode[mode].keys():
+            errors = [item[0] for item in runtime_error_per_omax_per_mode[mode][omax]]
+            runtimes = [item[1] for item in runtime_error_per_omax_per_mode[mode][omax]]
+            plt.plot(errors, runtimes, marker=next(marker), label=f'omax{omax}', alpha=1)
+
+    plt.legend()
+    plt.xlabel('ET')
+    plt.ylabel('Runtime')
+
+    plt.grid(True)
+    folder, _ = OUTPUT_PATH['figure']
+
+    figname = f'{folder}/{args.benchmark_name}_omax_runtime.png'
+    plt.savefig(figname)
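Both new plotting helpers draw an open-ended number of series with only four marker shapes by pulling from itertools.cycle, which repeats forever instead of raising StopIteration:

    import itertools

    marker = itertools.cycle(('D', 'o', 'X', 's'))
    print([next(marker) for _ in range(6)])  # ['D', 'o', 'X', 's', 'D', 'o']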
+

 def plot_runtime_aggregated(args, sorted_runtime_error):
     areas = [item[0] for item in sorted_runtime_error]