Commit e125201: formatting
priyakasimbeg committed Jul 2, 2024
1 parent a3e513e commit e125201
Showing 2 changed files with 45 additions and 39 deletions.
54 changes: 30 additions & 24 deletions scoring/performance_profile.py
@@ -70,28 +70,35 @@

 #MPL params
 mpl.rcParams['figure.figsize'] = (16, 10)  # Width, height in inches
-mpl.rcParams['font.family'] = 'serif'
-mpl.rcParams['font.serif'] = ['Times New Roman'] + mpl.rcParams['font.serif']  # Add Times New Roman as first choice
+mpl.rcParams['font.family'] = 'serif'
+mpl.rcParams['font.serif'] = [
+    'Times New Roman'
+] + mpl.rcParams['font.serif']  # Add Times New Roman as first choice
 mpl.rcParams['font.size'] = 22
 mpl.rcParams['savefig.dpi'] = 300  # Set resolution for saved figures

 # Plot Elements
-mpl.rcParams['lines.linewidth'] = 3  # Adjust line thickness if needed
-mpl.rcParams['lines.markersize'] = 6  # Adjust marker size if needed
-mpl.rcParams['axes.prop_cycle'] = mpl.cycler(color=["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd"])  # Example color cycle (consider ColorBrewer or viridis)
-mpl.rcParams['axes.labelsize'] = 22  # Axis label font size
-mpl.rcParams['xtick.labelsize'] = 20  # Tick label font size
+mpl.rcParams['lines.linewidth'] = 3  # Adjust line thickness if needed
+mpl.rcParams['lines.markersize'] = 6  # Adjust marker size if needed
+mpl.rcParams['axes.prop_cycle'] = mpl.cycler(
+    color=["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728",
+           "#9467bd"])  # Example color cycle (consider ColorBrewer or viridis)
+mpl.rcParams['axes.labelsize'] = 22  # Axis label font size
+mpl.rcParams['xtick.labelsize'] = 20  # Tick label font size
 mpl.rcParams['ytick.labelsize'] = 20

 # Legends and Gridlines
-mpl.rcParams['legend.fontsize'] = 20  # Legend font size
-mpl.rcParams['legend.loc'] = 'best'  # Let matplotlib decide the best legend location
-mpl.rcParams['axes.grid'] = True  # Enable grid
-mpl.rcParams['grid.alpha'] = 0.4  # Gridline transparency
+mpl.rcParams['legend.fontsize'] = 20  # Legend font size
+mpl.rcParams[
+    'legend.loc'] = 'best'  # Let matplotlib decide the best legend location
+mpl.rcParams['axes.grid'] = True  # Enable grid
+mpl.rcParams['grid.alpha'] = 0.4  # Gridline transparency


 def print_dataframe(df):
-  tabulated_df = tabulate(df.T, headers='keys', tablefmt='psql')
-  logging.info(tabulated_df)
+  tabulated_df = tabulate(df.T, headers='keys', tablefmt='psql')
+  logging.info(tabulated_df)


 def generate_eval_cols(metrics):
   splits = ['train', 'validation']
@@ -206,11 +213,13 @@ def get_workloads_time_to_target(submission,
     num_trials = len(group)
     if num_trials != NUM_TRIALS and not self_tuning_ruleset:
       if strict:
-        raise ValueError(f'In Study {study}: Expecting {NUM_TRIALS} trials for workload '
-                         f'{workload} but found {num_trials} trials.')
+        raise ValueError(
+            f'In Study {study}: Expecting {NUM_TRIALS} trials for workload '
+            f'{workload} but found {num_trials} trials.')
       else:
-        logging.warning(f'In Study {study}: Expecting {NUM_TRIALS} trials for workload '
-                        f'{workload} but found {num_trials} trials.')
+        logging.warning(
+            f'In Study {study}: Expecting {NUM_TRIALS} trials for workload '
+            f'{workload} but found {num_trials} trials.')

     # Get trial and time index that reaches target
     trial_idx, time_idx = get_best_trial_index(
@@ -316,9 +325,8 @@ def compute_performance_profiles(submissions,
     # If variants do not have finite score set base_workload score to inf
     base_workload = get_base_workload_name(workload)
     df[base_workload] = df.apply(
-        variant_criteria_filter(base_workload, workload),
-        axis=1)
-
+        variant_criteria_filter(base_workload, workload), axis=1)

   logging.info("HELDOUT_WORKLOAD FILTER")
   print_dataframe(df)
@@ -415,8 +423,7 @@ def plot_performance_profiles(perf_df,
                               df_col,
                               scale='linear',
                               save_dir=None,
-                              figsize=(30, 10)
-                              ):
+                              figsize=(30, 10)):
   """Plot performance profiles.

   Args:
@@ -438,8 +445,7 @@
   """
   fig = perf_df.T.plot(figsize=figsize, alpha=0.7)
   df_col_display = f'log10({df_col})' if scale == 'log' else df_col
-  fig.set_xlabel(
-      f'Ratio of `{df_col_display}` to best submission')
+  fig.set_xlabel(f'Ratio of `{df_col_display}` to best submission')
   fig.set_ylabel('Proportion of workloads')
   fig.legend(bbox_to_anchor=(1.0, 1.0))
   plt.tight_layout()
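As a quick illustration of what this plotting code produces, here is a minimal, self-contained sketch driven by a toy performance-profile table. The toy numbers, the `median_submission_time` label, and the output filename are illustrative assumptions, not values from this commit; real inputs come from compute_performance_profiles.

# Toy sketch (not from this commit): same plotting calls as
# plot_performance_profiles, applied to a made-up profile table.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

taus = np.linspace(1.0, 4.0, 50)  # ratio-to-best thresholds on the x-axis
perf_df = pd.DataFrame(
    {tau: [min(1.0, 0.3 * tau), min(1.0, 0.25 * tau)] for tau in taus},
    index=['submission_a', 'submission_b'])  # one row per submission

ax = perf_df.T.plot(figsize=(30, 10), alpha=0.7)  # one line per submission
ax.set_xlabel('Ratio of `median_submission_time` to best submission')
ax.set_ylabel('Proportion of workloads')
ax.legend(bbox_to_anchor=(1.0, 1.0))
plt.tight_layout()
plt.savefig('performance_profile_toy.png')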
30 changes: 15 additions & 15 deletions scoring/score_submissions.py
@@ -124,21 +124,21 @@ def main(_):
   results = {}
   os.makedirs(FLAGS.output_dir, exist_ok=True)

-  # for team in os.listdir(FLAGS.submission_directory):
-  #   for submission in os.listdir(os.path.join(FLAGS.submission_directory, team)):
-  #     print(submission)
-  #     experiment_path = os.path.join(FLAGS.submission_directory, team, submission)
-  #     df = scoring_utils.get_experiment_df(experiment_path)
-  #     results[submission] = df
-  #     summary_df = get_submission_summary(df)
-  #     with open(os.path.join(FLAGS.output_dir, f'{submission}_summary.csv'),
-  #               'w') as fout:
-  #       summary_df.to_csv(fout)
-  # # Save results
-  # with open(os.path.join(FLAGS.output_dir, 'results.pkl'), 'wb') as f:
-  #   pickle.dump(results, f)
+  # for team in os.listdir(FLAGS.submission_directory):
+  #   for submission in os.listdir(os.path.join(FLAGS.submission_directory, team)):
+  #     print(submission)
+  #     experiment_path = os.path.join(FLAGS.submission_directory, team, submission)
+  #     df = scoring_utils.get_experiment_df(experiment_path)
+  #     results[submission] = df
+  #     summary_df = get_submission_summary(df)
+  #     with open(os.path.join(FLAGS.output_dir, f'{submission}_summary.csv'),
+  #               'w') as fout:
+  #       summary_df.to_csv(fout)
+
+  # # Save results
+  # with open(os.path.join(FLAGS.output_dir, 'results.pkl'), 'wb') as f:
+  #   pickle.dump(results, f)

   # Read results
   with open(os.path.join(FLAGS.output_dir, 'results.pkl'), 'rb') as f:
     results = pickle.load(f)
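For reference, the commented-out block above corresponds roughly to the runnable sketch below. `score_all_submissions` is a hypothetical helper name, and the sketch assumes it lives inside score_submissions.py, where `scoring_utils` and `get_submission_summary` are already in scope.

import os
import pickle

def score_all_submissions(submission_directory, output_dir):
  # Walk submission_directory/<team>/<submission>, score each experiment,
  # and write per-submission summaries plus a pickled results dict.
  results = {}
  for team in os.listdir(submission_directory):
    for submission in os.listdir(os.path.join(submission_directory, team)):
      experiment_path = os.path.join(submission_directory, team, submission)
      df = scoring_utils.get_experiment_df(experiment_path)
      results[submission] = df
      summary_df = get_submission_summary(df)
      with open(os.path.join(output_dir, f'{submission}_summary.csv'),
                'w') as fout:
        summary_df.to_csv(fout)
  # Save results so the "Read results" step above can load them later.
  with open(os.path.join(output_dir, 'results.pkl'), 'wb') as f:
    pickle.dump(results, f)
  return results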
