Skip to content

Commit

Permalink
Shortening model names to be under 40 chars.
Browse files Browse the repository at this point in the history
  • Loading branch information
RoshaniN committed Dec 16, 2024
1 parent 77c8c04 commit cd1999e
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 10 deletions.
8 changes: 4 additions & 4 deletions benchmarks/benchmark_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,8 +89,8 @@ def add_shared_arguments(custom_parser: argparse.ArgumentParser):
'llama2_70b_4096_real_data',
'llama2_70b_4096_pw_long_run',
'llama2_70b_4096_real_data_pw_long_run',
'llama2_70b_4096_sc_synthetic_pw_lr',
'llama2_70b_4096_sc_synthetic',
'llama2_70b_4096_synthetic_pw_lr',
'llama2_70b_4096_synthetic',
'llama3_70b_8192',
'llama3_1_405b_8192_fsdp_dcn',
'mixtral_8x7b_dropped',
Expand All @@ -108,8 +108,8 @@ def add_shared_arguments(custom_parser: argparse.ArgumentParser):
'llama2_70b_4096_real_data '
'llama2_70b_4096_pw_long_run '
'llama2_70b_4096_real_data_pw_long_run '
'llama2_70b_4096_sc_synthetic_pw_lr '
'llama2_70b_4096_sc_synthetic '
'llama2_70b_4096_synthetic_pw_lr '
'llama2_70b_4096_synthetic '
'llama3_1_405b_8192_fsdp_dcn '
'mixtral_8x7b_dropped '
'mixtral_8x7b_dropped_int8 '
Expand Down
12 changes: 6 additions & 6 deletions benchmarks/maxtext_trillium_model_configs.py
Original file line number Diff line number Diff line change
Expand Up @@ -358,8 +358,8 @@ class MaxTextModel:
+ xla_flags_library.CF_FOR_ALL_GATHER
),
)
llama2_70b_4096_sc_synthetic = MaxTextModel(
model_name="llama2_70b_4096_sc_synthetic",
llama2_70b_4096_synthetic = MaxTextModel(
model_name="llama2_70b_4096_synthetic",
model_type="llama2-70b",
tuning_params={
"per_device_batch_size": 2,
Expand All @@ -385,8 +385,8 @@ class MaxTextModel:
),
)

llama2_70b_4096_sc_synthetic_pw_lr = MaxTextModel(
model_name="llama2_70b_4096_sc_synthetic_pw_lr",
llama2_70b_4096_synthetic_pw_lr = MaxTextModel(
model_name="llama2_70b_4096_synthetic_pw_lr",
model_type="llama2-70b",
tuning_params={
"per_device_batch_size": 2,
Expand Down Expand Up @@ -762,8 +762,8 @@ class MaxTextModel:
llama2_70b_4096_real_data_pw_long_run,
llama3_8b_8192,  # Not Optimized yet
llama3_70b_8192,  # Not Optimized yet
llama2_70b_4096_sc_synthetic_pw_lr,
llama2_70b_4096_sc_synthetic,
llama2_70b_4096_synthetic_pw_lr,
llama2_70b_4096_synthetic,
llama3_1_405b_8192_fsdp_dcn,
llama3_1_70b_129024,
mixtral_8x7b_dropped,
Expand Down

0 comments on commit cd1999e

Please sign in to comment.