Skip to content

Commit

Permalink
Add gated command to help users request access to gated Hugging Face models
Browse files Browse the repository at this point in the history
  • Loading branch information
pierre.delaunay committed Jan 13, 2025
1 parent 4dc0ba9 commit 137e4ae
Show file tree
Hide file tree
Showing 4 changed files with 55 additions and 3 deletions.
13 changes: 13 additions & 0 deletions config/base.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -67,12 +67,14 @@ llama:
group: llm
install_group: torch
max_duration: 3600
url: https://huggingface.co/meta-llama/Llama-2-7b/tree/main
tags:
- nlp
- llm
- inference
- monogpu
- nobatch
- gated

voir:
options:
Expand Down Expand Up @@ -541,6 +543,8 @@ _llm:
tags:
- nlp
- llm
- gated

max_duration: 3600
num_machines: 1
inherits: _defaults
Expand All @@ -549,6 +553,7 @@ _llm:

llm-lora-single:
inherits: _llm
url: https://huggingface.co/meta-llama/Llama-3.1-8B
tags:
- monogpu
plan:
Expand All @@ -574,8 +579,11 @@ llm-lora-ddp-gpus:
plan:
method: njobs
n: 1

url: https://huggingface.co/meta-llama/Llama-3.1-8B
tags:
- multigpu

argv:
"{milabench_code}/recipes/lora_finetune_distributed.py": true
--config: "{milabench_code}/configs/llama3_8B_lora_single_device.yaml"
Expand All @@ -599,6 +607,7 @@ llm-lora-ddp-nodes:
method: njobs
n: 1

url: https://huggingface.co/meta-llama/Llama-3.1-8B
argv:
"{milabench_code}/recipes/lora_finetune_distributed.py": true
--config: "{milabench_code}/configs/llama3_8B_lora_single_device.yaml"
Expand All @@ -618,6 +627,7 @@ llm-lora-ddp-nodes:

llm-lora-mp-gpus:
inherits: _llm
url: https://huggingface.co/meta-llama/Llama-3.1-70B
tags:
- multigpu
plan:
Expand All @@ -644,6 +654,8 @@ llm-full-mp-gpus:
options:
stop: 30
inherits: _llm

url: https://huggingface.co/meta-llama/Llama-3.1-70B
tags:
- multigpu
plan:
Expand All @@ -666,6 +678,7 @@ llm-full-mp-gpus:
device={device_name}: true

llm-full-mp-nodes:
url: https://huggingface.co/meta-llama/Llama-3.1-70B
tags:
- multinode
max_duration: 3600
Expand Down
6 changes: 3 additions & 3 deletions milabench/_version.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""This file is generated, do not modify"""

__tag__ = "v0.1.0-146-ga8415d3"
__commit__ = "a8415d3da9f91aa1ac23d932dff2c70fe580e556"
__date__ = "2024-11-21 14:35:55 -0500"
__tag__ = "v1.0.0-7-g4dc0ba9"
__commit__ = "4dc0ba9f3b35119dc1947d6dc8f6502e5d34dd50"
__date__ = "2024-12-11 14:44:23 -0500"
5 changes: 5 additions & 0 deletions milabench/cli/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@
from .new import cli_new
from .env import cli_env
from .prepare_run import cli_prepare_run
from .gated import cli_gated


class Main:
def new():
Expand Down Expand Up @@ -103,6 +105,9 @@ def env():
def prepare_run():
cli_prepare_run()

def gated():
cli_gated()


def main(argv=None):
sys.path.insert(0, os.path.abspath(os.curdir))
Expand Down
34 changes: 34 additions & 0 deletions milabench/cli/gated.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@



from milabench.common import arguments, _get_multipack


def cli_gated():
    """List benchmarks that rely on gated Hugging Face models/datasets.

    Scans the resolved benchmark configurations and prints, for every
    benchmark tagged ``gated`` that also declares a ``url``, the page where
    the user must request access.  When at least one such benchmark exists,
    it also prints instructions for creating a read token and exporting it
    as ``MILABENCH_HF_TOKEN`` so that ``milabench prepare`` can download
    the gated assets.
    """
    args = arguments()

    # return_config=True yields {benchmark_name: resolved_config_dict}
    benchmarks = _get_multipack(args, return_config=True)
    gated_bench = []

    for bench, config in benchmarks.items():
        tags = config.get("tags", [])

        # Only benchmarks that are both gated AND point at a request page
        # are actionable for the user.
        if "gated" in tags and "url" in config:
            gated_bench.append((bench, config))

    if gated_bench:
        print("Some benchmarks use gated models or datasets")
        print("You need to request permission on Hugging Face")
        print()
        for bench, config in gated_bench:
            print(f"{bench}")
            print(f"    url: {config.get('url')}")

        print()
        print("Create a new token")
        print("    - https://huggingface.co/settings/tokens/new?tokenType=read")
        print("")
        print("Add your token to your environment")
        print("    export MILABENCH_HF_TOKEN={your_token}")
        print("")
        print("Now you are ready to execute `milabench prepare`")

0 comments on commit 137e4ae

Please sign in to comment.