#!/bin/bash
#########################################################################
# File Name: trainwcoder.sh
# Author: Xianchao Wu
# mail: [email protected]
# Created Time: Fri Aug 18 08:03:48 2023
#########################################################################
# Training data (Code Alpaca 20k) and checkpoint output directory.
data="/workspace/asr/WizardLM/WizardCoder/data/code_alpaca_20k.json"
#outdir="/workspace/asr/Llama-X/src/checkpoints_wcode"
outdir="/workspace/asr/WizardLM/WizardCoder/ckpts"
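
# --------------------------------------------------------------------------
# Sketch: create a minimal configs/deepspeed_config.json if one is missing,
# since the command below passes --deepspeed with that path. Assumption:
# ZeRO stage 2 with "auto" values that the HF Trainer fills in from the
# command-line flags; this is an illustrative default, not necessarily the
# repo's actual configs/deepspeed_config.json.
# --------------------------------------------------------------------------
if [ ! -f configs/deepspeed_config.json ]; then
    mkdir -p configs
    cat > configs/deepspeed_config.json <<'EOF'
{
    "fp16": { "enabled": "auto" },
    "zero_optimization": {
        "stage": 2,
        "overlap_comm": true,
        "contiguous_gradients": true
    },
    "train_micro_batch_size_per_gpu": "auto",
    "train_batch_size": "auto",
    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto"
}
EOF
fi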
# Normal launch (uncomment to train with the DeepSpeed launcher instead):
#deepspeed src/train_wizardcoder.py \
# Single-process run under ipdb for interactive step-through debugging:
python -m ipdb src/train_wizardcoder.py \
--model_name_or_path "bigcode/starcoder" \
--data_path "$data" \
--output_dir "$outdir" \
--cache_dir "/workspace/asr/WizardLM/WizardCoder" \
--num_train_epochs 3 \
--model_max_length 2048 \
--per_device_train_batch_size 1 \
--per_device_eval_batch_size 1 \
--gradient_accumulation_steps 1 \
--evaluation_strategy "no" \
--save_strategy "steps" \
--save_steps 50 \
--save_total_limit 2 \
--learning_rate 2e-5 \
--warmup_steps 30 \
--logging_steps 2 \
--lr_scheduler_type "cosine" \
--report_to "tensorboard" \
--gradient_checkpointing True \
--deepspeed configs/deepspeed_config.json \
--fp16 True
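
# After launch, training curves can be inspected with TensorBoard; with
# --report_to "tensorboard" the HF Trainer writes event files under the
# output directory, e.g.:
#   tensorboard --logdir "$outdir"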