summit-70m-openclipH.yml (from a fork of CERC-AAI/multimodal)
{
# image_prefix settings
"encoder_name": "openclip-H",
# "encoder_name": "nfresnet50",
"pretrained_img_encoder": true,
"use_image_embed_layernorm": true,
"image_embed_dropout_prob": 0.1,
# adapter settings
"add_adapters": true,
"adaper_downsample_factor": 8,
"freeze-lm": true,
# data resample
"dataset_resampled": true,
# pythia-70m
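# 6 layers, hidden size 512, 8 heads, rotary embeddings on 25% of head dims, parallel (GPT-J style) residual: the Pythia-70m recipe.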
"pipe-parallel-size": 1,
"model-parallel-size": 1,
"num-layers": 6,
"hidden-size": 512,
"num-attention-heads": 8,
"seq-length": 2048,
"max-position-embeddings": 2048,
"pos-emb": "rotary",
"rotary-pct": 0.25,
"no-weight-tying": true,
"gpt-j-residual": true,
"output-layer-parallelism": "column",
# "attention-config": [[["flash"], 6]], flash attention not works
"scaled-upper-triang-masked-softmax-fusion": false,
"bias-gelu-fusion": false,
"init_method": "small_init",
"output_layer_init_method": "wang_init",
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.001,
"betas": [0.9, 0.95],
"eps": 1.0e-8
}
},
"min_lr": 0.0001,
"zero_optimization": {
"stage": 1,
"allgather_partitions": true,
"allgather_bucket_size": 500000000,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 500000000,
"contiguous_gradients": true,
"cpu_offload": false
},
"train_micro_batch_size_per_gpu": 4,
"gas": 1,
"data-impl": "mmap",
"num_workers": 0,
"checkpoint-activations": true,
"checkpoint-num-layers": 1,
"partition-activations": true,
"synchronize-each-layer": true,
"gradient_clipping": 1.0,
"weight-decay": 0.1,
"hidden-dropout": 0,
"attention-dropout": 0,
"fp16": {
"fp16": true,
"enabled": true,
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 12,
"hysteresis": 2,
"min_loss_scale": 1
},
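# 143000 train iterations matches the published Pythia training schedule.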
"train-iters": 143000,
"lr-decay-iters": 143000,
"distributed-backend": "nccl",
"lr-decay-style": "cosine",
"warmup": 0.01,
"checkpoint-factor": 1000,
# "extra-save-iters": [0,1,2,4,8,16,32,64,128,256,512],
"eval-interval": 100000,
"eval-iters": 10,
"log-interval": 10,
"steps_per_print": 10,
"wall_clock_breakdown": true,
"tokenizer-type": "HFTokenizer"
}
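# Launch sketch (assumption: this fork keeps GPT-NeoX's DeepSpeed launcher and config layout):
#   python ./deepy.py train.py summit-70m-openclipH.yml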