main.py
import argparse
from trainer import Trainer, Trainer_multi, Trainer_woISeq
from utils import init_logger, load_tokenizer, read_prediction_text, set_seed, MODEL_CLASSES, MODEL_PATH_MAP
from data_loader import load_and_cache_examples
from datetime import datetime
import random
import time


def main(args):
    init_logger(args)
    set_seed(args)
    tokenizer = load_tokenizer(args)

    # Load the three splits (cached to disk on first use).
    train_dataset = load_and_cache_examples(args, tokenizer, mode="train")
    dev_dataset = load_and_cache_examples(args, tokenizer, mode="dev")
    test_dataset = load_and_cache_examples(args, tokenizer, mode="test")

    # Select the multi-intent trainer when --multi_intent 1 is passed.
    if args.multi_intent == 1:
        trainer = Trainer_multi(args, train_dataset, dev_dataset, test_dataset)
    else:
        trainer = Trainer(args, train_dataset, dev_dataset, test_dataset)

    if args.do_train:
        trainer.train()

    if args.do_eval:
        trainer.load_model()
        trainer.evaluate("test")


if __name__ == '__main__':
    # Sleep for a random 0-10 s at startup (e.g., to stagger runs launched
    # simultaneously that share cached data files).
    time_wait = random.uniform(0, 10)
    time.sleep(time_wait)

    parser = argparse.ArgumentParser()
parser.add_argument("--task", default=None, required=True, type=str, help="The name of the task to train")
parser.add_argument("--model_dir", default=None, required=True, type=str, help="Path to save, load model")
parser.add_argument("--data_dir", default="./data", type=str, help="The input data dir")
parser.add_argument("--intent_label_file", default="intent_label.txt", type=str, help="Intent Label file")
parser.add_argument("--slot_label_file", default="slot_label.txt", type=str, help="Slot Label file")
parser.add_argument("--model_type", default="bert", type=str, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--intent_seq", type=int, default=0, help="whether we use intent seq setting")
parser.add_argument("--multi_intent", type=int, default=0, help="whether we use multi intent setting")
parser.add_argument("--tag_intent", type=int, default=0, help="whether we can use tag to predict intent")
parser.add_argument("--BI_tag", type=int, default=0, help='use BI sum or just B')
parser.add_argument("--cls_token_cat", type=int, default=0, help='whether we cat the cls to the slot output of bert')
parser.add_argument("--intent_attn", type=int, default=0, help='whether we use attention mechanism on the CLS intent output')
parser.add_argument("--num_mask", type=int, default=4, help="assumptive number of slot in one sentence")
parser.add_argument('--seed', type=int, default=1234, help="random seed for initialization")
parser.add_argument("--train_batch_size", default=256, type=int, help="Batch size for training.")
parser.add_argument("--eval_batch_size", default=128, type=int, help="Batch size for evaluation.")
parser.add_argument("--max_seq_len", default=50, type=int, help="The maximum total input sequence length after tokenization.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs", default=10.0, type=float, help="Total number of training epochs to perform.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1, type=float, help="Max gradient norm.")
parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--dropout_rate", default=0.1, type=float, help="Dropout for fully-connected layers")
parser.add_argument('--logging_steps', type=int, default=500, help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=200, help="Save checkpoint every X updates steps.")
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the test set.")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--ignore_index", default=0, type=int,
help='Specifies a target value that is ignored and does not contribute to the input gradient')
parser.add_argument('--slot_loss_coef', type=float, default=1.0, help='Coefficient for the slot loss.')
parser.add_argument('--tag_intent_coef', type=float, default=1.0, help='Coefficient for the tag intent loss')
# CRF option
parser.add_argument("--use_crf", action="store_true", help="Whether to use CRF")
parser.add_argument("--slot_pad_label", default="PAD", type=str, help="Pad token for slot label pad (to be ignore when calculate loss)")
parser.add_argument("--patience", default=0, type=int, help="The initial learning rate for Adam.")
    args = parser.parse_args()

    # Strip a trailing slash, then append a timestamp so each run writes to
    # its own model directory.
    if args.model_dir[-1] == '/':
        args.model_dir = args.model_dir[:-1]
    now = datetime.now()
    args.model_dir = args.model_dir + '_' + now.strftime('%m-%d-%H:%M:%S')

    args.model_name_or_path = MODEL_PATH_MAP[args.model_type]
    main(args)
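
# Example invocation (a sketch; the task name "atis" and the model directory
# below are hypothetical -- substitute whatever actually lives under --data_dir):
#
#   python main.py --task atis --model_dir ./saved_model \
#       --model_type bert --multi_intent 1 --do_train --do_eval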