forked from Anynoumsiccv9970/G2P-DDM
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtrain_backtranslate.py
64 lines (52 loc) · 2.36 KB
/
train_backtranslate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
from asyncio.log import logger
from configs.train_options import TrainOptions
import pytorch_lightning as pl
import argparse
import torch
from backmodels.point2text_model import BackTranslateModel
# from backmodels.point2text_model_2 import BackTranslateModel
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
# from data_phoneix.phoneix_text2pose_img_data_shift import PhoenixPoseData, PoseDataset
from data_phoneix.phonex_data import PhoenixPoseData
from util.util import CheckpointEveryNSteps
import os
from pytorch_lightning.loggers import NeptuneLogger
from data.vocabulary import Dictionary
def main():
    """Evaluate a trained BackTranslateModel checkpoint on the Phoenix val/test splits.

    Parses Trainer + model CLI options, builds the Phoenix pose data module,
    restores the model from a fixed checkpoint, and runs validation over the
    test and validation dataloaders (no training is performed here).
    """
    pl.seed_everything(1234)

    parser = argparse.ArgumentParser()
    parser = BackTranslateModel.add_model_specific_args(parser)
    parser = pl.Trainer.add_argparse_args(parser)
    opt = TrainOptions(parser).parse()

    data = PhoenixPoseData(opt)
    # Instantiate all dataloaders up front so any dataset-side setup/caching
    # happens before validation; the return values are intentionally discarded.
    data.train_dataloader()
    data.val_dataloader()
    data.test_dataloader()

    text_dict = Dictionary()
    text_dict = text_dict.load(opt.vocab_file)

    # Checkpoint selected by best val_wer from a prior training run.
    ckpt_path = "experiments/backmodel/lightning_logs/small_v0/checkpoints/epoch=21-step=19514-val_wer=0.5195.ckpt"
    # FIX: load_from_checkpoint is a classmethod that constructs a *new* model;
    # the original code built a model first and then called the classmethod on
    # the instance, throwing the first construction away and relying on the
    # checkpoint's saved hyperparameters alone. Call it on the class and pass
    # the constructor arguments explicitly.
    # NOTE(review): assumes BackTranslateModel.__init__ parameters are named
    # `opt` and `text_dict` — confirm against the model definition.
    model = BackTranslateModel.load_from_checkpoint(
        ckpt_path, opt=opt, text_dict=text_dict
    )

    # NOTE(review): with save_top_k=-1 every checkpoint is kept and the
    # monitor/mode settings are effectively ignored; harmless here since this
    # script only validates, but worth revisiting if fit() is re-enabled.
    model_save_callback = ModelCheckpoint(
        monitor="val_wer", filename='{epoch}-{step}-{val_wer:.4f}',
        save_top_k=-1, mode="min",
    )
    early_stop_callback = EarlyStopping(
        monitor="val_wer", min_delta=0.00, patience=20, verbose=False, mode="min",
    )
    callbacks = [model_save_callback, early_stop_callback]

    # Only switch to DDP when more than one GPU is requested.
    kwargs = dict()
    if opt.gpus > 1:
        kwargs = dict(accelerator='cuda', gpus=opt.gpus, strategy="ddp")
    trainer = pl.Trainer.from_argparse_args(
        opt, callbacks=callbacks, max_steps=2000000, **kwargs
    )

    # Evaluation only: run the validation loop on both held-out splits.
    trainer.validate(model, dataloaders=data.test_dataloader())
    trainer.validate(model, dataloaders=data.val_dataloader())


if __name__ == "__main__":
    main()