From ae4e5c58c37324b705e304e87796c87fd47942cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=86=E6=89=AC?= Date: Tue, 24 Nov 2020 14:09:52 +0800 Subject: [PATCH] Update README Update README Update README --- README.md | 11 ++++------- main.py | 4 ++-- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 6ded296a7..863c338b9 100644 --- a/README.md +++ b/README.md @@ -22,11 +22,8 @@ I'm playing with [PyTorch](http://pytorch.org/) on the CIFAR10 dataset. | [DenseNet121](https://arxiv.org/abs/1608.06993) | 95.04% | | [PreActResNet18](https://arxiv.org/abs/1603.05027) | 95.11% | | [DPN92](https://arxiv.org/abs/1707.01629) | 95.16% | +| [DLA](https://arxiv.org/abs/1707.06484) | 95.47% | -## Learning rate adjustment -I manually change the `lr` during training: -- `0.1` for epoch `[0,150)` -- `0.01` for epoch `[150,250)` -- `0.001` for epoch `[250,350)` - -Resume the training with `python main.py --resume --lr=0.01` +## Training +Start training with: `CUDA_VISIBLE_DEVICES=0 python main.py` +You can manually resume the training with: `CUDA_VISIBLE_DEVICES=0 python main.py --resume --lr=0.01` diff --git a/main.py b/main.py index 3491a845c..05ca1eb90 100644 --- a/main.py +++ b/main.py @@ -86,7 +86,7 @@ criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4) -scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100) +scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200) # Training @@ -148,7 +148,7 @@ def test(epoch): best_acc = acc -for epoch in range(start_epoch, start_epoch+100): +for epoch in range(start_epoch, start_epoch+200): train(epoch) test(epoch) scheduler.step()