From 318e504a06b30e77a743109a692e75a154d892fc Mon Sep 17 00:00:00 2001 From: BBC Date: Tue, 21 Jun 2022 23:08:26 -0400 Subject: [PATCH 01/54] Add args input and pruning --- main.py | 73 +++++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 53 insertions(+), 20 deletions(-) diff --git a/main.py b/main.py index 05ca1eb90..d1f5c284e 100644 --- a/main.py +++ b/main.py @@ -19,6 +19,13 @@ parser.add_argument('--lr', default=0.1, type=float, help='learning rate') parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') +parser.add_argument('--net', default='SimpleDLA') +parser.add_argument('--train', default=False) +parser.add_argument('--test', default=False) +parser.add_argument('--epochs', type=int, default=200) +parser.add_argument('--prune', type=bool, default=False) +parser.add_argument('--prune_rate', type=float, default=0.30) + args = parser.parse_args() device = 'cuda' if torch.cuda.is_available() else 'cpu' @@ -54,21 +61,42 @@ # Model print('==> Building model..') -# net = VGG('VGG19') -# net = ResNet18() -# net = PreActResNet18() -# net = GoogLeNet() -# net = DenseNet121() -# net = ResNeXt29_2x64d() -# net = MobileNet() -# net = MobileNetV2() -# net = DPN92() -# net = ShuffleNetG2() -# net = SENet18() -# net = ShuffleNetV2(1) -# net = EfficientNetB0() -# net = RegNetX_200MF() -net = SimpleDLA() +if args.net == 'VGG19': net = VGG('VGG19') +elif args.net == 'ResNet18': net = ResNet18() +elif args.net == 'PreActResNet18': net = PreActResNet18() +elif args.net == 'GoogLeNet': net = GoogLeNet() +elif args.net == 'DenseNet121': net = DenseNet121() +elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() +elif args.net == 'MobileNet': net = MobileNet() +elif args.net == 'MobileNetV2': net = MobileNetV2() +elif args.net == 'DPN92': net = DPN92() +elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() +elif args.net == 'SENet18': net = SENet18() +elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1) +elif args.net 
== 'EfficientNetB0': net = EfficientNetB0() +elif args.net == 'RegNetX_200MF': net = RegNetX_200MF() +elif args.net == 'SimpleDLA': net = SimpleDLA() + +# Borrow sparsity() and prune() from +# https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L1 +def sparsity(model): + # Return global model sparsity + a, b = 0, 0 + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... ', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + net = net.to(device) if device == 'cuda': net = torch.nn.DataParallel(net) @@ -78,7 +106,7 @@ # Load checkpoint. print('==> Resuming from checkpoint..') assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' 
- checkpoint = torch.load('./checkpoint/ckpt.pth') + checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net)) net.load_state_dict(checkpoint['net']) best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] @@ -115,6 +143,9 @@ def train(epoch): def test(epoch): global best_acc + if args.prune: + prune(net, args.prune_rate) + net.eval() test_loss = 0 correct = 0 @@ -144,11 +175,13 @@ def test(epoch): } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') - torch.save(state, './checkpoint/ckpt.pth') + torch.save(state, './checkpoint/{}_ckpt.pth'.format(args.net)) best_acc = acc -for epoch in range(start_epoch, start_epoch+200): - train(epoch) - test(epoch) +for epoch in range(args.epochs): + if args.train: train(epoch) + if args.test: + test(epoch) + if not args.train: break scheduler.step() From 38912e4695ef1c1a828ac6d0a33b919ea5735484 Mon Sep 17 00:00:00 2001 From: BBC Date: Tue, 21 Jun 2022 23:15:45 -0400 Subject: [PATCH 02/54] Update args and pruning reference --- main.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/main.py b/main.py index d1f5c284e..fe5971aa9 100644 --- a/main.py +++ b/main.py @@ -19,12 +19,12 @@ parser.add_argument('--lr', default=0.1, type=float, help='learning rate') parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') -parser.add_argument('--net', default='SimpleDLA') -parser.add_argument('--train', default=False) -parser.add_argument('--test', default=False) +parser.add_argument('--net', default='SimpleDLA') +parser.add_argument('--train', type=bool, default=False) +parser.add_argument('--test', type=bool, default=False) parser.add_argument('--epochs', type=int, default=200) parser.add_argument('--prune', type=bool, default=False) -parser.add_argument('--prune_rate', type=float, default=0.30) +parser.add_argument('--pruning_rate', type=float, default=0.30) args = parser.parse_args() @@ -78,7 +78,7 @@ elif args.net == 'SimpleDLA': net = SimpleDLA() # 
Borrow sparsity() and prune() from -# https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L1 +# https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L174 def sparsity(model): # Return global model sparsity a, b = 0, 0 From 713fa09890057e60f6af9a6cae91e2808669e12a Mon Sep 17 00:00:00 2001 From: BBC Date: Tue, 21 Jun 2022 23:17:42 -0400 Subject: [PATCH 03/54] Add commands with args --- README.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 96fd1d260..d153e66f9 100644 --- a/README.md +++ b/README.md @@ -9,10 +9,16 @@ I'm playing with [PyTorch](http://pytorch.org/) on the CIFAR10 dataset. ## Training ``` # Start training with: -python main.py +python main.py --net VGG16 --train True --test True # You can manually resume the training with: -python main.py --resume --lr=0.01 +python main.py --net VGG16 --train True --test True --resume --lr=0.01 + +# Test only +python main.py --net VGG16 --test True + +# Test only with pruning (0.3) +python main.py --net VGG16 --test True --prune True --pruning_rate 0.3 ``` ## Accuracy @@ -33,3 +39,4 @@ python main.py --resume --lr=0.01 | [DPN92](https://arxiv.org/abs/1707.01629) | 95.16% | | [DLA](https://arxiv.org/pdf/1707.06484.pdf) | 95.47% | +Pruning [Reference Link](https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L174) From 29373650aa156069e344a78968f622ee24d7bb78 Mon Sep 17 00:00:00 2001 From: BBC Date: Wed, 22 Jun 2022 17:01:42 -0400 Subject: [PATCH 04/54] Add nohup version --- main.py | 5 +- main_nohup.py | 182 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 185 insertions(+), 2 deletions(-) create mode 100644 main_nohup.py diff --git a/main.py b/main.py index fe5971aa9..fa8258833 100644 --- a/main.py +++ b/main.py @@ -144,7 +144,7 @@ def train(epoch): def test(epoch): global best_acc if 
args.prune: - prune(net, args.prune_rate) + prune(net, args.pruning_rate) net.eval() test_loss = 0 @@ -160,7 +160,7 @@ def test(epoch): _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() - + progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) @@ -185,3 +185,4 @@ def test(epoch): test(epoch) if not args.train: break scheduler.step() + diff --git a/main_nohup.py b/main_nohup.py new file mode 100644 index 000000000..0d90347f7 --- /dev/null +++ b/main_nohup.py @@ -0,0 +1,182 @@ +'''Train CIFAR10 with PyTorch.''' +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +import torch.backends.cudnn as cudnn + +import torchvision +import torchvision.transforms as transforms + +import os +import argparse + +from models import * + + +parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training') +parser.add_argument('--lr', default=0.1, type=float, help='learning rate') +parser.add_argument('--resume', '-r', action='store_true', + help='resume from checkpoint') +parser.add_argument('--net', default='SimpleDLA') +parser.add_argument('--train', type=bool, default=False) +parser.add_argument('--test', type=bool, default=False) +parser.add_argument('--epochs', type=int, default=200) +parser.add_argument('--prune', type=bool, default=False) +parser.add_argument('--pruning_rate', type=float, default=0.30) + +args = parser.parse_args() + +device = 'cuda' if torch.cuda.is_available() else 'cpu' +best_acc = 0 # best test accuracy +start_epoch = 0 # start from epoch 0 or last checkpoint epoch + +# Data +print('==> Preparing data..') +transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +transform_test = transforms.Compose([ + 
transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +trainset = torchvision.datasets.CIFAR10( + root='./data', train=True, download=True, transform=transform_train) +trainloader = torch.utils.data.DataLoader( + trainset, batch_size=128, shuffle=True, num_workers=2) + +testset = torchvision.datasets.CIFAR10( + root='./data', train=False, download=True, transform=transform_test) +testloader = torch.utils.data.DataLoader( + testset, batch_size=100, shuffle=False, num_workers=2) + +classes = ('plane', 'car', 'bird', 'cat', 'deer', + 'dog', 'frog', 'horse', 'ship', 'truck') + +# Model +print('==> Building model..') +if args.net == 'VGG19': net = VGG('VGG19') +elif args.net == 'ResNet18': net = ResNet18() +elif args.net == 'PreActResNet18': net = PreActResNet18() +elif args.net == 'GoogLeNet': net = GoogLeNet() +elif args.net == 'DenseNet121': net = DenseNet121() +elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() +elif args.net == 'MobileNet': net = MobileNet() +elif args.net == 'MobileNetV2': net = MobileNetV2() +elif args.net == 'DPN92': net = DPN92() +elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() +elif args.net == 'SENet18': net = SENet18() +elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1) +elif args.net == 'EfficientNetB0': net = EfficientNetB0() +elif args.net == 'RegNetX_200MF': net = RegNetX_200MF() +elif args.net == 'SimpleDLA': net = SimpleDLA() + +# Borrow sparsity() and prune() from +# https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L174 +def sparsity(model): + # Return global model sparsity + a, b = 0, 0 + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... 
', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + +net = net.to(device) +if device == 'cuda': + net = torch.nn.DataParallel(net) + cudnn.benchmark = True + +if args.resume: + # Load checkpoint. + print('==> Resuming from checkpoint..') + assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' + checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net)) + net.load_state_dict(checkpoint['net']) + best_acc = checkpoint['acc'] + start_epoch = checkpoint['epoch'] + +criterion = nn.CrossEntropyLoss() +optimizer = optim.SGD(net.parameters(), lr=args.lr, + momentum=0.9, weight_decay=5e-4) +scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200) + + +# Training +def train(epoch): + print('\nEpoch: %d' % epoch) + net.train() + train_loss = 0 + correct = 0 + total = 0 + for batch_idx, (inputs, targets) in enumerate(trainloader): + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() + + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + +def test(epoch): + global best_acc + if args.prune: + prune(net, args.pruning_rate) + + net.eval() + test_loss = 0 + correct = 0 + total = 0 + with torch.no_grad(): + for batch_idx, (inputs, targets) in enumerate(testloader): + inputs, targets = inputs.to(device), targets.to(device) + outputs = net(inputs) + loss = criterion(outputs, targets) + + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + + # Save checkpoint. 
+ acc = 100.*correct/total + if acc > best_acc: + print('Saving..') + state = { + 'net': net.state_dict(), + 'acc': acc, + 'epoch': epoch, + } + if not os.path.isdir('checkpoint'): + os.mkdir('checkpoint') + torch.save(state, './checkpoint/{}_ckpt.pth'.format(args.net)) + best_acc = acc + + +for epoch in range(args.epochs): + if args.train: train(epoch) + if args.test: + test(epoch) + if not args.train: break + scheduler.step() + From b782bba05821e2a31871b46adf9d33da3b00e036 Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 24 Jun 2022 13:47:04 -0400 Subject: [PATCH 05/54] --train --test with action | Save on GPU, test on CPU --- main.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/main.py b/main.py index fa8258833..df574e338 100644 --- a/main.py +++ b/main.py @@ -20,15 +20,17 @@ parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint') parser.add_argument('--net', default='SimpleDLA') -parser.add_argument('--train', type=bool, default=False) -parser.add_argument('--test', type=bool, default=False) +parser.add_argument('--train', action='store_true') +parser.add_argument('--test', action='store_true') parser.add_argument('--epochs', type=int, default=200) -parser.add_argument('--prune', type=bool, default=False) +parser.add_argument('--prune', action='store_true') parser.add_argument('--pruning_rate', type=float, default=0.30) +parser.add_argument('--test_batch_size', type=int, default=100) +parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') args = parser.parse_args() -device = 'cuda' if torch.cuda.is_available() else 'cpu' +device = 'cuda' if torch.cuda.is_available() and args.select_device == 'gpu' else 'cpu' best_acc = 0 # best test accuracy start_epoch = 0 # start from epoch 0 or last checkpoint epoch @@ -54,7 +56,7 @@ testset = torchvision.datasets.CIFAR10( root='./data', train=False, download=True, transform=transform_test) testloader = 
torch.utils.data.DataLoader( - testset, batch_size=100, shuffle=False, num_workers=2) + testset, batch_size=args.test_batch_size, shuffle=False, num_workers=1) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') @@ -106,10 +108,15 @@ def prune(model, amount=0.3): # Load checkpoint. print('==> Resuming from checkpoint..') assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' - checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net)) - net.load_state_dict(checkpoint['net']) + + print('\n\ndevice: ', device) + checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net), map_location='cpu') # device) + # print('\n\n checkpoint.keys(): ', checkpoint.keys()) + # print('\n\n checkpoint[net].keys(): ', checkpoint['net'].keys()) + net.load_state_dict(checkpoint['net'], strict=False) best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] + # net = net.to(device) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=args.lr, @@ -152,6 +159,7 @@ def test(epoch): total = 0 with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): + print('device: ', device) inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs, targets) @@ -178,7 +186,7 @@ def test(epoch): torch.save(state, './checkpoint/{}_ckpt.pth'.format(args.net)) best_acc = acc - +print('\n\nargs.train: ', args.train, ', args.test:', args.test) for epoch in range(args.epochs): if args.train: train(epoch) if args.test: From aea8bde1f3b27f82e80c419ea072b4903f0640d3 Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 24 Jun 2022 13:56:20 -0400 Subject: [PATCH 06/54] --train --test with action --- main_nohup.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/main_nohup.py b/main_nohup.py index 0d90347f7..0ac30f9e5 100644 --- a/main_nohup.py +++ b/main_nohup.py @@ -19,11 +19,13 @@ parser.add_argument('--resume', '-r', 
action='store_true', help='resume from checkpoint') parser.add_argument('--net', default='SimpleDLA') -parser.add_argument('--train', type=bool, default=False) -parser.add_argument('--test', type=bool, default=False) +parser.add_argument('--train', action='store_true') +parser.add_argument('--test', action='store_true') parser.add_argument('--epochs', type=int, default=200) -parser.add_argument('--prune', type=bool, default=False) +parser.add_argument('--prune', action='store_true') parser.add_argument('--pruning_rate', type=float, default=0.30) +parser.add_argument('--test_batch_size', type=int, default=100) +parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') args = parser.parse_args() @@ -53,7 +55,7 @@ testset = torchvision.datasets.CIFAR10( root='./data', train=False, download=True, transform=transform_test) testloader = torch.utils.data.DataLoader( - testset, batch_size=100, shuffle=False, num_workers=2) + testset, batch_size=args.test_batch_size, shuffle=False, num_workers=2) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') From b9f693a4b6b70d83f8b669168ddafac626c66877 Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 24 Jun 2022 13:58:10 -0400 Subject: [PATCH 07/54] Test on CPU --- README.md | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index d153e66f9..71b0be3fa 100644 --- a/README.md +++ b/README.md @@ -9,16 +9,22 @@ I'm playing with [PyTorch](http://pytorch.org/) on the CIFAR10 dataset. 
## Training ``` # Start training with: -python main.py --net VGG16 --train True --test True +python main.py --net ResNet18 --train --test # You can manually resume the training with: -python main.py --net VGG16 --train True --test True --resume --lr=0.01 +python main.py --net ResNet18 --train --test --resume --lr=0.01 +``` + +## Testing +``` +# Test only on GPU +python main.py --net ResNet18 --test -# Test only -python main.py --net VGG16 --test True +# Test only on GPU with pruning (0.3) +python main.py --net ResNet18 --test --prune --pruning_rate 0.3 -# Test only with pruning (0.3) -python main.py --net VGG16 --test True --prune True --pruning_rate 0.3 +# Test only on CPU +python main.py --net ResNet18 --test --select_device cpu ``` ## Accuracy From 3102ecf4ee0d63a942ededbf47322dd85c66fa3e Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 24 Jun 2022 14:03:35 -0400 Subject: [PATCH 08/54] Clean main.py --- main.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/main.py b/main.py index df574e338..0493f0740 100644 --- a/main.py +++ b/main.py @@ -110,13 +110,10 @@ def prune(model, amount=0.3): assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' 
print('\n\ndevice: ', device) - checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net), map_location='cpu') # device) - # print('\n\n checkpoint.keys(): ', checkpoint.keys()) - # print('\n\n checkpoint[net].keys(): ', checkpoint['net'].keys()) + checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net), map_location=device) net.load_state_dict(checkpoint['net'], strict=False) best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] - # net = net.to(device) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=args.lr, From c85353c2e3d69b87bcdf3ce4bf78e66c4fb658f0 Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 24 Jun 2022 15:01:40 -0400 Subject: [PATCH 09/54] Print summary of net --- main.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/main.py b/main.py index 0493f0740..d90c5487b 100644 --- a/main.py +++ b/main.py @@ -4,6 +4,7 @@ import torch.optim as optim import torch.nn.functional as F import torch.backends.cudnn as cudnn +from torchinfo import summary import torchvision import torchvision.transforms as transforms @@ -149,6 +150,8 @@ def test(epoch): global best_acc if args.prune: prune(net, args.pruning_rate) + input_size = (1, 3, 32, 32) + summary(net, input_size) net.eval() test_loss = 0 From 18edadddfa4be8fa8ad2f492613d403216df7045 Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 24 Jun 2022 15:36:11 -0400 Subject: [PATCH 10/54] Print layer params --- main.py | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/main.py b/main.py index d90c5487b..7301c5cff 100644 --- a/main.py +++ b/main.py @@ -14,7 +14,7 @@ from models import * from utils import progress_bar - +import time parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training') parser.add_argument('--lr', default=0.1, type=float, help='learning rate') @@ -100,6 +100,27 @@ def prune(model, amount=0.3): prune.remove(m, 'weight') # make permanent print(' %.3g global sparsity' % sparsity(model)) + +def 
count_layer_params(model, layer_name=nn.Conv2d): + print('\n\n layer_name: ', layer_name) + total_params = 0 + total_traina_params = 0 + n_layers = 0 + for name, m in model.named_modules(): + if isinstance(m, layer_name): + # print('\nm:', m) + # print('\ndir(m): ', dir(m)) + + for name, parameter in m.named_parameters(): + params = parameter.numel() + total_params += params + if not parameter.requires_grad: continue + n_layers += 1 + total_traina_params += params + print('\n\nlayer_name: {}, total_params: {}, total_traina_params: {}, n_layers: {}'.\ + format(layer_name, total_params, total_traina_params, n_layers)) + time.sleep(100) + net = net.to(device) if device == 'cuda': net = torch.nn.DataParallel(net) @@ -152,6 +173,8 @@ def test(epoch): prune(net, args.pruning_rate) input_size = (1, 3, 32, 32) summary(net, input_size) + count_layer_params(net) + net.eval() test_loss = 0 From ba1df4223a15459d30a2b960479ec9a18196a574 Mon Sep 17 00:00:00 2001 From: BBC Date: Thu, 30 Jun 2022 19:59:38 -0400 Subject: [PATCH 11/54] Add Trained Weights --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 71b0be3fa..1b750d9d8 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,9 @@ python main.py --net ResNet18 --test --prune --pruning_rate 0.3 python main.py --net ResNet18 --test --select_device cpu ``` +# Trained Weights +[Google Drive](https://drive.google.com/drive/folders/1DRcb7uw1goot8doydHAc0ip3us5zjilk?usp=sharing) + ## Accuracy | Model | Acc. 
| | ----------------- | ----------- | From b4cbcd284e7070a11dd747e493d5197c62e57e89 Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 1 Jul 2022 20:15:17 -0400 Subject: [PATCH 12/54] test_sim.py --- test_sim.py | 302 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 302 insertions(+) create mode 100644 test_sim.py diff --git a/test_sim.py b/test_sim.py new file mode 100644 index 000000000..b3eb3c207 --- /dev/null +++ b/test_sim.py @@ -0,0 +1,302 @@ +'''Train CIFAR10 with PyTorch.''' +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +import torch.backends.cudnn as cudnn +from torchinfo import summary + +import torchvision +import torchvision.transforms as transforms + +import os +import argparse + +from models import * +from utils import progress_bar +import time +import numpy as np +import matplotlib.pyplot as plt +import cv2 + +parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training') +parser.add_argument('--lr', default=0.1, type=float, help='learning rate') +parser.add_argument('--resume', '-r', action='store_true', + help='resume from checkpoint') +parser.add_argument('--net', default='SimpleDLA') +parser.add_argument('--train', action='store_true') +parser.add_argument('--test', action='store_true') +parser.add_argument('--epochs', type=int, default=200) +parser.add_argument('--prune', action='store_true') +parser.add_argument('--pruning_rate', type=float, default=0.30) +parser.add_argument('--test_batch_size', type=int, default=100) +parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') + +args = parser.parse_args() + +device = 'cuda' if torch.cuda.is_available() and args.select_device == 'gpu' else 'cpu' +best_acc = 0 # best test accuracy +start_epoch = 0 # start from epoch 0 or last checkpoint epoch + +# Data +print('==> Preparing data..') +transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + 
transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +transform_test = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +trainset = torchvision.datasets.CIFAR10( + root='./data', train=True, download=True, transform=transform_train) +trainloader = torch.utils.data.DataLoader( + trainset, batch_size=128, shuffle=True, num_workers=2) + +testset = torchvision.datasets.CIFAR10( + root='./data', train=False, download=True, transform=transform_test) +testloader = torch.utils.data.DataLoader( + testset, batch_size=args.test_batch_size, shuffle=False, num_workers=1) + +classes = ('plane', 'car', 'bird', 'cat', 'deer', + 'dog', 'frog', 'horse', 'ship', 'truck') + +# Model +print('==> Building model..') +if args.net == 'VGG19': net = VGG('VGG19') +elif args.net == 'ResNet18': net = ResNet18() +elif args.net == 'PreActResNet18': net = PreActResNet18() +elif args.net == 'GoogLeNet': net = GoogLeNet() +elif args.net == 'DenseNet121': net = DenseNet121() +elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() +elif args.net == 'MobileNet': net = MobileNet() +elif args.net == 'MobileNetV2': net = MobileNetV2() +elif args.net == 'DPN92': net = DPN92() +elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() +elif args.net == 'SENet18': net = SENet18() +elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1) +elif args.net == 'EfficientNetB0': net = EfficientNetB0() +elif args.net == 'RegNetX_200MF': net = RegNetX_200MF() +elif args.net == 'SimpleDLA': net = SimpleDLA() + +# Borrow sparsity() and prune() from +# https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L174 +def sparsity(model): + # Return global model sparsity + a, b = 0, 0 + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + +def prune(model, amount=0.3): + # Prune 
model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... ', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def count_layer_params(model, layer_name=nn.Conv2d): + print('\n\n layer_name: ', layer_name) + total_params = 0 + total_traina_params = 0 + n_layers = 0 + for name, m in model.named_modules(): + if isinstance(m, layer_name): + # print('\nm:', m) + # print('\ndir(m): ', dir(m)) + + for name, parameter in m.named_parameters(): + params = parameter.numel() + total_params += params + if not parameter.requires_grad: continue + n_layers += 1 + total_traina_params += params + print('\n\nlayer_name: {}, total_params: {}, total_traina_params: {}, n_layers: {}'.\ + format(layer_name, total_params, total_traina_params, n_layers)) + # time.sleep(100) + +net = net.to(device) +if device == 'cuda': + if args.train: net = torch.nn.DataParallel(net) + cudnn.benchmark = True + +if args.resume: + # Load checkpoint. + print('==> Resuming from checkpoint..') + assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' 
+ + print('\n\ndevice: ', device) + checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net), map_location=device) + net.load_state_dict(checkpoint['net'], strict=False) + print('\n model weights loaded!') + best_acc = checkpoint['acc'] + start_epoch = checkpoint['epoch'] + +criterion = nn.CrossEntropyLoss() +optimizer = optim.SGD(net.parameters(), lr=args.lr, + momentum=0.9, weight_decay=5e-4) +scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200) + + +# Training +def train(epoch): + print('\nEpoch: %d' % epoch) + net.train() + train_loss = 0 + correct = 0 + total = 0 + for batch_idx, (inputs, targets) in enumerate(trainloader): + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() + + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' + % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) + + +def test(epoch): + global best_acc + if args.prune: + prune(net, args.pruning_rate) + input_size = (1, 3, 32, 32) + summary(net, input_size) + count_layer_params(net) + + # print('------------') + # print(net) + + def extract_features_labels(my_net, my_data): + intermediate_outputs = [] + + def hook(module, input, output): + for output_each in output: + intermediate_outputs.append(output_each.detach().cpu().tolist()) # intermediate outputs + + # print('np.shape(my_data): ', np.shape(my_data)) + # print('\n net: ', net) + # print('\n dir(my_net): ', dir(my_net)) + # print('\n dir(my_net.children()): ', dir(my_net.children())) + my_net.layer4[1].shortcut.register_forward_hook(hook) # intermediate outputs from the third last fc layer + + features = [] + labels = [] + # print('\n np.shape(my_data): ', np.shape(my_data)) + ''' + e.g. 
+ np.shape(my_data): torch.Size([1, 3, 32, 32]) + ''' + # e.g. torch.Size([1, 3, 32, 32]) + # for step, (x, y) in enumerate(my_data): + # batch_x = Variable(x) + # batch_y = Variable(y) + # output = my_net(batch_x) + # labels.extend(batch_y.numpy().tolist()) + + output = my_net(my_data) + features = torch.from_numpy(np.array(intermediate_outputs)) + # print('\n np.shape(output): ', np.shape(output)) + # print('\n np.shape(np.array(features)): ', np.shape(np.array(features))) + ''' + e.g. + np.shape(output): torch.Size([1, 10]) + np.shape(np.array(features)): (1, 512, 4, 4) + ''' + # features = torch.flatten(features) + # print('\n after flattened - np.shape(features): ', np.shape(features)) + # return np.array(features) + # print('\n np.shape(features): ', np.shape(features)) + return features + + def vis_compute_sim(net, input_0, input_1): + feats_0 = extract_features_labels(net, input_0) + feats_1 = extract_features_labels(net, input_1) + input_0 = (input_0 / 2 + 0.5) # * 255 + input_1 = (input_1 / 2 + 0.5) # * 255 + + plt.imshow(np.transpose(torch.squeeze(input_0).cpu(), (1, 2, 0))); plt.show() + plt.imshow(np.transpose(torch.squeeze(input_1).cpu(), (1, 2, 0))); plt.show() + # sim_score = np.cos(np.array(feats_0).flatten(), np.array(feats_1).flatten()) + print('\n np.shape(feats_0): ', np.shape(feats_0)) + # torch.Size([1, 512, 4, 4]) + cos = nn.CosineSimilarity(dim=1, eps=1e-6) + sim_score = cos(feats_0, feats_1) + print('\n np.shape(sim_score): ', np.shape(sim_score)) + sim_score = sim_score[0][0][0] # torch.mean(sim_score) + return sim_score + + net.eval() + test_loss = 0 + correct = 0 + total = 0 + with torch.no_grad(): + for batch_idx, (inputs, targets) in enumerate(testloader): + # print('device: ', device) + # print('\n np.shape(inputs): ', np.shape(inputs)) + # e.g. 
torch.Size([5, 3, 32, 32]) + inputs, targets = inputs.to(device), targets.to(device) + outputs = net(inputs) + loss = criterion(outputs, targets) + + # print('targets: ', targets) + # print('\n np.shape(targets): ', np.shape(targets)) + # print('\n np.shape(inputs): ', np.shape(inputs)) + # e.g. + # np.shape(targets): torch.Size([5]) + # np.shape(inputs): torch.Size([5, 3, 32, 32]) # [batch_size, channels, , ] + + for i in range(args.test_batch_size): + for j in range(args.test_batch_size): + input_0, input_1 = torch.unsqueeze(inputs[i], dim=0), torch.unsqueeze(inputs[j], dim=0) + sim_score = vis_compute_sim(net, input_0, input_1) + print('\n =====================') + print('\n i: ', i, ', j: ', j) + print('\n targets[i]: ', targets[i], ', targets[j]: ', targets[j]) + print('\n sim_score: ', sim_score) + print('\n ---------------------') + + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' + % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) + + # Save checkpoint. 
+ acc = 100.*correct/total + if acc > best_acc: + print('Saving..') + state = { + 'net': net.state_dict(), + 'acc': acc, + 'epoch': epoch, + } + if not os.path.isdir('checkpoint'): + os.mkdir('checkpoint') + torch.save(state, './checkpoint/{}_ckpt.pth'.format(args.net)) + best_acc = acc + +print('\n\nargs.train: ', args.train, ', args.test:', args.test) +for epoch in range(args.epochs): + if args.train: train(epoch) + if args.test: + test(epoch) + if not args.train: break + scheduler.step() From 10d882895c6f065bd944c1780ac7de95557c6323 Mon Sep 17 00:00:00 2001 From: BBC Date: Thu, 7 Jul 2022 18:10:59 -0400 Subject: [PATCH 13/54] main_n_cls.py --- main_n_cls.py | 301 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 301 insertions(+) create mode 100644 main_n_cls.py diff --git a/main_n_cls.py b/main_n_cls.py new file mode 100644 index 000000000..4b0e45ef8 --- /dev/null +++ b/main_n_cls.py @@ -0,0 +1,301 @@ +'''Train CIFAR10 with PyTorch.''' +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +import torch.backends.cudnn as cudnn +from torchinfo import summary + +import torchvision +import torchvision.transforms as transforms + +import os +import argparse + +from models import * +from utils import progress_bar +import time +import numpy as np + +parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training') +parser.add_argument('--lr', default=0.1, type=float, help='learning rate') +parser.add_argument('--resume', '-r', action='store_true', + help='resume from checkpoint') +parser.add_argument('--net', default='SimpleDLA') +parser.add_argument('--train', action='store_true') +parser.add_argument('--test', action='store_true') +parser.add_argument('--epochs', type=int, default=200) +parser.add_argument('--prune', action='store_true') +parser.add_argument('--pruning_rate', type=float, default=0.30) +parser.add_argument('--train_batch_size', type=int, default=128) 
+parser.add_argument('--test_batch_size', type=int, default=100) +parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') +parser.add_argument('--num_class', type=int, default=10) + +args = parser.parse_args() + +device = 'cuda' if torch.cuda.is_available() and args.select_device == 'gpu' else 'cpu' +best_acc = 0 # best test accuracy +start_epoch = 0 # start from epoch 0 or last checkpoint epoch + +# Data +print('==> Preparing data..') +transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +transform_test = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +def prepare_dataset(num_class=args.num_class): + trainset = torchvision.datasets.CIFAR10( + root='./data', train=True, download=True, transform=transform_train) + trainloader_all_cls = torch.utils.data.DataLoader( + trainset, batch_size=args.train_batch_size, shuffle=True, num_workers=2) + + testset = torchvision.datasets.CIFAR10( + root='./data', train=False, download=True, transform=transform_test) + testloader_all_cls = torch.utils.data.DataLoader( + testset, batch_size=args.test_batch_size, shuffle=False, num_workers=1) + + n_cls_ls = list(range(num_class)) + + # Prepare n_cls data for train set + train_inputs_n_cls, train_targets_n_cls = None, None + for batch_idx, (inputs, targets) in enumerate(trainloader_all_cls): + inputs, targets = inputs.to(device), targets.to(device) + # print('\n train() - np.shape(inputs): ', np.shape(inputs)) + # print('\n train() - np.shape(targets): ', np.shape(targets)) + ''' + train() - np.shape(inputs): torch.Size([128, 3, 32, 32]) + train() - np.shape(targets): torch.Size([128]) + ''' + n_cls_indices = [t_i for t_i, target in enumerate(targets) if target in n_cls_ls] + + for in_i, input_ in 
enumerate(inputs): + if in_i in n_cls_indices: + # print(np.shape(input_)) + # e.g. torch.Size([3, 32, 32]) + if train_inputs_n_cls is None and train_targets_n_cls is None: + train_inputs_n_cls = torch.unsqueeze(input_, axis=0) + train_targets_n_cls = torch.unsqueeze(targets[in_i], axis=0) + else: + train_inputs_n_cls = torch.cat((train_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) + train_targets_n_cls = torch.cat((train_targets_n_cls, torch.unsqueeze(targets[in_i], axis=0)), 0) + # train_inputs_n_cls.append(input_) + print('\n prepare_dataset() - train_inputs_n_cls.shape: ', train_inputs_n_cls.shape) + # e.g. torch.Size([128, 3, 32, 32]) + + # Prepare n_cls data for test set + test_inputs_n_cls, test_targets_n_cls = None, None + for batch_idx, (inputs, targets) in enumerate(testloader_all_cls): + inputs, targets = inputs.to(device), targets.to(device) + # print('\n train() - np.shape(inputs): ', np.shape(inputs)) + # print('\n train() - np.shape(targets): ', np.shape(targets)) + ''' + train() - np.shape(inputs): torch.Size([128, 3, 32, 32]) + train() - np.shape(targets): torch.Size([128]) + ''' + n_cls_indices = [t_i for t_i, target in enumerate(targets) if target in n_cls_ls] + + for in_i, input_ in enumerate(inputs): + if in_i in n_cls_indices: + # print(np.shape(input_)) + # e.g. torch.Size([3, 32, 32]) + if test_inputs_n_cls is None and test_targets_n_cls is None: + test_inputs_n_cls = torch.unsqueeze(input_, axis=0) + test_targets_n_cls = torch.unsqueeze(targets[in_i], axis=0) + else: + test_inputs_n_cls = torch.cat((test_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) + test_targets_n_cls = torch.cat((test_targets_n_cls, torch.unsqueeze(targets[in_i], axis=0)), 0) + # test_inputs_n_cls.append(input_) + print('\n prepare_dataset() - test_inputs_n_cls.shape: ', test_inputs_n_cls.shape) + # e.g. 
torch.Size([128, 3, 32, 32]) + return train_inputs_n_cls, train_targets_n_cls, test_inputs_n_cls, test_targets_n_cls + +train_inputs_n_cls, train_targets_n_cls, test_inputs_n_cls, test_targets_n_cls = prepare_dataset(args.num_class) + +classes = ('plane', 'car', 'bird', 'cat', 'deer', + 'dog', 'frog', 'horse', 'ship', 'truck') + +# Model +print('==> Building model..') +if args.net == 'VGG19': net = VGG('VGG19') +elif args.net == 'ResNet18': net = ResNet18() +elif args.net == 'PreActResNet18': net = PreActResNet18() +elif args.net == 'GoogLeNet': net = GoogLeNet() +elif args.net == 'DenseNet121': net = DenseNet121() +elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() +elif args.net == 'MobileNet': net = MobileNet() +elif args.net == 'MobileNetV2': net = MobileNetV2() +elif args.net == 'DPN92': net = DPN92() +elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() +elif args.net == 'SENet18': net = SENet18() +elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1) +elif args.net == 'EfficientNetB0': net = EfficientNetB0() +elif args.net == 'RegNetX_200MF': net = RegNetX_200MF() +elif args.net == 'SimpleDLA': net = SimpleDLA() + +# Borrow sparsity() and prune() from +# https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L174 +def sparsity(model): + # Return global model sparsity + a, b = 0, 0 + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... 
', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def count_layer_params(model, layer_name=nn.Conv2d): + print('\n\n layer_name: ', layer_name) + total_params = 0 + total_traina_params = 0 + n_layers = 0 + for name, m in model.named_modules(): + if isinstance(m, layer_name): + # print('\nm:', m) + # print('\ndir(m): ', dir(m)) + + for name, parameter in m.named_parameters(): + params = parameter.numel() + total_params += params + if not parameter.requires_grad: continue + n_layers += 1 + total_traina_params += params + print('\n\nlayer_name: {}, total_params: {}, total_traina_params: {}, n_layers: {}'.\ + format(layer_name, total_params, total_traina_params, n_layers)) + # time.sleep(100) + +net = net.to(device) +if device == 'cuda': + net = torch.nn.DataParallel(net) + cudnn.benchmark = True + +if args.resume: + # Load checkpoint. + print('==> Resuming from checkpoint..') + assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' 
+ + print('\n\ndevice: ', device) + checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net), map_location=device) + net.load_state_dict(checkpoint['net'], strict=False) + best_acc = checkpoint['acc'] + start_epoch = checkpoint['epoch'] + +criterion = nn.CrossEntropyLoss() +optimizer = optim.SGD(net.parameters(), lr=args.lr, + momentum=0.9, weight_decay=5e-4) +scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200) + + +# Training +def train(epoch): + print('\nEpoch: %d' % epoch) + net.train() + train_loss = 0 + correct = 0 + total = 0 + # for batch_idx, (inputs, targets) in enumerate(trainloader): + for batch_idx in range(len(train_inputs_n_cls) // args.train_batch_size): + # inputs, targets = inputs.to(device), targets.to(device) + if (batch_idx + 1) * args.train_batch_size < len(train_inputs_n_cls): + inputs = train_inputs_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] + targets = train_targets_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] + else: + inputs = train_inputs_n_cls[batch_idx * args.train_batch_size :] + targets = train_targets_n_cls[batch_idx * args.train_batch_size :] + # print('\n train() - inputs.shape: ', inputs.shape) + # print('\n train() - targets.shape: ', targets.shape) + ''' + e.g. 
+ train() - inputs.shape: torch.Size([128, 3, 32, 32]) + train() - targets.shape: torch.Size([128]) + ''' + + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() + + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + progress_bar(batch_idx, len(train_inputs_n_cls), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' + % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) + + +def test(epoch): + global best_acc + if args.prune: + prune(net, args.pruning_rate) + input_size = (1, 3, 32, 32) + summary(net, input_size) + count_layer_params(net) + + net.eval() + test_loss = 0 + correct = 0 + total = 0 + with torch.no_grad(): + # for batch_idx, (inputs, targets) in enumerate(testloader): + for batch_idx in range(len(test_inputs_n_cls) // args.test_batch_size + 1): + print('device: ', device) + # inputs, targets = inputs.to(device), targets.to(device) + if (batch_idx + 1) * args.train_batch_size < len(test_inputs_n_cls): + inputs = test_inputs_n_cls[batch_idx * args.test_batch_size :] + targets = test_targets_n_cls[batch_idx * args.test_batch_size :] + else: + inputs = test_inputs_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] + targets = test_targets_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] + + outputs = net(inputs) + loss = criterion(outputs, targets) + + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + progress_bar(batch_idx, len(test_inputs_n_cls), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' + % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) + + # Save checkpoint. 
+ acc = 100.*correct/total + if acc > best_acc: + print('Saving..') + state = { + 'net': net.state_dict(), + 'acc': acc, + 'epoch': epoch, + } + if not os.path.isdir('checkpoint'): + os.mkdir('checkpoint') + torch.save(state, './checkpoint/{}_ckpt.pth'.format(args.net)) + best_acc = acc + +print('\n\nargs.train: ', args.train, ', args.test:', args.test) +for epoch in range(args.epochs): + if args.train: train(epoch) + if args.test: + test(epoch) + if not args.train: break + scheduler.step() From 16cb8ab8a76b0a6514b43d3e2b069cd189925d36 Mon Sep 17 00:00:00 2001 From: BBC Date: Thu, 7 Jul 2022 18:12:57 -0400 Subject: [PATCH 14/54] Start training for n_cls experiments --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 1b750d9d8..db4ee2a5e 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,9 @@ I'm playing with [PyTorch](http://pytorch.org/) on the CIFAR10 dataset. # Start training with: python main.py --net ResNet18 --train --test +# Start training for n_cls experiments: +python main_n_cls.py --net MobileNetV2 --train --test --num_class 5 + # You can manually resume the training with: python main.py --net ResNet18 --train --test --resume --lr=0.01 ``` From f8a84cf4512e12f5b65208c62ed804eee0d766fd Mon Sep 17 00:00:00 2001 From: BBC Date: Thu, 7 Jul 2022 18:34:36 -0400 Subject: [PATCH 15/54] Save n_cls checkpoints --- main_n_cls.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index 4b0e45ef8..e2912fc5c 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -193,7 +193,7 @@ def count_layer_params(model, layer_name=nn.Conv2d): assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' 
print('\n\ndevice: ', device) - checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net), map_location=device) + checkpoint = torch.load('./checkpoint/{}_n_cls_{}_ckpt.pth'.format(args.net, args.num_class), map_location=device) net.load_state_dict(checkpoint['net'], strict=False) best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] @@ -289,7 +289,7 @@ def test(epoch): } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') - torch.save(state, './checkpoint/{}_ckpt.pth'.format(args.net)) + torch.save(state, './checkpoint/{}_n_cls_{}_ckpt.pth'.format(args.net, args.num_class)) best_acc = acc print('\n\nargs.train: ', args.train, ', args.test:', args.test) From 383d9592e1479f676ac015b9a7b53f1da886147b Mon Sep 17 00:00:00 2001 From: BBC Date: Thu, 7 Jul 2022 18:36:47 -0400 Subject: [PATCH 16/54] main_n_cls_nohup.py --- main_n_cls_nohup.py | 294 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 294 insertions(+) create mode 100644 main_n_cls_nohup.py diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py new file mode 100644 index 000000000..90f596d4a --- /dev/null +++ b/main_n_cls_nohup.py @@ -0,0 +1,294 @@ +'''Train CIFAR10 with PyTorch.''' +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +import torch.backends.cudnn as cudnn +from torchinfo import summary + +import torchvision +import torchvision.transforms as transforms + +import os +import argparse + +from models import * +import time +import numpy as np + +parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training') +parser.add_argument('--lr', default=0.1, type=float, help='learning rate') +parser.add_argument('--resume', '-r', action='store_true', + help='resume from checkpoint') +parser.add_argument('--net', default='SimpleDLA') +parser.add_argument('--train', action='store_true') +parser.add_argument('--test', action='store_true') +parser.add_argument('--epochs', type=int, default=200) 
+parser.add_argument('--prune', action='store_true') +parser.add_argument('--pruning_rate', type=float, default=0.30) +parser.add_argument('--train_batch_size', type=int, default=128) +parser.add_argument('--test_batch_size', type=int, default=100) +parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') +parser.add_argument('--num_class', type=int, default=10) + +args = parser.parse_args() + +device = 'cuda' if torch.cuda.is_available() and args.select_device == 'gpu' else 'cpu' +best_acc = 0 # best test accuracy +start_epoch = 0 # start from epoch 0 or last checkpoint epoch + +# Data +print('==> Preparing data..') +transform_train = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +transform_test = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +def prepare_dataset(num_class=args.num_class): + trainset = torchvision.datasets.CIFAR10( + root='./data', train=True, download=True, transform=transform_train) + trainloader_all_cls = torch.utils.data.DataLoader( + trainset, batch_size=args.train_batch_size, shuffle=True, num_workers=2) + + testset = torchvision.datasets.CIFAR10( + root='./data', train=False, download=True, transform=transform_test) + testloader_all_cls = torch.utils.data.DataLoader( + testset, batch_size=args.test_batch_size, shuffle=False, num_workers=1) + + n_cls_ls = list(range(num_class)) + + # Prepare n_cls data for train set + train_inputs_n_cls, train_targets_n_cls = None, None + for batch_idx, (inputs, targets) in enumerate(trainloader_all_cls): + inputs, targets = inputs.to(device), targets.to(device) + # print('\n train() - np.shape(inputs): ', np.shape(inputs)) + # print('\n train() - np.shape(targets): ', np.shape(targets)) + ''' + train() - np.shape(inputs): torch.Size([128, 3, 32, 
32]) + train() - np.shape(targets): torch.Size([128]) + ''' + n_cls_indices = [t_i for t_i, target in enumerate(targets) if target in n_cls_ls] + + for in_i, input_ in enumerate(inputs): + if in_i in n_cls_indices: + # print(np.shape(input_)) + # e.g. torch.Size([3, 32, 32]) + if train_inputs_n_cls is None and train_targets_n_cls is None: + train_inputs_n_cls = torch.unsqueeze(input_, axis=0) + train_targets_n_cls = torch.unsqueeze(targets[in_i], axis=0) + else: + train_inputs_n_cls = torch.cat((train_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) + train_targets_n_cls = torch.cat((train_targets_n_cls, torch.unsqueeze(targets[in_i], axis=0)), 0) + # train_inputs_n_cls.append(input_) + print('\n prepare_dataset() - train_inputs_n_cls.shape: ', train_inputs_n_cls.shape) + # e.g. torch.Size([128, 3, 32, 32]) + + # Prepare n_cls data for test set + test_inputs_n_cls, test_targets_n_cls = None, None + for batch_idx, (inputs, targets) in enumerate(testloader_all_cls): + inputs, targets = inputs.to(device), targets.to(device) + # print('\n train() - np.shape(inputs): ', np.shape(inputs)) + # print('\n train() - np.shape(targets): ', np.shape(targets)) + ''' + train() - np.shape(inputs): torch.Size([128, 3, 32, 32]) + train() - np.shape(targets): torch.Size([128]) + ''' + n_cls_indices = [t_i for t_i, target in enumerate(targets) if target in n_cls_ls] + + for in_i, input_ in enumerate(inputs): + if in_i in n_cls_indices: + # print(np.shape(input_)) + # e.g. 
torch.Size([3, 32, 32]) + if test_inputs_n_cls is None and test_targets_n_cls is None: + test_inputs_n_cls = torch.unsqueeze(input_, axis=0) + test_targets_n_cls = torch.unsqueeze(targets[in_i], axis=0) + else: + test_inputs_n_cls = torch.cat((test_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) + test_targets_n_cls = torch.cat((test_targets_n_cls, torch.unsqueeze(targets[in_i], axis=0)), 0) + # test_inputs_n_cls.append(input_) + print('\n prepare_dataset() - test_inputs_n_cls.shape: ', test_inputs_n_cls.shape) + # e.g. torch.Size([128, 3, 32, 32]) + return train_inputs_n_cls, train_targets_n_cls, test_inputs_n_cls, test_targets_n_cls + +train_inputs_n_cls, train_targets_n_cls, test_inputs_n_cls, test_targets_n_cls = prepare_dataset(args.num_class) + +classes = ('plane', 'car', 'bird', 'cat', 'deer', + 'dog', 'frog', 'horse', 'ship', 'truck') + +# Model +print('==> Building model..') +if args.net == 'VGG19': net = VGG('VGG19') +elif args.net == 'ResNet18': net = ResNet18() +elif args.net == 'PreActResNet18': net = PreActResNet18() +elif args.net == 'GoogLeNet': net = GoogLeNet() +elif args.net == 'DenseNet121': net = DenseNet121() +elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() +elif args.net == 'MobileNet': net = MobileNet() +elif args.net == 'MobileNetV2': net = MobileNetV2() +elif args.net == 'DPN92': net = DPN92() +elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() +elif args.net == 'SENet18': net = SENet18() +elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1) +elif args.net == 'EfficientNetB0': net = EfficientNetB0() +elif args.net == 'RegNetX_200MF': net = RegNetX_200MF() +elif args.net == 'SimpleDLA': net = SimpleDLA() + +# Borrow sparsity() and prune() from +# https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L174 +def sparsity(model): + # Return global model sparsity + a, b = 0, 0 + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + +def 
prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... ', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def count_layer_params(model, layer_name=nn.Conv2d): + print('\n\n layer_name: ', layer_name) + total_params = 0 + total_traina_params = 0 + n_layers = 0 + for name, m in model.named_modules(): + if isinstance(m, layer_name): + # print('\nm:', m) + # print('\ndir(m): ', dir(m)) + + for name, parameter in m.named_parameters(): + params = parameter.numel() + total_params += params + if not parameter.requires_grad: continue + n_layers += 1 + total_traina_params += params + print('\n\nlayer_name: {}, total_params: {}, total_traina_params: {}, n_layers: {}'.\ + format(layer_name, total_params, total_traina_params, n_layers)) + # time.sleep(100) + +net = net.to(device) +if device == 'cuda': + net = torch.nn.DataParallel(net) + cudnn.benchmark = True + +if args.resume: + # Load checkpoint. + print('==> Resuming from checkpoint..') + assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' 
+ + print('\n\ndevice: ', device) + checkpoint = torch.load('./checkpoint/{}_n_cls_{}_ckpt.pth'.format(args.net, args.num_class), map_location=device) + net.load_state_dict(checkpoint['net'], strict=False) + best_acc = checkpoint['acc'] + start_epoch = checkpoint['epoch'] + +criterion = nn.CrossEntropyLoss() +optimizer = optim.SGD(net.parameters(), lr=args.lr, + momentum=0.9, weight_decay=5e-4) +scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200) + + +# Training +def train(epoch): + print('\nEpoch: %d' % epoch) + net.train() + train_loss = 0 + correct = 0 + total = 0 + # for batch_idx, (inputs, targets) in enumerate(trainloader): + for batch_idx in range(len(train_inputs_n_cls) // args.train_batch_size): + # inputs, targets = inputs.to(device), targets.to(device) + if (batch_idx + 1) * args.train_batch_size < len(train_inputs_n_cls): + inputs = train_inputs_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] + targets = train_targets_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] + else: + inputs = train_inputs_n_cls[batch_idx * args.train_batch_size :] + targets = train_targets_n_cls[batch_idx * args.train_batch_size :] + # print('\n train() - inputs.shape: ', inputs.shape) + # print('\n train() - targets.shape: ', targets.shape) + ''' + e.g. 
+ train() - inputs.shape: torch.Size([128, 3, 32, 32]) + train() - targets.shape: torch.Size([128]) + ''' + + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() + + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + +def test(epoch): + global best_acc + if args.prune: + prune(net, args.pruning_rate) + input_size = (1, 3, 32, 32) + summary(net, input_size) + count_layer_params(net) + + net.eval() + test_loss = 0 + correct = 0 + total = 0 + with torch.no_grad(): + # for batch_idx, (inputs, targets) in enumerate(testloader): + for batch_idx in range(len(test_inputs_n_cls) // args.test_batch_size + 1): + print('device: ', device) + # inputs, targets = inputs.to(device), targets.to(device) + if (batch_idx + 1) * args.train_batch_size < len(test_inputs_n_cls): + inputs = test_inputs_n_cls[batch_idx * args.test_batch_size :] + targets = test_targets_n_cls[batch_idx * args.test_batch_size :] + else: + inputs = test_inputs_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] + targets = test_targets_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] + + outputs = net(inputs) + loss = criterion(outputs, targets) + + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + # Save checkpoint. 
+ acc = 100.*correct/total + if acc > best_acc: + print('Saving..') + state = { + 'net': net.state_dict(), + 'acc': acc, + 'epoch': epoch, + } + if not os.path.isdir('checkpoint'): + os.mkdir('checkpoint') + torch.save(state, './checkpoint/{}_n_cls_{}_ckpt.pth'.format(args.net, args.num_class)) + best_acc = acc + +print('\n\nargs.train: ', args.train, ', args.test:', args.test) +for epoch in range(args.epochs): + if args.train: train(epoch) + if args.test: + test(epoch) + if not args.train: break + scheduler.step() From c2c77144c9388b57ac5f4e379e2c0fbb9ddf02eb Mon Sep 17 00:00:00 2001 From: BBC Date: Thu, 7 Jul 2022 18:53:10 -0400 Subject: [PATCH 17/54] Check if inputs are empty before feeding into the network --- main_n_cls.py | 43 +++++++++++++++++++++++-------------------- main_n_cls_nohup.py | 35 +++++++++++++++++++---------------- 2 files changed, 42 insertions(+), 36 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index e2912fc5c..de6309358 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -214,6 +214,7 @@ def train(epoch): # for batch_idx, (inputs, targets) in enumerate(trainloader): for batch_idx in range(len(train_inputs_n_cls) // args.train_batch_size): # inputs, targets = inputs.to(device), targets.to(device) + inputs, targets = None, None if (batch_idx + 1) * args.train_batch_size < len(train_inputs_n_cls): inputs = train_inputs_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] targets = train_targets_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] @@ -227,20 +228,20 @@ def train(epoch): train() - inputs.shape: torch.Size([128, 3, 32, 32]) train() - targets.shape: torch.Size([128]) ''' + if inputs is not None and targets is not None: + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() - optimizer.zero_grad() - outputs = net(inputs) - loss = criterion(outputs, targets) - loss.backward() - optimizer.step() - - 
train_loss += loss.item() - _, predicted = outputs.max(1) - total += targets.size(0) - correct += predicted.eq(targets).sum().item() + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() - progress_bar(batch_idx, len(train_inputs_n_cls), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' - % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) + progress_bar(batch_idx, len(train_inputs_n_cls), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' + % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) def test(epoch): @@ -260,6 +261,7 @@ def test(epoch): for batch_idx in range(len(test_inputs_n_cls) // args.test_batch_size + 1): print('device: ', device) # inputs, targets = inputs.to(device), targets.to(device) + inputs, targets = None, None if (batch_idx + 1) * args.train_batch_size < len(test_inputs_n_cls): inputs = test_inputs_n_cls[batch_idx * args.test_batch_size :] targets = test_targets_n_cls[batch_idx * args.test_batch_size :] @@ -267,16 +269,17 @@ def test(epoch): inputs = test_inputs_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] targets = test_targets_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] - outputs = net(inputs) - loss = criterion(outputs, targets) + if inputs is not None and targets is not None: + outputs = net(inputs) + loss = criterion(outputs, targets) - test_loss += loss.item() - _, predicted = outputs.max(1) - total += targets.size(0) - correct += predicted.eq(targets).sum().item() + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() - progress_bar(batch_idx, len(test_inputs_n_cls), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' - % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) + progress_bar(batch_idx, len(test_inputs_n_cls), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' + % (test_loss/(batch_idx+1), 100.*correct/total, correct, 
total)) # Save checkpoint. acc = 100.*correct/total diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index 90f596d4a..c069cbf70 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -213,6 +213,7 @@ def train(epoch): # for batch_idx, (inputs, targets) in enumerate(trainloader): for batch_idx in range(len(train_inputs_n_cls) // args.train_batch_size): # inputs, targets = inputs.to(device), targets.to(device) + inputs, targets = None, None if (batch_idx + 1) * args.train_batch_size < len(train_inputs_n_cls): inputs = train_inputs_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] targets = train_targets_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] @@ -226,17 +227,17 @@ def train(epoch): train() - inputs.shape: torch.Size([128, 3, 32, 32]) train() - targets.shape: torch.Size([128]) ''' + if inputs is not None and targets is not None: + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() - optimizer.zero_grad() - outputs = net(inputs) - loss = criterion(outputs, targets) - loss.backward() - optimizer.step() - - train_loss += loss.item() - _, predicted = outputs.max(1) - total += targets.size(0) - correct += predicted.eq(targets).sum().item() + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() def test(epoch): @@ -256,6 +257,7 @@ def test(epoch): for batch_idx in range(len(test_inputs_n_cls) // args.test_batch_size + 1): print('device: ', device) # inputs, targets = inputs.to(device), targets.to(device) + inputs, targets = None, None if (batch_idx + 1) * args.train_batch_size < len(test_inputs_n_cls): inputs = test_inputs_n_cls[batch_idx * args.test_batch_size :] targets = test_targets_n_cls[batch_idx * args.test_batch_size :] @@ -263,13 +265,14 @@ def test(epoch): inputs = test_inputs_n_cls[batch_idx * args.test_batch_size : (batch_idx + 
1) * args.test_batch_size] targets = test_targets_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] - outputs = net(inputs) - loss = criterion(outputs, targets) + if inputs is not None and targets is not None: + outputs = net(inputs) + loss = criterion(outputs, targets) - test_loss += loss.item() - _, predicted = outputs.max(1) - total += targets.size(0) - correct += predicted.eq(targets).sum().item() + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() # Save checkpoint. acc = 100.*correct/total From f97824910d628818e39e2a06df8b6af256ff6501 Mon Sep 17 00:00:00 2001 From: BBC Date: Thu, 7 Jul 2022 19:05:10 -0400 Subject: [PATCH 18/54] Check if inputs are empty before feeding into the network --- main_n_cls.py | 4 ++-- main_n_cls_nohup.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index de6309358..f7d40e6b1 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -228,7 +228,7 @@ def train(epoch): train() - inputs.shape: torch.Size([128, 3, 32, 32]) train() - targets.shape: torch.Size([128]) ''' - if inputs is not None and targets is not None: + if inputs.size()[0] > 0 and targets.size()[0] > 0: optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets) @@ -269,7 +269,7 @@ def test(epoch): inputs = test_inputs_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] targets = test_targets_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] - if inputs is not None and targets is not None: + if inputs.size()[0] > 0 and targets.size()[0] > 0: outputs = net(inputs) loss = criterion(outputs, targets) diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index c069cbf70..1c87b9b49 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -227,7 +227,7 @@ def train(epoch): train() - inputs.shape: torch.Size([128, 3, 32, 32]) train() - 
targets.shape: torch.Size([128]) ''' - if inputs is not None and targets is not None: + if inputs.size()[0] > 0 and targets.size()[0] > 0: optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets) @@ -265,7 +265,7 @@ def test(epoch): inputs = test_inputs_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] targets = test_targets_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] - if inputs is not None and targets is not None: + if inputs.size()[0] > 0 and targets.size()[0] > 0: outputs = net(inputs) loss = criterion(outputs, targets) From 610e452b554506bf703eedcf4c63c90caf8b1e5e Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 8 Jul 2022 12:20:49 -0400 Subject: [PATCH 19/54] Correct num_classes layer for MobileNetV2 --- main_n_cls.py | 2 +- main_n_cls_nohup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index f7d40e6b1..6527f0ab2 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -132,7 +132,7 @@ def prepare_dataset(num_class=args.num_class): elif args.net == 'DenseNet121': net = DenseNet121() elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() elif args.net == 'MobileNet': net = MobileNet() -elif args.net == 'MobileNetV2': net = MobileNetV2() +elif args.net == 'MobileNetV2': net = MobileNetV2(args.num_classes) elif args.net == 'DPN92': net = DPN92() elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() elif args.net == 'SENet18': net = SENet18() diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index 1c87b9b49..e5c01d22d 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -131,7 +131,7 @@ def prepare_dataset(num_class=args.num_class): elif args.net == 'DenseNet121': net = DenseNet121() elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() elif args.net == 'MobileNet': net = MobileNet() -elif args.net == 'MobileNetV2': net = MobileNetV2() +elif args.net == 'MobileNetV2': net = MobileNetV2(args.num_classes) 
elif args.net == 'DPN92': net = DPN92() elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() elif args.net == 'SENet18': net = SENet18() From dad10af2bfa638a5b115ee4dd744e7f1cd005612 Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 8 Jul 2022 12:31:32 -0400 Subject: [PATCH 20/54] Correct num_class layer for MobileNetV2 --- main_n_cls.py | 2 +- main_n_cls_nohup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index 6527f0ab2..33e7c1c1d 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -132,7 +132,7 @@ def prepare_dataset(num_class=args.num_class): elif args.net == 'DenseNet121': net = DenseNet121() elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() elif args.net == 'MobileNet': net = MobileNet() -elif args.net == 'MobileNetV2': net = MobileNetV2(args.num_classes) +elif args.net == 'MobileNetV2': net = MobileNetV2(args.num_class) elif args.net == 'DPN92': net = DPN92() elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() elif args.net == 'SENet18': net = SENet18() diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index e5c01d22d..ec263fc4b 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -131,7 +131,7 @@ def prepare_dataset(num_class=args.num_class): elif args.net == 'DenseNet121': net = DenseNet121() elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() elif args.net == 'MobileNet': net = MobileNet() -elif args.net == 'MobileNetV2': net = MobileNetV2(args.num_classes) +elif args.net == 'MobileNetV2': net = MobileNetV2(args.num_class) elif args.net == 'DPN92': net = DPN92() elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() elif args.net == 'SENet18': net = SENet18() From 7ea9c170d4d2c7250c05a0c4ba332990a656a8a7 Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 8 Jul 2022 14:15:29 -0400 Subject: [PATCH 21/54] Move inputs,targets to GPU --- main_n_cls.py | 4 ++-- main_n_cls_nohup.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py 
index 33e7c1c1d..caead0b6d 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -213,7 +213,6 @@ def train(epoch): total = 0 # for batch_idx, (inputs, targets) in enumerate(trainloader): for batch_idx in range(len(train_inputs_n_cls) // args.train_batch_size): - # inputs, targets = inputs.to(device), targets.to(device) inputs, targets = None, None if (batch_idx + 1) * args.train_batch_size < len(train_inputs_n_cls): inputs = train_inputs_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] @@ -229,6 +228,7 @@ def train(epoch): train() - targets.shape: torch.Size([128]) ''' if inputs.size()[0] > 0 and targets.size()[0] > 0: + inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets) @@ -260,7 +260,6 @@ def test(epoch): # for batch_idx, (inputs, targets) in enumerate(testloader): for batch_idx in range(len(test_inputs_n_cls) // args.test_batch_size + 1): print('device: ', device) - # inputs, targets = inputs.to(device), targets.to(device) inputs, targets = None, None if (batch_idx + 1) * args.train_batch_size < len(test_inputs_n_cls): inputs = test_inputs_n_cls[batch_idx * args.test_batch_size :] @@ -270,6 +269,7 @@ def test(epoch): targets = test_targets_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] if inputs.size()[0] > 0 and targets.size()[0] > 0: + inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs, targets) diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index ec263fc4b..2b84c3899 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -212,7 +212,6 @@ def train(epoch): total = 0 # for batch_idx, (inputs, targets) in enumerate(trainloader): for batch_idx in range(len(train_inputs_n_cls) // args.train_batch_size): - # inputs, targets = inputs.to(device), targets.to(device) inputs, targets = None, None if (batch_idx + 1) * args.train_batch_size < 
len(train_inputs_n_cls): inputs = train_inputs_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] @@ -228,6 +227,7 @@ def train(epoch): train() - targets.shape: torch.Size([128]) ''' if inputs.size()[0] > 0 and targets.size()[0] > 0: + inputs, targets = inputs.to(device), targets.to(device) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, targets) @@ -256,7 +256,6 @@ def test(epoch): # for batch_idx, (inputs, targets) in enumerate(testloader): for batch_idx in range(len(test_inputs_n_cls) // args.test_batch_size + 1): print('device: ', device) - # inputs, targets = inputs.to(device), targets.to(device) inputs, targets = None, None if (batch_idx + 1) * args.train_batch_size < len(test_inputs_n_cls): inputs = test_inputs_n_cls[batch_idx * args.test_batch_size :] @@ -266,6 +265,7 @@ def test(epoch): targets = test_targets_n_cls[batch_idx * args.test_batch_size : (batch_idx + 1) * args.test_batch_size] if inputs.size()[0] > 0 and targets.size()[0] > 0: + inputs, targets = inputs.to(device), targets.to(device) outputs = net(inputs) loss = criterion(outputs, targets) From 62a444aff0ea7f0d675199c4a02b9cf02e444cfb Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 8 Jul 2022 14:25:20 -0400 Subject: [PATCH 22/54] Save models at epoch=0 --- main_n_cls.py | 2 +- main_n_cls_nohup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index caead0b6d..931ecc591 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -283,7 +283,7 @@ def test(epoch): # Save checkpoint. acc = 100.*correct/total - if acc > best_acc: + if epoch == 0 or acc > best_acc: print('Saving..') state = { 'net': net.state_dict(), diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index 2b84c3899..cfa67b229 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -276,7 +276,7 @@ def test(epoch): # Save checkpoint. 
acc = 100.*correct/total - if acc > best_acc: + if epoch == 0 or acc > best_acc: print('Saving..') state = { 'net': net.state_dict(), From a27d19c17883dba14f1471cc22e803cb088d1e66 Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 8 Jul 2022 22:33:04 -0400 Subject: [PATCH 23/54] Move net.to(device) after loading the weights --- main_n_cls.py | 3 +-- main_n_cls_nohup.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index 931ecc591..58c88c3d7 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -182,7 +182,6 @@ def count_layer_params(model, layer_name=nn.Conv2d): format(layer_name, total_params, total_traina_params, n_layers)) # time.sleep(100) -net = net.to(device) if device == 'cuda': net = torch.nn.DataParallel(net) cudnn.benchmark = True @@ -198,12 +197,12 @@ def count_layer_params(model, layer_name=nn.Conv2d): best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] +net = net.to(device) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200) - # Training def train(epoch): print('\nEpoch: %d' % epoch) diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index cfa67b229..5591d369d 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -181,7 +181,6 @@ def count_layer_params(model, layer_name=nn.Conv2d): format(layer_name, total_params, total_traina_params, n_layers)) # time.sleep(100) -net = net.to(device) if device == 'cuda': net = torch.nn.DataParallel(net) cudnn.benchmark = True @@ -197,12 +196,12 @@ def count_layer_params(model, layer_name=nn.Conv2d): best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] +net = net.to(device) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4) scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200) - # Training def train(epoch): 
print('\nEpoch: %d' % epoch) From 2e8d7ae4e54aad124bb0c0de93918f287918b6e3 Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 8 Jul 2022 23:07:30 -0400 Subject: [PATCH 24/54] Save models --- main_n_cls.py | 19 +++++++++++++++++-- main_n_cls_nohup.py | 19 +++++++++++++++++-- 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index 58c88c3d7..20126e65f 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -31,6 +31,8 @@ parser.add_argument('--test_batch_size', type=int, default=100) parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') parser.add_argument('--num_class', type=int, default=10) +parser.add_argument('--save_model_epoch_interval', type=int, default=10) +parser.add_argument('--load_epoch', type=str, default='best', help='best | ') args = parser.parse_args() @@ -282,7 +284,7 @@ def test(epoch): # Save checkpoint. acc = 100.*correct/total - if epoch == 0 or acc > best_acc: + if epoch % args.save_model_epoch_interval == 0: print('Saving..') state = { 'net': net.state_dict(), @@ -291,7 +293,20 @@ def test(epoch): } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') - torch.save(state, './checkpoint/{}_n_cls_{}_ckpt.pth'.format(args.net, args.num_class)) + torch.save(state, './checkpoint/{}_n_cls_{}_epoch_{}_ckpt.pth'.\ + format(args.net, args.num_class, str(args.save_model_epoch_interval))) + best_acc = acc + if acc > best_acc: + print('Saving..') + state = { + 'net': net.state_dict(), + 'acc': acc, + 'epoch': epoch, + } + if not os.path.isdir('checkpoint'): + os.mkdir('checkpoint') + torch.save(state, './checkpoint/{}_n_cls_{}_best_ckpt.pth'.\ + format(args.net, args.num_class)) best_acc = acc print('\n\nargs.train: ', args.train, ', args.test:', args.test) diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index 5591d369d..d77cbd063 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -30,6 +30,8 @@ parser.add_argument('--test_batch_size', type=int, default=100) 
parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') parser.add_argument('--num_class', type=int, default=10) +parser.add_argument('--save_model_epoch_interval', type=int, default=10) +parser.add_argument('--load_epoch', type=str, default='best', help='best | ') args = parser.parse_args() @@ -275,7 +277,7 @@ def test(epoch): # Save checkpoint. acc = 100.*correct/total - if epoch == 0 or acc > best_acc: + if epoch % args.save_model_epoch_interval == 0: print('Saving..') state = { 'net': net.state_dict(), @@ -284,7 +286,20 @@ def test(epoch): } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') - torch.save(state, './checkpoint/{}_n_cls_{}_ckpt.pth'.format(args.net, args.num_class)) + torch.save(state, './checkpoint/{}_n_cls_{}_epoch_{}_ckpt.pth'.\ + format(args.net, args.num_class, str(args.save_model_epoch_interval))) + best_acc = acc + if acc > best_acc: + print('Saving..') + state = { + 'net': net.state_dict(), + 'acc': acc, + 'epoch': epoch, + } + if not os.path.isdir('checkpoint'): + os.mkdir('checkpoint') + torch.save(state, './checkpoint/{}_n_cls_{}_best_ckpt.pth'.\ + format(args.net, args.num_class)) best_acc = acc print('\n\nargs.train: ', args.train, ', args.test:', args.test) From 9ac704a1b6e3647f0f4cffa07cf67e326a80b4df Mon Sep 17 00:00:00 2001 From: BBC Date: Fri, 8 Jul 2022 23:36:29 -0400 Subject: [PATCH 25/54] Save models --- main_n_cls.py | 4 ++-- main_n_cls_nohup.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index 20126e65f..24aafd5cb 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -294,7 +294,7 @@ def test(epoch): if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') torch.save(state, './checkpoint/{}_n_cls_{}_epoch_{}_ckpt.pth'.\ - format(args.net, args.num_class, str(args.save_model_epoch_interval))) + format(args.net, args.num_class, str(epoch))) best_acc = acc if acc > best_acc: print('Saving..') @@ -305,7 +305,7 @@ def test(epoch): } if not 
os.path.isdir('checkpoint'): os.mkdir('checkpoint') - torch.save(state, './checkpoint/{}_n_cls_{}_best_ckpt.pth'.\ + torch.save(state, './checkpoint/{}_n_cls_{}_epoch_best_ckpt.pth'.\ format(args.net, args.num_class)) best_acc = acc diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index d77cbd063..2994aadeb 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -287,7 +287,7 @@ def test(epoch): if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') torch.save(state, './checkpoint/{}_n_cls_{}_epoch_{}_ckpt.pth'.\ - format(args.net, args.num_class, str(args.save_model_epoch_interval))) + format(args.net, args.num_class, str(epoch))) best_acc = acc if acc > best_acc: print('Saving..') @@ -298,7 +298,7 @@ def test(epoch): } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') - torch.save(state, './checkpoint/{}_n_cls_{}_best_ckpt.pth'.\ + torch.save(state, './checkpoint/{}_n_cls_{}_epoch_best_ckpt.pth'.\ format(args.net, args.num_class)) best_acc = acc From 4ae4620738e84e8b40f5e0fc79467ca9b0c71522 Mon Sep 17 00:00:00 2001 From: BBC Date: Sat, 9 Jul 2022 00:24:03 -0400 Subject: [PATCH 26/54] Load epoch --- main_n_cls.py | 3 ++- main_n_cls_nohup.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index 24aafd5cb..3df8c7eaf 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -194,7 +194,8 @@ def count_layer_params(model, layer_name=nn.Conv2d): assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' 
print('\n\ndevice: ', device) - checkpoint = torch.load('./checkpoint/{}_n_cls_{}_ckpt.pth'.format(args.net, args.num_class), map_location=device) + checkpoint = torch.load('./checkpoint/{}_n_cls_{}_epoch_{}_ckpt.pth'.\ + format(args.net, args.num_class, args.load_epoch), map_location=device) net.load_state_dict(checkpoint['net'], strict=False) best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index 2994aadeb..7f9ce05b7 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -193,7 +193,8 @@ def count_layer_params(model, layer_name=nn.Conv2d): assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' print('\n\ndevice: ', device) - checkpoint = torch.load('./checkpoint/{}_n_cls_{}_ckpt.pth'.format(args.net, args.num_class), map_location=device) + checkpoint = torch.load('./checkpoint/{}_n_cls_{}_epoch_{}_ckpt.pth'.\ + format(args.net, args.num_class, args.load_epoch), map_location=device) net.load_state_dict(checkpoint['net'], strict=False) best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] From e007fe5d5ef7eb05071f015552ce8997921cedfe Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 11:35:25 -0400 Subject: [PATCH 27/54] Add num_classes args for models --- main_n_cls.py | 18 +++++++++--------- main_n_cls_nohup.py | 18 +++++++++--------- models/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 493 bytes models/__pycache__/densenet.cpython-36.pyc | Bin 0 -> 4124 bytes models/__pycache__/dla.cpython-36.pyc | Bin 0 -> 4234 bytes models/__pycache__/dla_simple.cpython-36.pyc | Bin 0 -> 4085 bytes models/__pycache__/dpn.cpython-36.pyc | Bin 0 -> 3684 bytes .../__pycache__/efficientnet.cpython-36.pyc | Bin 0 -> 4969 bytes models/__pycache__/googlenet.cpython-36.pyc | Bin 0 -> 2839 bytes models/__pycache__/lenet.cpython-36.pyc | Bin 0 -> 1077 bytes models/__pycache__/mobilenet.cpython-36.pyc | Bin 0 -> 2594 bytes models/__pycache__/mobilenetv2.cpython-36.pyc | Bin 0 
-> 3099 bytes models/__pycache__/pnasnet.cpython-36.pyc | Bin 0 -> 4623 bytes .../__pycache__/preact_resnet.cpython-36.pyc | Bin 0 -> 4543 bytes models/__pycache__/regnet.cpython-36.pyc | Bin 0 -> 4572 bytes models/__pycache__/resnet.cpython-36.pyc | Bin 0 -> 4423 bytes models/__pycache__/resnext.cpython-36.pyc | Bin 0 -> 3547 bytes models/__pycache__/senet.cpython-36.pyc | Bin 0 -> 3906 bytes models/__pycache__/shufflenet.cpython-36.pyc | Bin 0 -> 4019 bytes .../__pycache__/shufflenetv2.cpython-36.pyc | Bin 0 -> 5440 bytes models/__pycache__/vgg.cpython-36.pyc | Bin 0 -> 1676 bytes models/googlenet.py | 4 ++-- models/shufflenetv2.py | 4 ++-- models/vgg.py | 4 ++-- 24 files changed, 24 insertions(+), 24 deletions(-) create mode 100644 models/__pycache__/__init__.cpython-36.pyc create mode 100644 models/__pycache__/densenet.cpython-36.pyc create mode 100644 models/__pycache__/dla.cpython-36.pyc create mode 100644 models/__pycache__/dla_simple.cpython-36.pyc create mode 100644 models/__pycache__/dpn.cpython-36.pyc create mode 100644 models/__pycache__/efficientnet.cpython-36.pyc create mode 100644 models/__pycache__/googlenet.cpython-36.pyc create mode 100644 models/__pycache__/lenet.cpython-36.pyc create mode 100644 models/__pycache__/mobilenet.cpython-36.pyc create mode 100644 models/__pycache__/mobilenetv2.cpython-36.pyc create mode 100644 models/__pycache__/pnasnet.cpython-36.pyc create mode 100644 models/__pycache__/preact_resnet.cpython-36.pyc create mode 100644 models/__pycache__/regnet.cpython-36.pyc create mode 100644 models/__pycache__/resnet.cpython-36.pyc create mode 100644 models/__pycache__/resnext.cpython-36.pyc create mode 100644 models/__pycache__/senet.cpython-36.pyc create mode 100644 models/__pycache__/shufflenet.cpython-36.pyc create mode 100644 models/__pycache__/shufflenetv2.cpython-36.pyc create mode 100644 models/__pycache__/vgg.cpython-36.pyc diff --git a/main_n_cls.py b/main_n_cls.py index 3df8c7eaf..e16d5b6df 100644 --- a/main_n_cls.py +++ 
b/main_n_cls.py @@ -127,21 +127,21 @@ def prepare_dataset(num_class=args.num_class): # Model print('==> Building model..') -if args.net == 'VGG19': net = VGG('VGG19') -elif args.net == 'ResNet18': net = ResNet18() +if args.net == 'VGG19': net = VGG('VGG19', args.num_class) +elif args.net == 'ResNet18': net = ResNet18(args.num_class) elif args.net == 'PreActResNet18': net = PreActResNet18() -elif args.net == 'GoogLeNet': net = GoogLeNet() +elif args.net == 'GoogLeNet': net = GoogLeNet(args.num_class) elif args.net == 'DenseNet121': net = DenseNet121() -elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() -elif args.net == 'MobileNet': net = MobileNet() +elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d(args.num_class) +elif args.net == 'MobileNet': net = MobileNet(args.num_class) elif args.net == 'MobileNetV2': net = MobileNetV2(args.num_class) elif args.net == 'DPN92': net = DPN92() -elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() +elif args.net == 'ShuffleNetG2': net = ShuffleNetG2(args.num_class) elif args.net == 'SENet18': net = SENet18() -elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1) -elif args.net == 'EfficientNetB0': net = EfficientNetB0() +elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1, args.num_class) +elif args.net == 'EfficientNetB0': net = EfficientNetB0(args.num_class) elif args.net == 'RegNetX_200MF': net = RegNetX_200MF() -elif args.net == 'SimpleDLA': net = SimpleDLA() +elif args.net == 'SimpleDLA': net = SimpleDLA(args.num_class) # Borrow sparsity() and prune() from # https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L174 diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index 7f9ce05b7..6a8c08d50 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -126,21 +126,21 @@ def prepare_dataset(num_class=args.num_class): # Model print('==> Building model..') -if args.net == 'VGG19': net = VGG('VGG19') -elif args.net == 'ResNet18': net = ResNet18() +if 
args.net == 'VGG19': net = VGG('VGG19', args.num_class) +elif args.net == 'ResNet18': net = ResNet18(args.num_class) elif args.net == 'PreActResNet18': net = PreActResNet18() -elif args.net == 'GoogLeNet': net = GoogLeNet() +elif args.net == 'GoogLeNet': net = GoogLeNet(args.num_class) elif args.net == 'DenseNet121': net = DenseNet121() -elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() -elif args.net == 'MobileNet': net = MobileNet() +elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d(args.num_class) +elif args.net == 'MobileNet': net = MobileNet(args.num_class) elif args.net == 'MobileNetV2': net = MobileNetV2(args.num_class) elif args.net == 'DPN92': net = DPN92() -elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() +elif args.net == 'ShuffleNetG2': net = ShuffleNetG2(args.num_class) elif args.net == 'SENet18': net = SENet18() -elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1) -elif args.net == 'EfficientNetB0': net = EfficientNetB0() +elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1, args.num_class) +elif args.net == 'EfficientNetB0': net = EfficientNetB0(args.num_class) elif args.net == 'RegNetX_200MF': net = RegNetX_200MF() -elif args.net == 'SimpleDLA': net = SimpleDLA() +elif args.net == 'SimpleDLA': net = SimpleDLA(args.num_class) # Borrow sparsity() and prune() from # https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L174 diff --git a/models/__pycache__/__init__.cpython-36.pyc b/models/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7bcd5c95a5c67d70d92cbef5a4be3ad4c85ec25 GIT binary patch literal 493 zcmajbO-_U`6ae6Mkl|;*zazNAL`i10#&`l3qboK9Xn`iBZ74HwPT&Q+gmLF_x^?9h zTv^~n-ATjy9xr|E3!69&H_z|o8%F2@{dpEPzq7A?v4J3hIeMJM-~x2OBhUqxpa=S3 z0EXZSjKCN?22a3K@XWe+K#B8dXGRG&`0_qEH_pCkGKVOUxtt0*WUij@lqj_QHa?+Z zDw&{>vY9u9y4}_Fl$+=`->=QGr(;{@O+F1-ta@5hLq69{(8-d`jjNy3_C>U=S;eRr 
z#!Kxf&3H;kc#&(?DNgN>ym&=@WT&6P*E(%wN7J&e3Yl)Hk~&p~LH1R9Rk6D0(@qk~ eb(-f)up!T{)xr2Twz%W;cKiwY!UiAzx4r@Fc!aG0 literal 0 HcmV?d00001 diff --git a/models/__pycache__/densenet.cpython-36.pyc b/models/__pycache__/densenet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e67fe5a2a1d75a1656da04fa4b473d90c96abfd GIT binary patch literal 4124 zcmbVPTW{mm5k9{y#7ND;I_cCl&N#7LW#`qV$s*Z!D3oTmbP2=o{F(ta}}B{|tP-caD23lB$gX1@96@b=nTaQn&M zkN>eI#J|MKWuyNN?)yv;C-%sn zS%Vg49O+`lt!Dzvc+$s=U(d8$#t~razk#S-H=WN{YQ9<0{~4kXg;9!`IWlL$5CyzZ8Z*p<4*G5h z{iV4hq)GZevoY_{nrfEU`LwPYmnQr~E8Wi=tZYdEJG`Sg1!8VM-0PXTeLqoIl7{*4 zGST+&Fv{m-OwUpd8tqLYDTmpqa%nQqPClI^N}2S`55r+LEW%KmS*F|XMn!yfG*;(* zsjcJeP`h`=*+UGR80{g=^{alrXK6c6(i7bpX5l1_vLx5-Q#F26oP{bXlAfVkVHl@T zo`>Pz#J~FoXXEqa;8?}c_~5t6WSk#NE(!|HtMTw8QU~W_nWXuFr08Wyu|K&`0ZgPj zCn1b=qhma8Y#Nu_%P{OW8;EH?bkWwyJ$G>D??9Adgounxv=&8zepfWI>shBXo7uu0 zd8Jtzr$F+|#45s+d89~T&sQ60v~f?{DoLjb*&);xiA|b!csJULqXMq)^3qSWHJ%n~ zmlm;jj;6hnv3e9KN$=)9gfMpT!_K-ZJF6GzM>td?~x#cs5XfXgf@>ewN4$IfLn({ zZPQGEfYg9(JhF%kt9b#dax%C=+~xLiPhWwyAK;y*qYX&2XnNjNVk= zC9zQ3c!hC7&L!g-oH*?67&D8D5JXqL577{*446u7={#=f-UIJlhfryO8!a%VwIiKSmrHd%IFNhhv;Ws0C?@fBMRRp6pX6+rpEa4R$jJz4xB3vuIt5q#lt!16HV00rb z8(+|3)LJ%K3r086vh@WmX04^mS}5rXLwn^9;4;)?Rt!s$FFm8S1ICtFwBxjX*cB-zV>!X51Bj+PiW}dt?BwRHw`t>XRX=K5%M2 zSpR)>uCV_?=Ky-wmbi8o06l*F4Pn5BP2y(LxK4~NMk9EEW9Qg7kWjrjW4SeO%D zg%JQs^b2sb=|Wy^E*#r1x21lJmFg!XUV-R&Po;8-6jy`)$LEy6RARVV_{RE+zNcvJ z8lw|D8Jd&`M732tmi8(597j>BI)u_PHf>IhnbRR?z6XIcqO?P;X1<_0w}Prga_M%n z)5HGZ@31K6Bb=egJ2$e&f<1JICVLRv_vnw-*{yivtv{?-@xe{4Ag{nLY=ygG z#r!q0=WChLf9>#(E2bPhiz&+hEgdqakgH#k_z>c2xaKg#drg2oLjOhqBG=#x=1Z=r zOnLtqOj(!pcR|Qs3&$(g{ASe}3xfr-5*7+E6IXoxuR(0h>?<%&#X^Ne{9xc)77(fr znN>C9xY>%7t8YLs!OBcK6NOTSuOAB^DT+>I+PGMOHy`lrBFT#c&+5~?iv;%#MIi5W zwHJnR9OG!t0Ab}me>?1FS;g7g&2ax@nsMQZQXMS&%tYG#kUs_Zda=ytk&-)esU1!8?G-~a#s literal 0 HcmV?d00001 diff --git a/models/__pycache__/dla.cpython-36.pyc b/models/__pycache__/dla.cpython-36.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..71dce2325f0b5f7b2cfe305b9679c79fcefd0c3f GIT binary patch literal 4234 zcmb_fZExJh5njIYcrUTfs+y{4dufxj7sNVCcI?1tgT!{7rf`M8aa#0%f}pso6Hh1J zdC5~G;S>Zo0|pA{Zxm=h_LubYe(k6G3;B{hvm{S%ZrTDR!P(v6a9?KTnHk=0HiO$Q z|Gxd{b;kb5&RsV8AK}SffN<7l+~DTO=o`8w z+)2{TXGhP*GTvPef~R6fNRh_ke$c`7NQg=2NpvJ+=i|XZib0eo<8-~Vo9C14eyDEvcG}kEH`FsZVpX4Wo`}a znK8p6GmEC@_NhB#p##1sr#GPh28&?ru{#*>obLG%-?s7$#|whoLglRJjkw>A@yfPK|N5~UeTjt$zG811N9l7^E} zl!{EbMXR^dHB>DO<584lVfa_}&CTBKcwh9kWgLxrPsL=M^(IF-IoZuP*@+w;vJ~@&Bw%@{&t${G3W!T1w(J%t^8fIXuV%);$_{NzhtXGGP>FjioCdnfn;>jL> zzdae~+D*`Ww7~9AU)Rg8vzhl`X|+Dqq8-vPlH! zX7U=58z3;u_s}TgP+8+?E`LBTG@~=hJ9Zypb!IPU}z7^r~4}Ar_-i7@q9}lfF)pv?1*+b#yZF zB;T;j)^*K`Foy)|Ep2@Atrk@N`V-{sqs3Z z)0&QcSJA4G<80~q7i9OxP`!qOhe3e}U)HGq77UlXJ#Nz3ulgk^a_N_(OzmTc`*RcihRA=> zBjJ12%z`q_;+$PqIT|_2ilbcq5EIBZi2R%g%~u`7Jj~>ei1}k87oAXhM-O~$a|Uo> z!wtRROK#`N4t@S6k)IOLN+jo!KO?f7w^EFDDN;9WD5I15M$6L%_Mh1z`c-b@ zTl4$);_R!#4{|@R!9ndgg2N;Br*}?Vq?%$b7qi)?&WQ&@2Hcsst8C`YO@lp`e_)r^ zt6y60%KBw;&?lB}<8V+j9lUEQXCw~9NRN(v-4Ea9-A3W>Hz8A)YMpvy4@50Eq^ga? 
zP9CNRxGIpzV7Ki160^xnAVbI~9SBtq!%XB+o=XIo+61pd`o8NIFmbji0ZVdx*(IXa zp{*$+>3i99JBOtzr?`lgyO7GrX(_h5hL7lX&1jn|i1I$-`idSOH_6mj@k2{2@$+X4 zpk`E0d$wFf4@Q_910yvDh~6-hnNu8rL}{qEfF?wIa)MO`Y{9tn#R*J@9Dp9o^d8Jq z=795j@Vv{i;Q5p}PMjG9JgdOyIdjwsy}XZo%1t7-K}ffQrEcA5G7%}4x9If-5dvn{ z)p{-v{2p=df%Lt?(8<3A^(TK;59Wb;Ei*;HuVM9Tya`{Un(?|03#~0lKv?L)gA{_o@mxu`%z@7q z9TNKpq-&nxbn!J9I;Jd89kYZXgLd0R__$B6bfnkmvQ0zEnMJm=pNc16pa_jJ)TSf) zAJ{0{O>O;haJy{plgja9C>=%i>hSSs$qmm{Lo@C4*{9En29)yr?Gnsl}nWIstiF+~mYEiOcGs#YcKOgTLBBHE7~}*wo3oSR=M4dwiXshYzr#GI;&*-(KtW@MW#wz3Jtd`Mr zv9fcKo^|)+_kBK3h*yU2Ccv)o^`hn#T0U zjK8YjtY6sD{hco8b>XekwMLTOzFx0T{<7LC_PVAw^2_31;6qYjMn3?yPNNO0{YJah L?zFqU?;HODQm0=R literal 0 HcmV?d00001 diff --git a/models/__pycache__/dla_simple.cpython-36.pyc b/models/__pycache__/dla_simple.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cf66f26834c8e7d027301dd1856c75e7d694adb GIT binary patch literal 4085 zcmb_fOK%&=5$<`C96r{jm4)MMlF2(ESp_ zhpFJ6^^UL1X-3 z@Kg+i6nP@<2O*x%gcyfU;$v*|aBokFJ#)5xxL=lIb$>93<;(P_U&y^dysHN5_io?o z-@d!~$!0KzCD?XCbor2#=usbn2nOr09h+OQjsJ!XVKMH!ZtS?+K?%ysnL7&eMEPySr(udXDty6Q2m( zyVus9ng9mU#!T0vD9zI{inN{Q+J98!M;lyw3Ho*2+=|O&f4h)};5obbdi7$1#v2+S z`D#K)i!)tU`-Lo%NvRv+yEve$L^jloG>^tvoC~G>syD~!S-KHLNfs*=MSo`B zY!3E|LowKuNn8w`igBR^vi1xX9E1xWtVK`GBVkaElvg>J|vII+km# zSuHETsAUJ%`~){vLZ}n{V(2a^%m}HehHy)~*!UL?Zq$*(P$G$TcE#Y}n^p=(P1xJH@1w-=-Ia z(0z0b-uEJw{KuFmx+=qsu2nbcp1aXPtY;Z)U}M-dj}Hax8blEkH_2$+ilVP3aaO&N zYgo?cPktXG-Ch_Y5m^>RXD7j;H%XEXWI#iQR|KfK<=UO=hL>QvdX~PXehSvgZO^+b z{K~kjYh2bdF6%xmiqf3_YG?fvi(T!ka2#@1>n0-UEfc9PJFRgbZ`-UtC%Zp}>Wx>_ zFsWRlE*mr^WmkG>(x?TZUGfOop`pDCXjfKi0@=xaoWlu~Zoz)5nen=%=2rGJtA2z< z7Pcg``mpFpo9=!Ym!wE)2A8BvgE8c&nGL)lOTCRqV9%L4P^Mkov*&BiIFNRdxRl?) 
z0`dbQKO;ixEiYofRPwvT{2r0FL1@si3BIv8gLC1)Epy-tu=8a|pZ|c!4~Y7IHdY50((l?a{-zQ2UyPjH6H zXRRtI+S<#+k;tl9twJP0rcx#;3n2w)(Tz+D%c#U1b$|rFUyXZy1x;_4a-aM;ec2<@ zCqnmMD*tU%{t|Cg7ldNyHLH%+wSB7#e4eyPk(bd)iG>vT2u%^1fJR2q&_qh8!{Cvk zD3rHHGbpr*jE=kwdAUwx0Mhf!sSO6T3rAWvbmQOR@e6#UD4#I|cMA3=Epx~vB5kA8 zO0)kPEJ^40X_o~oTaq|e!}3GCS;jemKs2=9S)Ta1VM;xL(r!*Zl8_qdp`Kz8_39gt zi})P5s2V&|d040xOdLJ+!Sgv3jK0QwBqu^C)gzTORVn{Tnkts1bnTLK?ZVnY*_>Kb z?wT_+4AxtQSLXqW+$OJhZNm$A>m09RcrD(>w@bPAu=_RM24tw(xo*6pvn@&WbCWAc zAt-sy6*H@_{d^vh*aMKBeFpRS*I-EUuQ}>EmNEqWUbph6kLi^OPA0+=>`j=j0=)ZF zJozKar&yt?%BYfEU-TOW1B=O>MZdY|KVI~Awf_WFbSyD9{UXQ94Q!*E`Q)%FdW5># z6aN4)MSi}lW)Kq{s%p;nYnapTq`N}WWd|jtQG0}@B*+yGN+k>&bm6qlYUHj)rra?u zZ7N_Sp4zBR)h{KXsXc0-1%y8C8V(yZ7vEADIuZcl9UH*p1 z10oNJQ2iz8$R=UwAU@iQ#zm2#26c~8@d8yP>RtIcB$k?eXX;8JsNM&uLIs73>eoVa zLuGjVOoPklQM;JPs9^Z-vKc{Z8*LFby0;{85;XIWIVf%#libCiWHa`v`U_&=?+0?a zac;Qw%zcF#w2G%lBlgIhu^D8HbGTK8Hy$EC!8(Ybw2}74GR;d#5lMS8&Ur3>N3R|2 z_cmiEk1>-I8C@Iw&R&W0X2kucK>G#ES&=q1mBs%8P!U zOL8QAV>#^)CwW5uN%7wxI2%_M^?z^bQByaXTyMO}9N+9$DSuTgRA;49LDzp(m9n3b Z?<%TaT^uy9T2A+aZl}A}?bYjb>tDE2Q9S?v literal 0 HcmV?d00001 diff --git a/models/__pycache__/dpn.cpython-36.pyc b/models/__pycache__/dpn.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4017fc7bdad3787c6a9797e83b4fcd6e682b94a GIT binary patch literal 3684 zcmbVPTW=&s8Lhte^o+fZ?d&C>jY%Ldz}l=GqD0~nWtRwjceU*qk9%gT zdv;@MMv62G`(%Cre}Gqh0pfulQBO!b4L^YwIH$Tj<6W&5i5~Uos;|Df^!J@QU9-2c z674yrTIOl@ovvd_Zr|y9UH?NN9qB$1(#@QH zFmt*gES~gX@oP&%1~Pmix{++i2+yW$$`+n2X+IF{m9v|7$H`#peo`K7b+YpDP#qUr zy?pEb>8C@L9&OLD4$(=87nf5Pim#%O39LR6`D15-KWA8T@SN(I{IK( zmV+$M(&IF!`}5hr&GCxA14>a^6EU;<_Don};!H&BOkAK_dVPOlO>Dlqbk1oDU<)Vq z3_aq8X}8htPCWQ{@bb?Cv_zM@nza|kyk_EBkJK_g#%e}eL49>5(EG~7SAS#8^t7-d z(-U*pGC21x_nX=DmjZ7acIFn|=3d*@&Erhv*&r@@XPI{Qdr2|p+s?NDZBH@V+Am7g zlbH@iiIlzkQ2U2!I35+sqmyHtRETnASkqoH9%V||>>S2%FYlFctfS$$j7Nhc&kEg? 
zSzctZ%tqyrw)0&3cZT_+U8%hk&)d5BK~kniouN8`&DqayR}H%??^fkKmiKh@AbU8@ z^0JoRhf>XRJAZ3$1J`K z5Z0>YTfV(%wXD~0U$@rm$a3+!YMpH?8Ml2Aq?9q^#lVw#cW@W~0F#y>_e_>*GeZJBrL}x3qOnyDA%u)duS76~e28O~N(8Rl*Gd z6RACu4(+5#*>=@yG+Cc(2Yl0(RIk&|z!qDAgQ0qyDETE+iZy^hGOk){R)kdj0eH54 zDKWKk+b5Cpsa^ku)Fi4779ierG`_AN|Qo4p`j8_J2;xpp8g7 z)a!<@#nhgHWYRGf&;b^A>XlC4Q}ucNj(1J&<>jf>r37W8$r6x?EzuXStPz7FZ~t+(pmSqr>cDi%Y* zi?7Z1yMwzp0!+L^km?X0C23tZ0iODYVj6sYw-gi5<*xYcyPw(9aOzEhzE#%BHnH(R z^(LXTkL?+}@R_wybC4*PNdj@4hj08^lr9u6_#;1OZlxS1-0C}+8N0QSA!V6L%1ryo zXq4sB>@CHRRsp|G$A*c|i$YU<7=r54SJVM6wAEYg@D^pIYYjfQ1wxGvABcNE+yOuxRLi-+AnNI;Ej;toKcDD#OBWHM-o2LrOaZK zW<3gJ*6Yj-Rmn|5-uZuUwOi^dG`vAr%I+SkZxi^2>K%ZNl1GQ}XgD0;%j`btWsmWx z#oE$3Ev`^-NJaA zaTFzSIl%>&w`k?&;`{~K31wXkLt{%(H5Xr>(rZ#cuFETOO}--6-?{TFVJgF1)byc2f&`5Gm&#Dt6|>UY-0{QC%@)j03Z$vCAqZqTy+PLio zgbypDl;e5Aof7hoct}K-jj$ zUudhJqxO>MFfto*Q?5M|omV@{msmze@fDtr4ilk%$OiKnf6>bScbx8XkY51s?j}y3 z(C~opX=PnZqkc&%tN(AD-rMEq&&TOkur1;QDhM8e@6eGvEyYZLJysvvk$(iFGjnN8y^-qWW!uqCo|R@>Y3ua)q`GBRl%Jtj z0UBag`8dpkmii6K?G+uwu^gs2gP3tcb5z>Sb0dO!X7sUZnc8@!o{j0tL3b}zW!#|2&4Z3Z-L`r literal 0 HcmV?d00001 diff --git a/models/__pycache__/efficientnet.cpython-36.pyc b/models/__pycache__/efficientnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9381d184abffec93f32521b74f53b81dd03fb351 GIT binary patch literal 4969 zcmai2&6C_l6<6zfG#_5CH^fW$7|d5DiFaZf0vMNV>^Qk3TP|;kR85f*8g=ii=0h{? 
z)~vmQhReDzMU`E}krby~D6aek9O0HLx^kfQ#yJ=Gz1Ga^c$0us{nXuRb*ta|z2AG% zwR%0c_Vh2^55Hm<|1{PP8}-{L>UR*_=op-FbI3YOU(LSNv2@MuTYabFa*NwfjE=`0 z?&9jdYjBVIPYmu0t6w>`IstkrJU~yd=&7Qo%4_JUEqZFa&Kr2T&d>1;TpQvXKR+<# zZ$#tRU&=5K zo2cvUb}z{errn)*JZcYwjMU{!M9XS4nGBOSqWM(2I~;f0qex{!mLtO#Zj0qcQjzUU zj)D`Y+SrX5U2xFVM7fKiY7jZjzHw}98BheW z&T@9qn46i=XLvD_o7)C(fU{?#ZK2rQerCUFj^0V(h|wfF!aE<3&3z1(6^J6}3Ym_l!(kE@Hct*A zN7G^0vZaOFoq)^nB$Q;IPB0K+QOBT24Yc}7K^rY)oOP_H#y&iVTTlEBw89XPKB6> z%TdZN-+LSr@OwmCQt8cauo9Jg9mKP0LM;Df}=@(j*JR z!c5b`B`3Pd3rmSBB(Aos!d7CqUsRJcj1M9xaHxu^5~~IPTM5H>7%3Horz)4AA>(T( zDu6Ip1G=uE%&L3$c9v_d5J|fxRNh5VKY-9k#9oh0pwiSB1nQ32oa#2=uJ0TJv80nN zx)P&U<-S{{-o6&F2YaWu5b#Y%6SP_IF(!$qUI7zcbT`h1fRv6c6|Ce4G zA|`*PgHS+jO{!I&LcECYoDgm09;BT+glbJZebpaJBT!#v()4+p0l|3jOMMbJyFZ*nA6=dfT&8=z>}3@AS*n#n2G#seYFk! zEKr9r=X0C0fjRygVQ)ZBZECOcOY~{Zdj~9a0h=sbmC1yQ!ka{#BT~sN^q!CsPBh>g zJxr2~u@o*YjnN{?$@kT1~xCG>6P-vRjNb4j3o(}N|bl5(Vc6Be-Noq4U$PKC>??vxv0S*R&Kxc-fz-P zAC#cUShjU@$rbYJXfDx9m@FYoh~Gg`Hy{kZ!CX2mAJSZ$0jpWdRs((1uC4WzJ+p>J z>;IvYUtx^PP0~txKLv+pCKm=k5*lh(n<=tMzt#>en^7YPyUF+Pe}Zj1jU2G&twFu z28Kw(I$o{wmHZaxB(?^lw@2lmA9lVt0c}A#!3<`PZHC~ z?~wQ|iSLnE(Utr@HCJmc&8$g^Kud_$GzpUO5XD&IH4Wx3CMv?iil|DGq9|KIep*J^ zFCwejdWLvHEb~H0CHm2n^Gj6DP!2h>mDznqyE@Ye4;Iq&;-cR}_@OaA>{&tBb)Qiy zz?~|Bu7?!K-!^~-qJWyFfXc3X1M8eLA+#!&JaFpZrDHQm9wmQDga3mAfY6?Pi+V}D zg%$7jP93_$$5pyZa6udyMFSBIqaz_z;V!T5C8>xc+WNXg$6b1&nodV$loo1fr&FBJ zl0y_l6_`o8$&l^@lp2|&fJ}QbAMi$2YJwAywW4;uUtf?3{R>)zL{4s?yY&CX9%Ew zd&|h3SByFQt`BR1l)b!?2f53ud5zbec{(xm=ImkTL*wDq4~@*B#PvBPN{!PLo!FT4 zL9DH{&;wz7)R;3YWaMny_`Uhi-YdZl{gF@7HojFP8(MG{4BUJOjL%?ipkAi z(S$U+Ys@acsKPZH?%Y7kjfUzbD$6H}$|?eDq!aDP10_o+sk)(}PFVyWp!Z?CXs$)s@cog-3S#vlqXMX)Cj^wTVf zhJdZ!xCZk*nnYubbqja#nqRgMzISwBR3({_=9XI2?C~}_j!+a4j=^lu7B~bjyr?N` z6LotPDRd-Yv+Xa{?kdlub*Rv5stH6!VWA8Kt1y5bi%1@sC&+*BX~Msn;X(Q_}h_T1Y^&X~-SR(C~Ar(dJ8`)qDxf=O~N# zK+&PbHX$je4cKh!1(hzt1e3S%%;$pxk%uJ9>0I`I1qb4}0m>qSZPfjjB=yB8`XEqO zQM94DsA!HxIKYYYtLLHZoStYSkL6$r9?-7{v}t5r7f3xGpS3tCt)u4~IM4mjn`;Oh 
zGqAzN!Zeq$T*>34trJSJPc}$UYIQ2c0H@%5g+#9H5 z_|1S#GJMsS8Lf3}&w=<{Covh2-nWS&mtT&n-OHchX>bKiFDbAVj>P9|S~wF)nPZJ^ zTZM&R9_0rUJ36}sI}utmd?pNA8)a6Zm+ZtKK)oHcHH!_eF0q*vFoheXPP=Skf4BL1W9_?iz)l&2M@B2Sj z82gLOJQnzEwB$aBU@ayL!8=CF&|ls*Tc+l$w%N8@&K)L9VLfBQ3eC2gnXLjOws0VE zMv|g%Md2B1l>~pl8pZRKJKgT#Znzhw^~X_qRKM4M*p63yPB%WiE>!tjKMjudy7Cwu>wdWVz2f^DXxGu2s(9Bw zyVvb@pyd0Tje@e0uyde_QS9|PejFw(E8aNUXt{CotSLi*E*NyT&bIO{m{z$57o!3A ztD@%x9Y0At?>F{$bN#4$9Io%n!0)c#4}0BYz1L4EC$9z3fiKsOyCUo)>xanOP8f&j zTCXpQ_&Lg}m46Gy!YjR58QGrKCaJnMj#5BDqlZFiqwSz2H$Vog z%?3thJmKIB%^@q>TyxmDwy8NgwUC3HvT$G@A`dgFI=(0M);V$>uH*udMIuWe%GN)t z%)n0@j=V%7<4o~BHldGA=r_?eHw?K9y`(kh40FLGl#m6Z z@{yJV=u95^ZF>F}giH`Bpemq;zO>!sF1h|%2%*A411jakFUBB(;vRuW5f%+lAba!VlWu~Lfsyb7epvWhChKsbr?qrFp0K_+r~CVPssu8DN>5uMmR&* zkTSWkDoHLsBtnHQNh-@ka9%P=hl!j6QKg;?y^h}xWs>6^_nUcT)ky6g`)7naiu;>+ zt?^(}ae)@1`CGuWEmgdAdN_hO*FQbf1DxF`4t=Sdj{aheRF~uIpOm3q-WSj$?|}e^ z3@b+-bJA#T-g*YCCAjuOKgB4w6};52v$HbLNDzCLGXbq}6Ar zwsR-4!i1ZSEly;m30EF*yj>aab`>~=vdXyjHaFG**{nT@ea!P|_z#}Oy!?y=pA*p- zN8yoQ5+SscUlAchm0uINN#q+Mw?LG28ikK>#^AV<6o-bX1239+=SJYSrKe}atiT?~}8fAW1II^*iZ(Y0qf-+1#i^igioFOH#aRcC7e ziGEDb$?>s=G(0W;i-YNw Pwr~`ejEY&?s9pOPZnIn9Q1kU2ou^h{KKP~o8u1`yqoDY`}h>R1& z&9c@)^$G+@A_|Gv9hp!}aqC6aDtKhy5t~E~2G))VoY3`Cypb!0s=fl$q^1pNS;ksI zNo{2|1`}3mHH14xXTo0VF6bT66;8{*>KV-uuF<+ib4AB!z95j^A1^V{(nW9(b`Y)* zP7!>B6M*zPFbf`Nr<%{CQczx^J{|7J2a>N)Aap=?U=Ls~2BxmV4aEc*>@DxkmN&lkHaf^%ly6vLhHAV-QVLtXH1HQz;A{Fp+e+qh z^$;xDpT<{wR+jllDC~~5FH(64^-C90?P7pEgvS7ZrJkS%z0y`Wuhla&7EUjk2eI2UCC;XTvyVNs|)xahr&I-Jalq)y8s k&b5Cps~70yelqM#u=~tg|L?tB@qdTKu5fH@>eC+k2aLb|0ssI2 literal 0 HcmV?d00001 diff --git a/models/__pycache__/mobilenet.cpython-36.pyc b/models/__pycache__/mobilenet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f69978e4266174b561556d05c7bd46ceb774c44 GIT binary patch literal 2594 zcmZuzU5^w+6s_v->HPo}ML>ZT<423^3YZv)Q3S*nm=G|UOf+q8SIuq@z1>4~56ezw z6O$p~Mf@MW`g7{lr}+!M=(*K1vm3EHb*tu9b@#1v&$%@>*4DxsFaO-0`HcO=j$IDM 
zx6tZGFoN}%;KC|+kDJ~a*ge~foPj-Xd){|U*ur_igd^>NKec-SJg)HI@#db6@I~-~ z^+I7iW^rfy+kz555BKdw!0aIkEM*7T^SA2p;Xc3<*NE-^liVNXSpn! z=uTBWtBO&RS7lm6T{%*wYxXPkq>lQPirVL*AMzS&qHl)7BF|Dt860%A2 zqTUQ=*d^;`WEU49Kv(@SjAYn3>sf+h-~LP1=Vrg1p40BK=e}eP>v`B&pxxU=l|30> zx+{mx?tWg&C_}i>mFQuWm(9{K<9Knl_zd+OG#jn{38rBUpRlPlu%?W&#vV8mJ^}m_ zTUh7;_bY$G5*IVxgior1poY5P7+yy&YAJhP8IK^R!kJcI+sZp7+RrJ4{jRaj4yTgtRFOzjTl! zd6_p!qOG#j9_8%1&@O>-O|RTen{2mRsXh4Y?ebbXah=B3V@Er+Ec&{Wm&ve5OIho% z8a4B=`E1N}CrPp*t?MNDgZ+JDYq#2yTiYs2tF1?JSk+s@gN8_THOu>{+S;pxENXN_ zyHYlr!vnQ~IM>kX5Qgz}ex9%LvwXa6QnUlVw8C#1U|ZulvTA!MD5!Ub^c0Fh@EP>!9cf;fRwU z>f$~nyqz^DY{Mul5S0_&9(n>knYlR}~MgEqAG9S8#cP=Gx77|$J8S+Ls9`a3#AZ7=I4EiK^0T1kY;1kz`V?1D6yUvslx2rzDJC6XF{evs+sB`2!Pv)qu&`x!j zKHEu#RaKzYI?r;skMfF2ueLBbuQx3k@6f~l0fOqmCR$Bwq-qlaiVfKDsUsK`b;nYK znTZed`|#NKqhsTEIp&Iv*Emo_L3N2tMCKY9qXEi;sY&WGjh4YJm~1K$NrpCFldc2n zqP6Y^9V4U+4s%gX=*+rd*U~tmr(t__kE($!-)qk+M^r$j&es zO4Ygjr{Y|yN{Y^7sWUN3a@|P^(fK+nWTHCv%c{y{A=6X+c~lSC!I;lVb*ekZrRqG$ z6Ror3EKRDcEI#UdDJz+hYDak@`)9JKtnapsKP#2&2w5dru6Lsumd_4Tl8eioEBX!; z$&eD(a|G|XFWE7YB-|Ha&l8^Tu~`2l^H?vyf&=5<&&%}m@*VknNb~;?9~o9B&oV8c z56i5mNKgIw@ByApH2R$WCq%_6K4w#A;7l23V{go2ckGV&7Mu8$H}J=N>|;;t6omIG zfIon}G4`kE7l$^+M~pxA1jV`{c-4TXxe^u4qp+aYMrzp@$Bxv7zBXm(xlSXUI_f8U z+c|aBPvo&9;C*$3MenIYy#zx-8izBERVfk=yPj#CN>#`_*4d>rfv!|0q-hKjA+n-x zyrV49Gs;ERR|HN)c~X=l6ZBO%8fw)b?=|D=5pbg%a)oi66HrzNxVS)!`alS8% zpW=DfwC*QWdU9B*GuYgt;%@EOCw;%x4@f^Sz~$2su#zRY3H3>-s&rIgUp+1JW7Euv zc$g=J)FvuN)k1ID-PV9xJ%gZ*>2lMIt0vO3@29bTvw*0_eIZ$U7=&I2dpJG{+rqT%U8{5tpe7QbA# z;M-lKBc<%nMH!`ZK0wpoLsV=4v<$FM17K@v?};(+j0C9 z6p??ITg`G%!3&*50kpvx7dU(rM?>Zpq*WQNFnd(+xdye=<0LA>dE z5K9Kx5^suaaYO8gx5UkU0|fgE%P8yO+KVO_<+^x-7)5M|O*{k3EDa>}mT9d}sZH>V zvO1$F%yvl7ZqGUpDkSY53EK7948>kUmFkkPn~9^2DXN+pcGpOwd;tMx$*GL<0Q_P?b` zxu^C3P5%JVXA|f7Bjl8Q$^P>2dvD@Syoo;v#z7xs-M93=!?xHBHfcP6f80RSEqI*) zuN)Axhr=S6dQ(n^$AmxK|BgLvf5&K7#?FsK$&2#zjhi0#O^^(SvJe)(%4Y3N%%U6G zW!qe<-@%}6l6VWkM3jmnI_7juca56I^;NahW%4Pazd)O3g(zx^M`&$+x$|09R<>k` 
zOhV+6w2tud|49p(!pexK=6)X*Oxd(7`OHMgc|RVOWsW1pJI~}ZoEbRf)Mw~0CpPsib-qnv zNi21$-h=IrX!BHUBTd^*n{VKGxw)FE1;KfCdg=oji!x;eO6Eh-KO*t*&#kt!2y520 zh}|QDT{LxASnrb6#?s&mD?z-ruErja^=b}W{g%3f>V2~Rio~x;d``mFCgH-et-3?n zG8K!uvK&jLOm0o*MCdsD>JFUsC~VqdPvNu|eIIO4*AcC>VpBH3&(wgp3;%1vouVGb zjE*Z{#RU$$DWf{E{No+g3A1aa{`do-DHz`#(8gEzohyt#RQQEcf1m)@xHzNjo*I5} zwG6wX#;c^R9wAQCkc`tbWbWk*widzeSW+wR8O8+ZN-hRU4s literal 0 HcmV?d00001 diff --git a/models/__pycache__/pnasnet.cpython-36.pyc b/models/__pycache__/pnasnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0148434237b551541da9e5ac6bc7e03aefee869f GIT binary patch literal 4623 zcmb_gTXP&o74DwZI9($uV&Xh zv$mz$eOecOa(+bKsN#vAGp{`9FYrRXujeB9N}w23pYA!++m~~`@APTybvohR(|;U% zxgo?q#kt2p`!kg6uMkQMgp$gd$bmGqHMR$~X*pwi><+xog|e0NL?|b*$NtJ51kkw3 zgT|{gEogidKoeA&5So?>p$RL^np#usCt}c69o0qMQR`|0byrz;MDN1Mja&Weclt@b zJxaH49sgmfhxc~E@K!ua^vBz`^z=|CSvGo@Z113Xdrs>Ymtp;Y@Lg7T&XArr_v__J84QF>T72ctMEyhA;m&$3=n zxY-=m(d<_QQ8Y?Nc@z~^nid|Hx~mFzi27==wjbxid;O_Cg3dljulC%+$&$(4qBTmR z*(6SrtO%#`yl$`EPjs40qHJ`M6ke9=kxF{9XhqR*5@%Ty{Z0Jq>Xm!bqvXng9>&uv zx0Bg4yD~e@>9-Guqr0)bax_)R1j;nd(j?!R9c$XDjgnCa*_DBGWGLJ6WTP_oPQ8sG zcg2S_ZF>VH1C+%=j9KJDuJ8wY$$rn$U3}3T7VULZg?y~PL7fqosPXSk^`ls;ExcrA zczLp3&s%-#`LtvjA0@D47)98_e8P4+iXP14N%h5#A~hXGk=c>H%+WiQZHCEYf)CBy zJ*hdN<`RsnLQd~NaP#fJ=!ST09`ch<_C7=dS+4lnbguz_E=H!$&aO;Zi) z5m*N(RAzsKcpjU;q=@X?;8RJU(|YC*k!acXEqy!p#y+u&*)X5-l>f}b_W=4%w z%H`Msd%@Q+ET-&d&JuP++kwqKLJKw=!-hoX(nfEvdttvN3|7HyI>Q*IXTN9?&f4?! 
zHmXA2EW)GsaWtDwC%dY&O9S%4(aB_9Z~ra!w~nu(?4ALyL>#gCU6hPA6d>Cc%7#4Y zlpC*MFUu8qnr6~(Q+$iU_=IEVZ&SP))$|_wWCm}t8yJ5Mp=lm|;qyOWdjAcW{t`oe zJxo_9w;Pmum4;Ed#J!J}3m-nr1K^vu_lS8FsFuNf2>n`x`!@S1cVTUbm-=U($MA|@5SuW9i{Aw`!pgHA4Bw7Ww;_=^$#fChZtDL zdnnfm>i`Ts*e&Go8PCp0{1LjAmbXF~T3d$3q1A;SYC7-l{9ouyh%sBL>AbF~d5+GE zVSKV{5cLsd4k+YQ$f$6DFl9t$WfH}KTc(Et9Ae0@d>mM*Hh3u_ zCwIr5IkA|+AqFFt;>qMgA29Nw-GYxpGFo}hfU>Ccxvl&yAOr86>O0<>K3#(gTLo30 z^{Nlvn?4(u;{wK|uC?OGn>EK9487*GJ0%C0My2zkh!ZQ`}K4dA6(kwojO_0VJEiFF1t(?Uwy>>2E8)oIVLzDJTNS50G?Yc*2zZY@v{I?~g>{EzCv9urWY5K(V#_mCwVYW(tH$8dTyCMdgVsTotLJ|3VANC zE15}F{+WyBI06Y>7qKvuCl{Nop1Gw-X}N3e*zEQlG_a>Cr7ib`u*hBE2rBnNfWYL8 zHaJ6?#Up_*>CdmBFI?l^7bC~V_!Oa-gi|2v@y@U93?0v{jnkR6&Ctjhpc}iFA=89y zhrTf_9D`NMTx5*SZ*&OG{EBgqG%ATOOdo~|i-+mBjlhhn(5f7P`$dXmazcs8jTcY< zji=Ws!!D!ixrqC7zW##Z6N+C_5I;p2KRm2+r_ZQx9*&Ynxb@&hQ`?PkxUg~UdC_W2 zW)w=sbXLeq@M!(lX4be~d$*3a!7KA7fg{s6m!{_Nan8g|US8Bw?`P;yPkkHhW@;NR&8|PdODoG&{rwWEM<)3$S5EAv9nD1J3Ntjzffyv#|^zcxBl^M1w-BcdB-`duO|6 zt9#bo)vSaxmi&VKz;8fEydlJ|saKxng&)8Re5bl+cDyk-3G}F|s!vt*sXFI;U!9)K zxjBFH*Z1DDdV=V zlG@1Xc#t^Ug~Y8Sbx3O5gT$*OKCg5CDeE+NgEvt(`5d1|J;xXLBIP2ooVy!Eue|jLryHU4@kE0^a(%_-Uw?z@eY4G6W(@b{v+rIx$>>Z^^M@sev}Vr((v`PS z2nK7ij>!$!((Ks0_KfWs+K%;(Q(CX%K4UiP)Ua<)HL&jwyTw*7>mHo8E*Rlh$efIk z?FPlZ2)3d;?&^{4u>ob>iE64n+!pTq4=4&&7(+HP6LZ82R#=HWG)9nw4%+U}7-E4# zERYuV+=aZR$C!GIr^lEy1|!bL)M>_nmG>~8CH~M{W(SshfzgfXt(BCU7-@0mxuf6L z0Po$5HEaJ|oV8*=B zG*;*~uNt;-x;zsd?a6Mtw0XM7?R0;HJO30#!4h(fNzNV_XJ%myf$@RcIPf+@FJaT%mdkiIV%4~(Y$t0-o5{zK!0a|LV@>^H*dgz_53U6Pb zk+e7+U$my2!p?&$Y;(S;rCkW7*93sC#=fVqZ*%7uRyuV^$PBkqQvN zk0F_5!3zYC$#^eL!BAV%DM7jW_L!EU>wj_#3*v3@Z@cpWFYD5o{ovSi#Y(QJ-NA zzhVuRG>94WsPausngiZ=p?O31f;awx@pDt&sOAvEaJS-(dCeQO?TRwYk_^Zpk~WJN zJ7F}k<)iF#|v8m&xi zN$>_0#7>jgnMSwP)Iv{AlS*7KSJ-~D#P+p|-hU0i6TIjD25^>m)w}AifxR!k0mYI~ zTH5QI)S_Koz`UgG${(Pp&@bO6!MCW;!>33qAr_$HHtw7paE^VA{*^D-<{|~iZ)1}D z4i(>};yM*CBVB%vWK*pZA8CA3G*I^}zUOs}dp?&d3|k+vJU*k<2Z#p*6xG5tdkNZjbL^tr~ulgZxz> zl$4#o-UN;D@2H?7Ym8Kc#SG0>g~=a6T4GYeNdt2VN7V=qqXQB4q7xyNt1F!8y56Yj 
z&8ohs++W72h-3*`x2GQ_r;xKl*_HCs!C|PIxoS{bAKxOM1jftY-NM?Yhq7y~;Akp; zr&ng(dJbA5Bf5x+Xyf`+et^c%-h;FDDB&_z*fM*AojH5#%zbiW=-g$WUq7>lZo)J< z!lgrdWQ`1ip=Bf>QW)Pjdi&R966e04R0=1uh4;1`uS^={R>Gq6+rb!8{jBdUC z-=cvi!{(}F0~@SbNG__DHErn>1!;?e92IdtGARW?tec6)F`C<2!V=9{ReIuV%kM*r z4moX>@xO4j{F0s4scVCZO)73uL42nC=y)&eXIT&Vp?w^S$H@1PiOToTiM*4tml^(= zlE^uA{R$&;GC6~7Ccmb!g!UwDtE_ZRT|dHT_~in1=|hQ3uJe=s##)4z4{_(@MbDUT zc%CF#7x(m<#vUz>XaxkKe|s=r89nr+(lYf_ozu+te+mLMKO5%j??;%E6O5T5X$`N{ zNu*^|I_>R}Gssh8THsgHzIl^l&O|#!*4Nf93D*_X(l5r$}7FvA5`Nn+mCr7|!01a0N%7q`&)`b(d7N4Ow@l$2FvL~!^UThZ#1o?YfEn}1)f#? G?0*9tubE^3 literal 0 HcmV?d00001 diff --git a/models/__pycache__/regnet.cpython-36.pyc b/models/__pycache__/regnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5d985613079e5e1ad90b3f63acf8e342d941c2d GIT binary patch literal 4572 zcmcIoNpBp-74Cg{dKOZY;zW*}5Jnb{6LBO;i4#S!6>G^QCIr#(GJ(-(b`^&`9QKT= znxaHBfdCXNz<_ed$%h z;YoMYICcjiB)$kB32I3bl7m`6zHwXXY?x>Ha1~t+CTe%J zlB{k{lT_+A(DlCDmP+QSyuP|q6jOb@-ydeh&i>X~I@#;*N|oqKg-qsbGM$dIG%2!4 zuKQc#$yR?a(S=l1&-hfc+}_U8Oy)%{i?!+FXa?O`8!6HG3`%f*;8U)yNV4Tf6hp%rw*q8E9UP^Lt>CU4xD|lI}SXH{amd=N! 
zGQdlvz7A4YgfNia0h-o}1v-B!9RrjH~4pBI`ZZcv_CPO*6~m z={U)y9yGPA*`95hah#45t>gGF)|Z$2JCi-x-%@EZ>ED;riSADy7i8^AX||oH{@z5$ zvFa+xuUzFXsDjUMa2ikzH81WBAFK1Lm4Lzhw*eW8AA`A zlH+|9;RDqIF%3BC-Xs$}SGB2bKQiuQzfiP=iY#-t?TI=_l(>M3rmro#Z7*YYp?w%( zcXcyN0rls}o`m|NIEHri$Hcee_`!ZMu3H*$EGB6jv$E7TX@X9z^;C|>Xjn|&vsD-E zidMn)rB$+SL)&xOKFdjMMF-E+pMT4vuW;yvm3!TVf04H;A7sW3=d-{Bx=@*r zrZG)~0OXlqs3!YU%{>)`EqseV{qFqx-g!mQp%F8%cQz(!j{pgCy!QAY6UB~M-kKD}SmrX_jTK=WMvyh<9>iNF%J=u;YPU3& zmUC}q_hpx*-O)A~XaNV#G613Ngm#4RX*-b}a7i+fcnGM-KJ2nTeg$UiU|7bCIaUBE zDzvDjaH5f4AxMoOfw}=%gJg^;A!^M9#t_OxCB}Fg*urB}{Rn03jb2OPbhgxaB0nHP z0jB5xQ9mToBk~rJ9~1czku{JSTGTg4@CuPdbXQ?b0Pu6XnlxtFuizK3e!8TKPNj=E z!UXDD=%g+Zd6fuhm@!1XM%-fTxh>cXWCCi|+beuW@$(tERJ_mCeM-YovOh{nG9>pSJkPkxwC?G=sIpc_c zNx(PBJR)Ge#k|Wr;Y1`Vtm!acML(g#66&r9FIu8`(So4~HI_N9qm9{3tVn%_2rck# z&pQ#Ot0bma=Lnwh$r^B2=-Gep^v+cwe2uzPA)IaxRfnpJ0Iodb`y?ZWF|Bwn*_H7) zc`TI)xVTc6>vegxF0Yy3H(4$d>c{2Wizt&YEebq7rKO(R`E<yYWQR$ZfP7v7Jwor$UJoxY< zi!nR=10_Yx4HF1CEq1i}8P5Sgb-nO5l#0wRF$o$y#X%~U7CLJB zoUoBK2imq*Ko7gM*5>eNtDn%YjWbb_O!1XaNfCV;Z=GayJK?e-*#a!dpD5xvLe{|m zwHUXCd}APR1V^$^gy5RsBGLL&a0IjA0$@X_?MkUL0^J!6HX~G@ksr?}=&~thQEyX~ z(S+vg1&XF*XIHes7u*m)K}MG9tj+!o^2KIdROn#4 zMV-lE3JZ6t>TU|CE%2v8<<7td?+6DkWkkq_0Nf*wz_`AVRwm`+lQ!A&*?EjVRh-N6 zB@rntF3CCWr#tv(09Q{Jw?s~ajQ#jU5A_PEi$Bt;D2cFeW`#h<&{h=xbg$Y=aAl2q z%j(+@s83nS)2!t=AG+rJyt=l%pQrR^0n*;+InQ8ag7^7a#tCzU{-g$8z9PR+q%WSe pLXkyERjru()Py&xyWKmK$ZHDTDs^qUE6XcyuDlht`4@)ve*qPbuCV|B literal 0 HcmV?d00001 diff --git a/models/__pycache__/resnet.cpython-36.pyc b/models/__pycache__/resnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad0b816d914e8b062f5e2da9c68e651a16134e28 GIT binary patch literal 4423 zcmbtXOOG4J5$<`C96t7GTTUFCcAO{_R{PW@h7mZ5EXa>Igdhd5z`z*HvU_)j%blTm zhAZuciv*wmU84Mi90Me$Aiu-NA;-Sv#J`YB@>S1}oRxQNBc8!j_f*$(byw9_U9(fK zdppnnv3It}*uPofveCbdC;1}?XC1~3ZuX6ip`^oFYraQ4Zg&e(Jt^6zKV8{ zn-5v*m9sA&h@>sjW*9Z^pZ+G6-NSXyyBEvmeJO4R-84K2(lCyinb=x05u$m0AO(he 
zDUwK}>x0wl_~wDw7g9uB@xJ%ltuLCt3c}+sI%s|@)|!vPAUZf5HamxC*P0IxgZMFx zJrL1a^HU5q9}Xk0iR-Qq15y?8VbE`WCIT6e;6B##$#HNXFxHI^qL7xf-VEfU@Z|li z-L38Q&7Gb1cD)Jg%i3L{(~E3RS8@YHFxZ@ROm4v9?sK*eKXB_=rDJoOJFtrLoY}17 z!ZIc0-3gMgd#4|Fk0u5_?oOwowv5NZll%=NWvMY@W3y+DnZZ)4XOD~#l#DEzp4%_n z5%V4J-H|zlq+il9CdoizE>kYZRM7Xv4ANDa=g3MFzvgBWAT+cJ(aubpk zcV1+=a@nylHfa{lur0Sep=C=V_gc299tjzVzMq6=Lb*vQLoQTl5O5B=D|;^tl9nYM z`otwd`)<{hlMDw!N)vCY?E4`+;`_>sBISM&K@cTyF&1biqQ9>yVdM|`K_n98X02XM%TN{H z@AiWv@%_KD|7>m?#>Zk~Pj-WNW-oV@0$~+j>r~B>F_UR&HQc!$|C%FTXvK~1FX75=e0}jA_ zPH#3Ihk`)A6UU|TpRsdOe}fUo5i*T7LeRLUY$^IfxqwM!od_*fBEc}Z0s`N>f<_rn zl@$+D`8o|~Li=1=|aXb-IO-tO;Yg8?;d+N~ zy(5=NNXMtG<5Rv$jMs>e%6H*IU zop1SNtjZ>Y<=aFE<}blg{*ZXHv@o^21|#|S2A(+>SG3{d3~fIFH$$5S%2kk4RsT3R5`I586;ipn z*~;7Pyxqy$Tgv?`j8H)7fB;j!APea8!r7y;MtRZj*w@`eRjIF!Qjtu<*Yf*pA#?Q{l2kO%MahN?pijs9=#1aJ=#O&Cjzk%l#7-HD;FdT8?sq!9YYj%QyOf z(zc|0)GB4EXczq~75$WYv@%u73Lg{~+NQ}IFc~r__CLpyBXbp1frk{eXq+uicRhE^ zHmu9RDlH4S+MPwU9dBMqnKZxT#>Nfi%6i-lzp+=OqFql^zU!Tc* z2qt8B%4A-C*NkQK&KkMZ=DDmz)0SS&Wt27~8DEzeUzf`WL7(8sb6KNaE=%HF$YrxA zq-9akL3+{m0Zf?1(Hcftrv6kHCWT0tfkBnCdXw%#gnT{YGt6>KnN>yPf3^ z^3#Nt_HFg<+=?MR)%(luqF3H7=+$Tb?5N0guFJ@Lzq^}BLGPXKqIW{Iz;wS?>aFB< z-rRapP<;3QR6L=AXF|%pwc_3F-xU;pR8&k}pT*M+yrgW*&XJu`WU1+~HMRjrBgH>J z&mJ?%V*2=i=Pi7p-9Krf*K&0DC`bRRC`SenkCZczVU)@bNWfGUezlcxS_nw(8#@(A z`g@4z$Y+L&yd>N7$&xDhK99R7#t9j!tS4WOB3;s{mFesH{xHISejMQ^%$xUhrcmy$ zb@88JK|6|2rB^E{@+5_UB$r4skJemRVgWNh&we95ATK2JM_?OeHR;+*rlx?eSRo|+vx^?flr^*MdR&?;{ zA4h*}G4^kE>7(crTgoq&uxG-C+og zF9K+SN>dj#5x!)@NYq7yx*-~(iMlCTqK&#Gw!}8-ws4-Z?ybw>3#kY4tD-Z>I-g&B znXA!BFN&T@*(pwBXO_&Q>OA;(JXUg?6jF3Bd?Mya+WAr?nLf_dSyD{$On1=iJdtwN z8OXUp|3DTma&@X7*q*ao$&QdkGD&qWT4A?rFe15r$P@JF9)x7@02?}jWB<<36VWSn z%tat-FY7~Jgrbfe1h1IKhBfS>Vf;sFK03X;_o>R~Ggvvwv*&p_C(AlV)YV%#n0M77 zK})ST#K?!BCqSvAxG4Hx5Ctpvf-Rk?vt*ntoQ1o~uKdECdJDerDI~&u?JZbLx?tfi zF*>f4<8Lz`wib@kt{ zel_CMRsW=ZJKsi&bv>*bye=)<%(irB7Qx`I+Cb5UKj}KAaVk|N(^yX~rSXm?iC&Qp zy1uHRGJ!7CL`YMcB|=QHu?fcHV68$L@(=QE%S(JR#lq&_f!<+HiWib;~1P@m+g 
z7|n}rU_33;V^g1G@hnX;sZC>)DA=2%lj6c`ALT`n%1n+<;};WAoLGxZ!)~S2bh)X= z@hDBSj^n?x|LpIb(l!A(|P2B}w z+TWE^X#u^dzi`%Cyagz=Qgh`1uxuLAS0A$K8;m7A!p{fYy5XM~PswzyZeuBRi^Mlb z?2@=k;vEvS8N~UUs0=?hZay#6cd5gQ2c)hc@n68*-@%J+LNILg4&R0gB7WH_w_A$onMJsK1h4x9QM#0s8f%rdOVX?b#k(tC7#3sSChV11;^WY9*m=L@Wt3%kOi2c^AP^L9`OA$*d zvVLVV{+7Z4#?<>HY!tptt@|XtMPiG@HVKQU9cmH4Od~!^PGy`X7gCwPR{K@muj&I6 z{C1Mbg!*j_60*39viVtTNnESVdZs~Io3T=_!@U8eHLi<=HQ|}EF`%DA8M#A0=W@FW z*mWk-HtC_D5&Jzx(J92j8RI-2b2y&gWxMPhcIA%Qm3RJ&h5Lm4v5&m+ruM+7vX$YO zaF*VZb7+yL$Y8h~T9UEz?Eddjdw3VlN6bO+DeMdH&qU5{NzT^k$c`YH&15DlTuUEs zTu>&1b@{unvT~RD5bcuTY9)K?%E9_NKgz_|$4DBjDU=L!H!=3^91s3vBM=;siOz;0V>FRl5Fs~(1RYwAa+6d_Rkn1oFh^0(zJ^;2qXR$9Z? zvK8ePeXMDhA#bL@TN*=Ve=TDkk};oSmbEbynzuB@;_2&n z=I3P0(;LP-dV6Cwr{dd&>7al9Q2g$OF$9l|XI#8&Fhd9}*pgilInh}|sBxFx6;2qs zGT_4CA{o6UBLcIhgjbQ0g%sHevN=R$e0wP}KK=^`+|_1kk`?OLG{7}(CJUQbhF@HR z-KLdk7E%|nDz8s}z<7H&Gm!K)E>7wF(U$ Kc`tO!-~Rw2FWl+? literal 0 HcmV?d00001 diff --git a/models/__pycache__/senet.cpython-36.pyc b/models/__pycache__/senet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a771c603f7fa7879c1f4bcee787e783ee24ab60 GIT binary patch literal 3906 zcmb7HTW=&s6|U;r^z>Zfc+JKMuuLulCTMN!4J05Gh20Po1jCAr1hPP_c30c(@wjKU zy1g4)GZJD}+NaGEe*zx!$}7L3UU{0ozzdx3bWhKCZ9w#>PFJ70pE}=n&fM#Cf_u-u zI{e!!Li|IlJr31Brjn0HC~+W^RMtoyNPb&G`@rU!Gqi{9f%kKvY~?%?%1P{DV{RY# zq;Zu;8n4ncNz+h1Y5YnPsHO^@iG!AEsW!da%6cUFoy*@n`q>~UdTG{sc=7pI$0s{M zP^t1>agy|&rdgKg-uS5Z*;#a)Q0Ke*-{1RSr}sHEPNE65rFkzK7d@Sfk|6P=sn~KmSy*IVy)H7^w z9}9h`$EuIN)~8AJU@oY43-g@X`2@3`+WOCE;Tf=sMXk1SU)1x@EsWuItl+&NXobcL zo3!mx3mryQ+Kv)*V1KZ{fi%RYeaEy;6P+caFi$TNGNU1bCHqK!h<$YVb z7~(;|B8v?YeaYH>*SPsPU6Zy*Y5Xuuv$P09V`Z7~9*nam`^va6z4uJ(epJLKgRwp% zoqd??l@0r-@0;LJ^7uT-iZmJ-KR+4kB0evSeH8CO?9*PIJQ*FCW}1bQQIsXQ@ya*v ztS?P74C7Ih=VAC4@z34elkr)yd#K}Ry!)GEGR}7=7X@7KZk!%PdiQLsl2N{!CmH$E zg3r0e#py1@Cd8{GOb|Ue4kzRBh%Uu}8|be=-N@79vvI2Q zZS>qC(RcJ~XpvtSdrVg&pX#rpjm0<6(l{FHr;%3Qq>mgc2zgs>*qda)4a>K>GLV;> 
zYX+>klPkd;`ySjUi_Q{qpCAnBg3d>%x5M!9c{D0p_?|Q%ZaUR{$H{0EhO5ocd~Z;@ z#-7+M1FGbVXW89kO-!2lK?|P$k2qQnb@J1=VCLZc-^?6e(Zn|~2Qx&8=Ibm{F$edl z*y0+CG%LP1s`%nh6<>JFN;O~b9N-Cdv1>d*+W){48fIDY#2N!MpaucF7AG zR@dKy0IYNyf?|^>y|9DYRaexvpo6Qi3$o``PNxA%{W=6_XnDi6>+uph)RJR3(977B z+ZM394diaE-Pd*Kg5hfDen}q{1g#Rb1TJD$f~Te5VfYfvGHRQVwgfW6+N(x2N@!Q$ z_8Htw1-M%?r|J_-9R@ePTQp01`Axl@3fwke&0w|%rC|WAen;P-m;M$6AZ;2aQ63eA zF8i=mf$b9IJ=ETW0I>C25CC=GWjyP*p?C*^P5*7wt^%9k_7UkT>uwM)2J&_dY@c{> z-MTO1T7L&~eixz-!TtaU>-Qk)#g^B~z=h}BrLqL>2K&;@z{Sni5#PTl;ww)zDDt}; z`8{ICrg1qDS-?$ihVEaf47KY|=L?Jsbf21JXCw^zBODBE7Yy@6cLZMJWTP0k3dqZx0q9(~kcq zZZ*;zGBtp-_#;NZsTP#eh*VI-=x>zJh4IU)V(>A3SE#L#Y+B)2bee>t=pxa^IX5&H3a8B^RtBw!}U&d)-w=B9-@$MYm#I>uGJKco#-VWulRtBlv% z@3^xH2{ zR~Z8Cq0{>iKY;ilM301Vo}|gslDs~oZwfg`LDnd%=8n=#AQeTW&OQP%5q@n-;K*2= z(pW2k0&~#4+(b|?IWymXV<~{-*H{p@=*w=~_ce4^c&c_djS+?h#=XL9Jw1-zN* z!zsO&>0{BOu@oOLxp6sbHm;5`l^J)U)2!et>08F85;4I z&oF1lG{R7gW6IY)!Q?(qzLRB~ADCOq&z+<5EH2VdKXDFJ literal 0 HcmV?d00001 diff --git a/models/__pycache__/shufflenet.cpython-36.pyc b/models/__pycache__/shufflenet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..703c5582453f2889b82dcc08f9d4ed97a783d79b GIT binary patch literal 4019 zcmbtX&6C_l6<6zPG@9A9`*l+~W3A zgWJOD*QQp-gT>)4EN*3~!&2iOEM8^td7b-DjZT9%coTJ#ukcmWE8Kiwv@V^VJvba4 z93-MGiXcvdPfk9|WcP5>_a6um6o(=hMnfTk8;fpl2Y1ro{U?PK1Cg8r?;jk*-B_eW zu$QIBSu!f(ERB+&Ek;sT#p6sK1qYc7KF;>zLJ@o*Q2Bl*^Gi;;dU{Q{0R1XH!uQgw`}F)BdlD~TgnT=IE{-iRBlgZqha1Mm7R;^Kssn{ zF;x%4ZW85r82--q$6H&6*+6XV%WjlyeJX}ozBN242-jQP_#l#7gN%zL-^xqqQ&DUV zPb5V{10^TLV5{u(>&wg7oK0cAo*KHjD0?XRJ%};u8&l)w_Qaf66MN!}jXs;Wu$p6Y z?2fImd)*k@V`u90bzl*4oStm7Te`)@>fO^1_YR{p6-iLd@a^Dd?Tx*S`x_5G4{qJ2 zhu+5CR&Uurk7Rf}>rr->4D&iC49~Sj*q_q^;UzndPlX0h*~hVXtZKk}Fe-$qNs%8S z?pifT3smfhVr|9t6uYn3L&bX1gSixS>QF;?J&q*5gom77G0ZjQveOryL$K=9a><`* zG!XEoABOP8C?UNWhL1*3QnqM+ND33RQaQIPk|Ycl-)=EUK9Dp89Vb2%?LLn+ZEfko zGQVi?bp1D>?BC6bA`z)TAXBW-MIWVa(s=owAua^6MS(mvIeW=K5P(yTO!+#uQgdui 
z+`{g|Hg-6|f!u$FFbT<8Ltsz}ctss8?iuO6?(Lv=ZHz=iZFBFehL*n-?i+G*vD!+74Gv7xlJk+>MIvVLOup*sT3@yW)l_Nw&*=JJhsO>bs=xDsbte5g0O# z66NKGnJl`aLe=ja_ddz8WQWhfydJ0FFo{x;D?b|*m0nJjme}zJF`rpqgkctdIvaEp z(gLU@e;I;E#5Fy(#;&j??&~PfOrLq|^ztIGzDzAN43#~U{8tbKs04IkqF7@#)%S#+ zKpj&yF{^f>9G5I~>vqIS-zzQpxlV6S5TSjaGsMA=-YgpZCQ(q&_mnL~GLqlH^yIflyiVc`5-k!NB)&^xlLUEOIhux*)s2dlt;t2M zlidQ|$^z{yvb#)cia;7IUxhFLpO3Nzxyeq~mmr*Ti@b(@@;Zr^Na&E#aZD~*3_tUo zb{hrX6_goytm|;>!ZN=oNa@OHSzomemPdk)qxVA>z||0d^G$xfdr&|TQ_{Hk?;$Rb z2=>=&23!XEKnYb|I zRqjV|Dk5svHP}dRUsm^^rzb3bK!XzUGw(m2xo@E*r#PU@_2@QVK+d}Cs=iMzFZ{U3 zS(-duG!&IRl$=BK;I1d16$WMBd&cMYu@^Wx=U(Mz3a-Y{pnQH4@H1H0q&BwuW zut(Ic8gy*n_L)6p)d7h;T777Mx1F*5YdR`9?FF!9YF6B;SLrH8Mrlt-jiDP2ha%-A z)oQqJVVW(k%tP|%0-y@hT(MYZZpTN;&bUvbwt()3c=Ikwm7_MmTVw0&Wn}5;m1W2l zE}W+Ug+C#vQTrv7|HO2_#wj@un&vvqel^-}rzR4{B);p{!DNiv+5)bt-XIeg&UVR?$lISqz7vRj)3|)yh^k#ZX~_~p0O1n$5IodGtlef~%Ei+_Vx;{a|ePY8OII-#vErx<+_nRW-& z1vuuv2>9>$VCSdkkkd#8c1y}S`D5t+-(>%e5#qnnWdAUmto#tR@|^O&Pj>eg807iM zezZJUe9WNS+oNu8Kos~V0FL+zC^6>N)SiH=^+rs|jzDGj{RGB#Tc@y|;(35~u}2aT zl88yCGEbE=lyO?%JZ9m?rQAK)$wheWLgYmcZOAzE2_!k5|26GaSL&AR$@OgZ(J~@4vbtu=l)KfjTefgc$L_c-?`&mIkDZT4DD7G5;yW7@k+@IB$cQNNwt*tkjz9rBz`HWK~jtAkkm^_J$gPm6&?M| zie89L%l*aZjND&}&dUAS&z#n5^jy@qZ?xtj>zdJQ?*8uDR)1ro8?VRt2dj-FZM=K$ z{hn%XE&2YnIBw)yabqXkiB;p&WMbok)eDVxRoKpxcGzva-A|&po1}5p*yyRoTN@im zJBic0v9`Y6coXkR^0=M%Rou7|XUS%IN~Uf1Ac*2TOuE^UzYpCR>uu7}<&;cz225-~ zpGM1yyibh{=r^+O&p@wEzM zLD24oSr!C;HvaqC@>Xvw@7&9&)tB4JMyQszdr(-moN3jkaejMs zY3H7D@va(LCR;b>%-yG_ce6BF#CS(^JuYC44_yTrm>pwieB|s|d-k5Q=MIdHx#vM@ z4XlATum|2rW8e(jq1zFsq$@`3IT*KP#KHc(-5;-Qg=rdh8|4}=G(KEEw|4HGbJss^ zy!-|`o9EV+H>U;cQgCN9Lr+&G)}^TTeRBK4af3}4Dol1`QDNcSPU1U71sdP(=W$U{ zakd4&YgQCDS(tYVbG37N3gW!{X*wrIq7Daz4W=ku#<8C(yCQHEksu)*Xi8u@y zv0~Mnb41dHWPDKvyt30EBi9zkhHKxi!m%YpOgXk&_}6y2NiJ?Zw*S}jXhp57?~u^b z3Ri`B(ra3xVf8FTqkV{OWiy=k4BEsck51O{xI=PdwAUIRc^@RFOCk~u0j`eqlrc2- ztO1Z58Nan6V_=bLo^-=Gf%#f8z{umH8;n}kx*|-Pc3~%JuI9k&<=c8Iix3oMdtx7L 
z7@F`~Xysm>GItlJ_A+t}Po3cVPLgo;#Qf^p?2i3zq%6@BwR#M#GFMthwI_R_Q-#D=X!Y0{s3|?qUH9$qT&N5cM;vR`pw+yRM3;- zDY73p7_UM$GY}&M{&?KMvBh~-W4R#e# zvvw<1h${h+UpP0DFjI9byy>bJa4WnlS2#@&2*M~z(oG2tiZE#&(f<29cx@Bv98sFE zP|uOK(zNi_dg<-esBqi3uN1Y*VcyNe&+9l}DZ^b;ds0zL z(x9y)Vb-e2rviy- z!c<+ogwGb{)xuG6x3A8jr@l+FMDjAp_eqvP3Rh0G!fuDTT7dv&B>a^i(7L-)Sa+f7 zUO%6}W`ae&CL?&Mtz+hbG#{v(($EN%raFar&2I)ynx%nOf^%+OQPM^mUqleEuM?4H zuJrDtIv|f8r|rgo{5v`a0g0Gqo-|-b8}`%CG9w!h!yzmXb1{PBT0mPVXUSm@c_mOe z4$A{tGlz`(^1L3e$+$hR0bqVbj(Yh@eQMV1coxU?SLUY1=ko;t@(}@YWnB(Vfvm*f zt0i#Ni`cAUWSc^)C79OeUDW8kz!~&%b(W+_BHr~qa^h^_V&ZIcF>y8x)K>`9S2X`B z`Bw$B+VLFic&m7%sKenVu&r+=(YQAX+6Qs_Lp&vxrzcjeDZGMz1i{ws;@IwA0pQ=@ zwE!U-j4j}_aufvUYtSM(ACP(&L2NFioL5Dg(NKrm=x2S%YGe4@dA&2s>}?9zbB6$X zehSLxu`u-t$swRtuXBJZQE!m^h~!n0*GL|PdCP&eSA?#rA8>}~cA{V&yYHc83n0wO zj#&bBZ~*&}i84o6|7IYkXR(zauRo*MI~?Tnv(X8BSlXrQv6d@!g@@=ksVk&T8Yf`H zJ2OAxKl=yBLA>JtHcy})JYnovC2xVZbEo4<>TTmosQseK1DHPWAg@G-$>6II59S$| z8c{o{A}_K@hlu|{t(3YV1r=$%l+K70RHU=~7T!Q9swS+4U#yP5Imd765&7;s`U~Yu zU*4dRUt3ofvA@Dk``f`y7FYV<-luuXP~d31PeK9D?Wz2qnNqV11eZ9(Ku}b0Qg0-i z*^?uJfVviJhqvOO8{Ugm;mK{Kyswt`^ZIzAPZ!?LP>_b4Dfgz14@DKlmgF9?8}%lp z9v%`xjATss7!nWyEMM*(4k`W?{~xaAteQl;NkYSdS-nm24#_wa7JhhpGuY|%x~q|5J*g{kN9Cwi`rn3nlu7?)| z`DO1v7bQ7p>oFY^iGVeAy4nJdU~vsM#R69Sg5(2AZJqX9r=U)Oqkc*LS0pWxCr$g1gCCK6{AJUA&B1`= z1_?vZ;~$+bDnStS+W7xLwqBJKd?`&e^pB3ZOB?+Zf6(>Puv_>OUXW0DKbOBcbpFIT zN(T>~m|Ck>$Wa$lY8xNvMfG+4SIHa1e8$Y&f$mW7*o*Z=fAQqvnZ>h~={ab&Zguzn DR{%bz literal 0 HcmV?d00001 diff --git a/models/__pycache__/vgg.cpython-36.pyc b/models/__pycache__/vgg.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17dda4646985e66d0511dedbb14ef2f2590c61c7 GIT binary patch literal 1676 zcmZux-EQ1O6rLH6z23hxENw+gfm|UiqS>YqMU@aL1SvPoDr%vim5}9nW;fnA_Gau& zlgKMo+DN$yPrwy--0~DW!`yPUufPp_Gv53NI@X*sbIzIZ@pr!C&33!7dH%=i-&{ie zA~(hdSUID!io)5F6>4bM`J+VEB0cl&Ss&wqKt>c6%$_IR#h`MN;5z&y&*qu8APc%jAj0|dmy(HcC>HSyR+Z!9bjm_T1 zlitSDwK!edJt;C3?X55HrKG=j2C3=J;w|)oP(otD2)Oqy6RDJso%VwuPU9j7bSpRr 
z_hpcTCsO6wiH0NX4`o|eR<6$gSmua5MWH|7TMx!7N4`i3> zdJx!%Ao!j9{jj%}9mw8m6@^*vg`8x0Z&CsMFp7tv>K$Z4CV3C%*C!{ciJWd><{kv0 zD|D5fwgATZWtvy0Tq3e%qCbNG4y2@WI%6dpQ%okX6V9B$0*p#l`G5!SD-@J0toR3` z%hd`DnjgjT4L&J8mFhhhx{m4?J3ZD;HZ9a`Y$|cbm}|pKy$O{t=G?$ZXoGezPn#y^ zrS~djG#4>ZHs!ZijK~a0&Ya_)O8Oo7d3VN3F4#U(k4jdMF_Jg!bHPUROGF;7SSu?X zL{Y<<3yvu4<`mjAoqs_`j)^V#%sE{7k=PS1V$pz?_aN3Q*)N6|_5Q-(sEk+`o}r-4 zeW_BJ1O^1{<%NocRCldUeFCLxlTe5_9es~l*Wz@Ngpusllqs98?}W#@S(ZE&+S|&~ zBk1P0VG-^1Gj)Ise<7d$pzGfpjh0c3m-28b(;^O&u4}^Ikx{Ph z!}FgSs1|I-o|_a2<#-y@r*~NgauxG7Jzcd7xIB93gS0{03@6yX59*c!)cgQ z9?M4c0N2{L==PG>kxY^xxK5-?)%zyMaKd(#;gV&rPg}0su^YY>3XcC>4MW7<_=DpKuE zRGb#-o(XWYlgh%PsOiad{x)&V3z-*P#4VXzp{KquCw*ND0+B`dYV!lqe)X$NQ(N_A q#da)+HVJ#`#tUrU*p~6Xs<8gr^j^LS!BL|Ie?Zshr}Q!JeE2V<7 Date: Sun, 10 Jul 2022 11:35:57 -0400 Subject: [PATCH 28/54] rm __pycache__ --- models/__pycache__/__init__.cpython-36.pyc | Bin 493 -> 0 bytes models/__pycache__/densenet.cpython-36.pyc | Bin 4124 -> 0 bytes models/__pycache__/dla.cpython-36.pyc | Bin 4234 -> 0 bytes models/__pycache__/dla_simple.cpython-36.pyc | Bin 4085 -> 0 bytes models/__pycache__/dpn.cpython-36.pyc | Bin 3684 -> 0 bytes models/__pycache__/efficientnet.cpython-36.pyc | Bin 4969 -> 0 bytes models/__pycache__/googlenet.cpython-36.pyc | Bin 2839 -> 0 bytes models/__pycache__/lenet.cpython-36.pyc | Bin 1077 -> 0 bytes models/__pycache__/mobilenet.cpython-36.pyc | Bin 2594 -> 0 bytes models/__pycache__/mobilenetv2.cpython-36.pyc | Bin 3099 -> 0 bytes models/__pycache__/pnasnet.cpython-36.pyc | Bin 4623 -> 0 bytes models/__pycache__/preact_resnet.cpython-36.pyc | Bin 4543 -> 0 bytes models/__pycache__/regnet.cpython-36.pyc | Bin 4572 -> 0 bytes models/__pycache__/resnet.cpython-36.pyc | Bin 4423 -> 0 bytes models/__pycache__/resnext.cpython-36.pyc | Bin 3547 -> 0 bytes models/__pycache__/senet.cpython-36.pyc | Bin 3906 -> 0 bytes models/__pycache__/shufflenet.cpython-36.pyc | Bin 4019 -> 0 bytes models/__pycache__/shufflenetv2.cpython-36.pyc | Bin 5440 -> 0 
bytes models/__pycache__/vgg.cpython-36.pyc | Bin 1676 -> 0 bytes 19 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 models/__pycache__/__init__.cpython-36.pyc delete mode 100644 models/__pycache__/densenet.cpython-36.pyc delete mode 100644 models/__pycache__/dla.cpython-36.pyc delete mode 100644 models/__pycache__/dla_simple.cpython-36.pyc delete mode 100644 models/__pycache__/dpn.cpython-36.pyc delete mode 100644 models/__pycache__/efficientnet.cpython-36.pyc delete mode 100644 models/__pycache__/googlenet.cpython-36.pyc delete mode 100644 models/__pycache__/lenet.cpython-36.pyc delete mode 100644 models/__pycache__/mobilenet.cpython-36.pyc delete mode 100644 models/__pycache__/mobilenetv2.cpython-36.pyc delete mode 100644 models/__pycache__/pnasnet.cpython-36.pyc delete mode 100644 models/__pycache__/preact_resnet.cpython-36.pyc delete mode 100644 models/__pycache__/regnet.cpython-36.pyc delete mode 100644 models/__pycache__/resnet.cpython-36.pyc delete mode 100644 models/__pycache__/resnext.cpython-36.pyc delete mode 100644 models/__pycache__/senet.cpython-36.pyc delete mode 100644 models/__pycache__/shufflenet.cpython-36.pyc delete mode 100644 models/__pycache__/shufflenetv2.cpython-36.pyc delete mode 100644 models/__pycache__/vgg.cpython-36.pyc diff --git a/models/__pycache__/__init__.cpython-36.pyc b/models/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index d7bcd5c95a5c67d70d92cbef5a4be3ad4c85ec25..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 493 zcmajbO-_U`6ae6Mkl|;*zazNAL`i10#&`l3qboK9Xn`iBZ74HwPT&Q+gmLF_x^?9h zTv^~n-ATjy9xr|E3!69&H_z|o8%F2@{dpEPzq7A?v4J3hIeMJM-~x2OBhUqxpa=S3 z0EXZSjKCN?22a3K@XWe+K#B8dXGRG&`0_qEH_pCkGKVOUxtt0*WUij@lqj_QHa?+Z zDw&{>vY9u9y4}_Fl$+=`->=QGr(;{@O+F1-ta@5hLq69{(8-d`jjNy3_C>U=S;eRr z#!Kxf&3H;kc#&(?DNgN>ym&=@WT&6P*E(%wN7J&e3Yl)Hk~&p~LH1R9Rk6D0(@qk~ eb(-f)up!T{)xr2Twz%W;cKiwY!UiAzx4r@Fc!aG0 diff --git a/models/__pycache__/densenet.cpython-36.pyc 
b/models/__pycache__/densenet.cpython-36.pyc deleted file mode 100644 index 7e67fe5a2a1d75a1656da04fa4b473d90c96abfd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4124 zcmbVPTW{mm5k9{y#7ND;I_cCl&N#7LW#`qV$s*Z!D3oTmbP2=o{F(ta}}B{|tP-caD23lB$gX1@96@b=nTaQn&M zkN>eI#J|MKWuyNN?)yv;C-%sn zS%Vg49O+`lt!Dzvc+$s=U(d8$#t~razk#S-H=WN{YQ9<0{~4kXg;9!`IWlL$5CyzZ8Z*p<4*G5h z{iV4hq)GZevoY_{nrfEU`LwPYmnQr~E8Wi=tZYdEJG`Sg1!8VM-0PXTeLqoIl7{*4 zGST+&Fv{m-OwUpd8tqLYDTmpqa%nQqPClI^N}2S`55r+LEW%KmS*F|XMn!yfG*;(* zsjcJeP`h`=*+UGR80{g=^{alrXK6c6(i7bpX5l1_vLx5-Q#F26oP{bXlAfVkVHl@T zo`>Pz#J~FoXXEqa;8?}c_~5t6WSk#NE(!|HtMTw8QU~W_nWXuFr08Wyu|K&`0ZgPj zCn1b=qhma8Y#Nu_%P{OW8;EH?bkWwyJ$G>D??9Adgounxv=&8zepfWI>shBXo7uu0 zd8Jtzr$F+|#45s+d89~T&sQ60v~f?{DoLjb*&);xiA|b!csJULqXMq)^3qSWHJ%n~ zmlm;jj;6hnv3e9KN$=)9gfMpT!_K-ZJF6GzM>td?~x#cs5XfXgf@>ewN4$IfLn({ zZPQGEfYg9(JhF%kt9b#dax%C=+~xLiPhWwyAK;y*qYX&2XnNjNVk= zC9zQ3c!hC7&L!g-oH*?67&D8D5JXqL577{*446u7={#=f-UIJlhfryO8!a%VwIiKSmrHd%IFNhhv;Ws0C?@fBMRRp6pX6+rpEa4R$jJz4xB3vuIt5q#lt!16HV00rb z8(+|3)LJ%K3r086vh@WmX04^mS}5rXLwn^9;4;)?Rt!s$FFm8S1ICtFwBxjX*cB-zV>!X51Bj+PiW}dt?BwRHw`t>XRX=K5%M2 zSpR)>uCV_?=Ky-wmbi8o06l*F4Pn5BP2y(LxK4~NMk9EEW9Qg7kWjrjW4SeO%D zg%JQs^b2sb=|Wy^E*#r1x21lJmFg!XUV-R&Po;8-6jy`)$LEy6RARVV_{RE+zNcvJ z8lw|D8Jd&`M732tmi8(597j>BI)u_PHf>IhnbRR?z6XIcqO?P;X1<_0w}Prga_M%n z)5HGZ@31K6Bb=egJ2$e&f<1JICVLRv_vnw-*{yivtv{?-@xe{4Ag{nLY=ygG z#r!q0=WChLf9>#(E2bPhiz&+hEgdqakgH#k_z>c2xaKg#drg2oLjOhqBG=#x=1Z=r zOnLtqOj(!pcR|Qs3&$(g{ASe}3xfr-5*7+E6IXoxuR(0h>?<%&#X^Ne{9xc)77(fr znN>C9xY>%7t8YLs!OBcK6NOTSuOAB^DT+>I+PGMOHy`lrBFT#c&+5~?iv;%#MIi5W zwHJnR9OG!t0Ab}me>?1FS;g7g&2ax@nsMQZQXMS&%tYG#kUs_Zda=ytk&-)esU1!8?G-~a#s diff --git a/models/__pycache__/dla.cpython-36.pyc b/models/__pycache__/dla.cpython-36.pyc deleted file mode 100644 index 71dce2325f0b5f7b2cfe305b9679c79fcefd0c3f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4234 
zcmb_fZExJh5njIYcrUTfs+y{4dufxj7sNVCcI?1tgT!{7rf`M8aa#0%f}pso6Hh1J zdC5~G;S>Zo0|pA{Zxm=h_LubYe(k6G3;B{hvm{S%ZrTDR!P(v6a9?KTnHk=0HiO$Q z|Gxd{b;kb5&RsV8AK}SffN<7l+~DTO=o`8w z+)2{TXGhP*GTvPef~R6fNRh_ke$c`7NQg=2NpvJ+=i|XZib0eo<8-~Vo9C14eyDEvcG}kEH`FsZVpX4Wo`}a znK8p6GmEC@_NhB#p##1sr#GPh28&?ru{#*>obLG%-?s7$#|whoLglRJjkw>A@yfPK|N5~UeTjt$zG811N9l7^E} zl!{EbMXR^dHB>DO<584lVfa_}&CTBKcwh9kWgLxrPsL=M^(IF-IoZuP*@+w;vJ~@&Bw%@{&t${G3W!T1w(J%t^8fIXuV%);$_{NzhtXGGP>FjioCdnfn;>jL> zzdae~+D*`Ww7~9AU)Rg8vzhl`X|+Dqq8-vPlH! zX7U=58z3;u_s}TgP+8+?E`LBTG@~=hJ9Zypb!IPU}z7^r~4}Ar_-i7@q9}lfF)pv?1*+b#yZF zB;T;j)^*K`Foy)|Ep2@Atrk@N`V-{sqs3Z z)0&QcSJA4G<80~q7i9OxP`!qOhe3e}U)HGq77UlXJ#Nz3ulgk^a_N_(OzmTc`*RcihRA=> zBjJ12%z`q_;+$PqIT|_2ilbcq5EIBZi2R%g%~u`7Jj~>ei1}k87oAXhM-O~$a|Uo> z!wtRROK#`N4t@S6k)IOLN+jo!KO?f7w^EFDDN;9WD5I15M$6L%_Mh1z`c-b@ zTl4$);_R!#4{|@R!9ndgg2N;Br*}?Vq?%$b7qi)?&WQ&@2Hcsst8C`YO@lp`e_)r^ zt6y60%KBw;&?lB}<8V+j9lUEQXCw~9NRN(v-4Ea9-A3W>Hz8A)YMpvy4@50Eq^ga? zP9CNRxGIpzV7Ki160^xnAVbI~9SBtq!%XB+o=XIo+61pd`o8NIFmbji0ZVdx*(IXa zp{*$+>3i99JBOtzr?`lgyO7GrX(_h5hL7lX&1jn|i1I$-`idSOH_6mj@k2{2@$+X4 zpk`E0d$wFf4@Q_910yvDh~6-hnNu8rL}{qEfF?wIa)MO`Y{9tn#R*J@9Dp9o^d8Jq z=795j@Vv{i;Q5p}PMjG9JgdOyIdjwsy}XZo%1t7-K}ffQrEcA5G7%}4x9If-5dvn{ z)p{-v{2p=df%Lt?(8<3A^(TK;59Wb;Ei*;HuVM9Tya`{Un(?|03#~0lKv?L)gA{_o@mxu`%z@7q z9TNKpq-&nxbn!J9I;Jd89kYZXgLd0R__$B6bfnkmvQ0zEnMJm=pNc16pa_jJ)TSf) zAJ{0{O>O;haJy{plgja9C>=%i>hSSs$qmm{Lo@C4*{9En29)yr?Gnsl}nWIstiF+~mYEiOcGs#YcKOgTLBBHE7~}*wo3oSR=M4dwiXshYzr#GI;&*-(KtW@MW#wz3Jtd`Mr zv9fcKo^|)+_kBK3h*yU2Ccv)o^`hn#T0U zjK8YjtY6sD{hco8b>XekwMLTOzFx0T{<7LC_PVAw^2_31;6qYjMn3?yPNNO0{YJah L?zFqU?;HODQm0=R diff --git a/models/__pycache__/dla_simple.cpython-36.pyc b/models/__pycache__/dla_simple.cpython-36.pyc deleted file mode 100644 index 3cf66f26834c8e7d027301dd1856c75e7d694adb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4085 zcmb_fOK%&=5$<`C96r{jm4)MMlF2(ESp_ zhpFJ6^^UL1X-3 
z@Kg+i6nP@<2O*x%gcyfU;$v*|aBokFJ#)5xxL=lIb$>93<;(P_U&y^dysHN5_io?o z-@d!~$!0KzCD?XCbor2#=usbn2nOr09h+OQjsJ!XVKMH!ZtS?+K?%ysnL7&eMEPySr(udXDty6Q2m( zyVus9ng9mU#!T0vD9zI{inN{Q+J98!M;lyw3Ho*2+=|O&f4h)};5obbdi7$1#v2+S z`D#K)i!)tU`-Lo%NvRv+yEve$L^jloG>^tvoC~G>syD~!S-KHLNfs*=MSo`B zY!3E|LowKuNn8w`igBR^vi1xX9E1xWtVK`GBVkaElvg>J|vII+km# zSuHETsAUJ%`~){vLZ}n{V(2a^%m}HehHy)~*!UL?Zq$*(P$G$TcE#Y}n^p=(P1xJH@1w-=-Ia z(0z0b-uEJw{KuFmx+=qsu2nbcp1aXPtY;Z)U}M-dj}Hax8blEkH_2$+ilVP3aaO&N zYgo?cPktXG-Ch_Y5m^>RXD7j;H%XEXWI#iQR|KfK<=UO=hL>QvdX~PXehSvgZO^+b z{K~kjYh2bdF6%xmiqf3_YG?fvi(T!ka2#@1>n0-UEfc9PJFRgbZ`-UtC%Zp}>Wx>_ zFsWRlE*mr^WmkG>(x?TZUGfOop`pDCXjfKi0@=xaoWlu~Zoz)5nen=%=2rGJtA2z< z7Pcg``mpFpo9=!Ym!wE)2A8BvgE8c&nGL)lOTCRqV9%L4P^Mkov*&BiIFNRdxRl?) z0`dbQKO;ixEiYofRPwvT{2r0FL1@si3BIv8gLC1)Epy-tu=8a|pZ|c!4~Y7IHdY50((l?a{-zQ2UyPjH6H zXRRtI+S<#+k;tl9twJP0rcx#;3n2w)(Tz+D%c#U1b$|rFUyXZy1x;_4a-aM;ec2<@ zCqnmMD*tU%{t|Cg7ldNyHLH%+wSB7#e4eyPk(bd)iG>vT2u%^1fJR2q&_qh8!{Cvk zD3rHHGbpr*jE=kwdAUwx0Mhf!sSO6T3rAWvbmQOR@e6#UD4#I|cMA3=Epx~vB5kA8 zO0)kPEJ^40X_o~oTaq|e!}3GCS;jemKs2=9S)Ta1VM;xL(r!*Zl8_qdp`Kz8_39gt zi})P5s2V&|d040xOdLJ+!Sgv3jK0QwBqu^C)gzTORVn{Tnkts1bnTLK?ZVnY*_>Kb z?wT_+4AxtQSLXqW+$OJhZNm$A>m09RcrD(>w@bPAu=_RM24tw(xo*6pvn@&WbCWAc zAt-sy6*H@_{d^vh*aMKBeFpRS*I-EUuQ}>EmNEqWUbph6kLi^OPA0+=>`j=j0=)ZF zJozKar&yt?%BYfEU-TOW1B=O>MZdY|KVI~Awf_WFbSyD9{UXQ94Q!*E`Q)%FdW5># z6aN4)MSi}lW)Kq{s%p;nYnapTq`N}WWd|jtQG0}@B*+yGN+k>&bm6qlYUHj)rra?u zZ7N_Sp4zBR)h{KXsXc0-1%y8C8V(yZ7vEADIuZcl9UH*p1 z10oNJQ2iz8$R=UwAU@iQ#zm2#26c~8@d8yP>RtIcB$k?eXX;8JsNM&uLIs73>eoVa zLuGjVOoPklQM;JPs9^Z-vKc{Z8*LFby0;{85;XIWIVf%#libCiWHa`v`U_&=?+0?a zac;Qw%zcF#w2G%lBlgIhu^D8HbGTK8Hy$EC!8(Ybw2}74GR;d#5lMS8&Ur3>N3R|2 z_cmiEk1>-I8C@Iw&R&W0X2kucK>G#ES&=q1mBs%8P!U zOL8QAV>#^)CwW5uN%7wxI2%_M^?z^bQByaXTyMO}9N+9$DSuTgRA;49LDzp(m9n3b Z?<%TaT^uy9T2A+aZl}A}?bYjb>tDE2Q9S?v diff --git a/models/__pycache__/dpn.cpython-36.pyc b/models/__pycache__/dpn.cpython-36.pyc deleted file mode 100644 index 
c4017fc7bdad3787c6a9797e83b4fcd6e682b94a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3684 zcmbVPTW=&s8Lhte^o+fZ?d&C>jY%Ldz}l=GqD0~nWtRwjceU*qk9%gT zdv;@MMv62G`(%Cre}Gqh0pfulQBO!b4L^YwIH$Tj<6W&5i5~Uos;|Df^!J@QU9-2c z674yrTIOl@ovvd_Zr|y9UH?NN9qB$1(#@QH zFmt*gES~gX@oP&%1~Pmix{++i2+yW$$`+n2X+IF{m9v|7$H`#peo`K7b+YpDP#qUr zy?pEb>8C@L9&OLD4$(=87nf5Pim#%O39LR6`D15-KWA8T@SN(I{IK( zmV+$M(&IF!`}5hr&GCxA14>a^6EU;<_Don};!H&BOkAK_dVPOlO>Dlqbk1oDU<)Vq z3_aq8X}8htPCWQ{@bb?Cv_zM@nza|kyk_EBkJK_g#%e}eL49>5(EG~7SAS#8^t7-d z(-U*pGC21x_nX=DmjZ7acIFn|=3d*@&Erhv*&r@@XPI{Qdr2|p+s?NDZBH@V+Am7g zlbH@iiIlzkQ2U2!I35+sqmyHtRETnASkqoH9%V||>>S2%FYlFctfS$$j7Nhc&kEg? zSzctZ%tqyrw)0&3cZT_+U8%hk&)d5BK~kniouN8`&DqayR}H%??^fkKmiKh@AbU8@ z^0JoRhf>XRJAZ3$1J`K z5Z0>YTfV(%wXD~0U$@rm$a3+!YMpH?8Ml2Aq?9q^#lVw#cW@W~0F#y>_e_>*GeZJBrL}x3qOnyDA%u)duS76~e28O~N(8Rl*Gd z6RACu4(+5#*>=@yG+Cc(2Yl0(RIk&|z!qDAgQ0qyDETE+iZy^hGOk){R)kdj0eH54 zDKWKk+b5Cpsa^ku)Fi4779ierG`_AN|Qo4p`j8_J2;xpp8g7 z)a!<@#nhgHWYRGf&;b^A>XlC4Q}ucNj(1J&<>jf>r37W8$r6x?EzuXStPz7FZ~t+(pmSqr>cDi%Y* zi?7Z1yMwzp0!+L^km?X0C23tZ0iODYVj6sYw-gi5<*xYcyPw(9aOzEhzE#%BHnH(R z^(LXTkL?+}@R_wybC4*PNdj@4hj08^lr9u6_#;1OZlxS1-0C}+8N0QSA!V6L%1ryo zXq4sB>@CHRRsp|G$A*c|i$YU<7=r54SJVM6wAEYg@D^pIYYjfQ1wxGvABcNE+yOuxRLi-+AnNI;Ej;toKcDD#OBWHM-o2LrOaZK zW<3gJ*6Yj-Rmn|5-uZuUwOi^dG`vAr%I+SkZxi^2>K%ZNl1GQ}XgD0;%j`btWsmWx z#oE$3Ev`^-NJaA zaTFzSIl%>&w`k?&;`{~K31wXkLt{%(H5Xr>(rZ#cuFETOO}--6-?{TFVJgF1)byc2f&`5Gm&#Dt6|>UY-0{QC%@)j03Z$vCAqZqTy+PLio zgbypDl;e5Aof7hoct}K-jj$ zUudhJqxO>MFfto*Q?5M|omV@{msmze@fDtr4ilk%$OiKnf6>bScbx8XkY51s?j}y3 z(C~opX=PnZqkc&%tN(AD-rMEq&&TOkur1;QDhM8e@6eGvEyYZLJysvvk$(iFGjnN8y^-qWW!uqCo|R@>Y3ua)q`GBRl%Jtj z0UBag`8dpkmii6K?G+uwu^gs2gP3tcb5z>Sb0dO!X7sUZnc8@!o{j0tL3b}zW!#|2&4Z3Z-L`r diff --git a/models/__pycache__/efficientnet.cpython-36.pyc b/models/__pycache__/efficientnet.cpython-36.pyc deleted file mode 100644 index 9381d184abffec93f32521b74f53b81dd03fb351..0000000000000000000000000000000000000000 GIT binary 
patch literal 0 HcmV?d00001 literal 4969 zcmai2&6C_l6<6zfG#_5CH^fW$7|d5DiFaZf0vMNV>^Qk3TP|;kR85f*8g=ii=0h{? z)~vmQhReDzMU`E}krby~D6aek9O0HLx^kfQ#yJ=Gz1Ga^c$0us{nXuRb*ta|z2AG% zwR%0c_Vh2^55Hm<|1{PP8}-{L>UR*_=op-FbI3YOU(LSNv2@MuTYabFa*NwfjE=`0 z?&9jdYjBVIPYmu0t6w>`IstkrJU~yd=&7Qo%4_JUEqZFa&Kr2T&d>1;TpQvXKR+<# zZ$#tRU&=5K zo2cvUb}z{errn)*JZcYwjMU{!M9XS4nGBOSqWM(2I~;f0qex{!mLtO#Zj0qcQjzUU zj)D`Y+SrX5U2xFVM7fKiY7jZjzHw}98BheW z&T@9qn46i=XLvD_o7)C(fU{?#ZK2rQerCUFj^0V(h|wfF!aE<3&3z1(6^J6}3Ym_l!(kE@Hct*A zN7G^0vZaOFoq)^nB$Q;IPB0K+QOBT24Yc}7K^rY)oOP_H#y&iVTTlEBw89XPKB6> z%TdZN-+LSr@OwmCQt8cauo9Jg9mKP0LM;Df}=@(j*JR z!c5b`B`3Pd3rmSBB(Aos!d7CqUsRJcj1M9xaHxu^5~~IPTM5H>7%3Horz)4AA>(T( zDu6Ip1G=uE%&L3$c9v_d5J|fxRNh5VKY-9k#9oh0pwiSB1nQ32oa#2=uJ0TJv80nN zx)P&U<-S{{-o6&F2YaWu5b#Y%6SP_IF(!$qUI7zcbT`h1fRv6c6|Ce4G zA|`*PgHS+jO{!I&LcECYoDgm09;BT+glbJZebpaJBT!#v()4+p0l|3jOMMbJyFZ*nA6=dfT&8=z>}3@AS*n#n2G#seYFk! zEKr9r=X0C0fjRygVQ)ZBZECOcOY~{Zdj~9a0h=sbmC1yQ!ka{#BT~sN^q!CsPBh>g zJxr2~u@o*YjnN{?$@kT1~xCG>6P-vRjNb4j3o(}N|bl5(Vc6Be-Noq4U$PKC>??vxv0S*R&Kxc-fz-P zAC#cUShjU@$rbYJXfDx9m@FYoh~Gg`Hy{kZ!CX2mAJSZ$0jpWdRs((1uC4WzJ+p>J z>;IvYUtx^PP0~txKLv+pCKm=k5*lh(n<=tMzt#>en^7YPyUF+Pe}Zj1jU2G&twFu z28Kw(I$o{wmHZaxB(?^lw@2lmA9lVt0c}A#!3<`PZHC~ z?~wQ|iSLnE(Utr@HCJmc&8$g^Kud_$GzpUO5XD&IH4Wx3CMv?iil|DGq9|KIep*J^ zFCwejdWLvHEb~H0CHm2n^Gj6DP!2h>mDznqyE@Ye4;Iq&;-cR}_@OaA>{&tBb)Qiy zz?~|Bu7?!K-!^~-qJWyFfXc3X1M8eLA+#!&JaFpZrDHQm9wmQDga3mAfY6?Pi+V}D zg%$7jP93_$$5pyZa6udyMFSBIqaz_z;V!T5C8>xc+WNXg$6b1&nodV$loo1fr&FBJ zl0y_l6_`o8$&l^@lp2|&fJ}QbAMi$2YJwAywW4;uUtf?3{R>)zL{4s?yY&CX9%Ew zd&|h3SByFQt`BR1l)b!?2f53ud5zbec{(xm=ImkTL*wDq4~@*B#PvBPN{!PLo!FT4 zL9DH{&;wz7)R;3YWaMny_`Uhi-YdZl{gF@7HojFP8(MG{4BUJOjL%?ipkAi z(S$U+Ys@acsKPZH?%Y7kjfUzbD$6H}$|?eDq!aDP10_o+sk)(}PFVyWp!Z?CXs$)s@cog-3S#vlqXMX)Cj^wTVf zhJdZ!xCZk*nnYubbqja#nqRgMzISwBR3({_=9XI2?C~}_j!+a4j=^lu7B~bjyr?N` z6LotPDRd-Yv+Xa{?kdlub*Rv5stH6!VWA8Kt1y5bi%1@sC&+*BX~Msn;X(Q_}h_T1Y^&X~-SR(C~Ar(dJ8`)qDxf=O~N# 
zK+&PbHX$je4cKh!1(hzt1e3S%%;$pxk%uJ9>0I`I1qb4}0m>qSZPfjjB=yB8`XEqO zQM94DsA!HxIKYYYtLLHZoStYSkL6$r9?-7{v}t5r7f3xGpS3tCt)u4~IM4mjn`;Oh zGqAzN!Zeq$T*>34trJSJPc}$UYIQ2c0H@%5g+#9H5 z_|1S#GJMsS8Lf3}&w=<{Covh2-nWS&mtT&n-OHchX>bKiFDbAVj>P9|S~wF)nPZJ^ zTZM&R9_0rUJ36}sI}utmd?pNA8)a6Zm+ZtKK)oHcHH!_eF0q*vFoheXPP=Skf4BL1W9_?iz)l&2M@B2Sj z82gLOJQnzEwB$aBU@ayL!8=CF&|ls*Tc+l$w%N8@&K)L9VLfBQ3eC2gnXLjOws0VE zMv|g%Md2B1l>~pl8pZRKJKgT#Znzhw^~X_qRKM4M*p63yPB%WiE>!tjKMjudy7Cwu>wdWVz2f^DXxGu2s(9Bw zyVvb@pyd0Tje@e0uyde_QS9|PejFw(E8aNUXt{CotSLi*E*NyT&bIO{m{z$57o!3A ztD@%x9Y0At?>F{$bN#4$9Io%n!0)c#4}0BYz1L4EC$9z3fiKsOyCUo)>xanOP8f&j zTCXpQ_&Lg}m46Gy!YjR58QGrKCaJnMj#5BDqlZFiqwSz2H$Vog z%?3thJmKIB%^@q>TyxmDwy8NgwUC3HvT$G@A`dgFI=(0M);V$>uH*udMIuWe%GN)t z%)n0@j=V%7<4o~BHldGA=r_?eHw?K9y`(kh40FLGl#m6Z z@{yJV=u95^ZF>F}giH`Bpemq;zO>!sF1h|%2%*A411jakFUBB(;vRuW5f%+lAba!VlWu~Lfsyb7epvWhChKsbr?qrFp0K_+r~CVPssu8DN>5uMmR&* zkTSWkDoHLsBtnHQNh-@ka9%P=hl!j6QKg;?y^h}xWs>6^_nUcT)ky6g`)7naiu;>+ zt?^(}ae)@1`CGuWEmgdAdN_hO*FQbf1DxF`4t=Sdj{aheRF~uIpOm3q-WSj$?|}e^ z3@b+-bJA#T-g*YCCAjuOKgB4w6};52v$HbLNDzCLGXbq}6Ar zwsR-4!i1ZSEly;m30EF*yj>aab`>~=vdXyjHaFG**{nT@ea!P|_z#}Oy!?y=pA*p- zN8yoQ5+SscUlAchm0uINN#q+Mw?LG28ikK>#^AV<6o-bX1239+=SJYSrKe}atiT?~}8fAW1II^*iZ(Y0qf-+1#i^igioFOH#aRcC7e ziGEDb$?>s=G(0W;i-YNw Pwr~`ejEY&?s9pOPZnIn9Q1kU2ou^h{KKP~o8u1`yqoDY`}h>R1& z&9c@)^$G+@A_|Gv9hp!}aqC6aDtKhy5t~E~2G))VoY3`Cypb!0s=fl$q^1pNS;ksI zNo{2|1`}3mHH14xXTo0VF6bT66;8{*>KV-uuF<+ib4AB!z95j^A1^V{(nW9(b`Y)* zP7!>B6M*zPFbf`Nr<%{CQczx^J{|7J2a>N)Aap=?U=Ls~2BxmV4aEc*>@DxkmN&lkHaf^%ly6vLhHAV-QVLtXH1HQz;A{Fp+e+qh z^$;xDpT<{wR+jllDC~~5FH(64^-C90?P7pEgvS7ZrJkS%z0y`Wuhla&7EUjk2eI2UCC;XTvyVNs|)xahr&I-Jalq)y8s k&b5Cps~70yelqM#u=~tg|L?tB@qdTKu5fH@>eC+k2aLb|0ssI2 diff --git a/models/__pycache__/mobilenet.cpython-36.pyc b/models/__pycache__/mobilenet.cpython-36.pyc deleted file mode 100644 index 2f69978e4266174b561556d05c7bd46ceb774c44..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2594 
zcmZuzU5^w+6s_v->HPo}ML>ZT<423^3YZv)Q3S*nm=G|UOf+q8SIuq@z1>4~56ezw z6O$p~Mf@MW`g7{lr}+!M=(*K1vm3EHb*tu9b@#1v&$%@>*4DxsFaO-0`HcO=j$IDM zx6tZGFoN}%;KC|+kDJ~a*ge~foPj-Xd){|U*ur_igd^>NKec-SJg)HI@#db6@I~-~ z^+I7iW^rfy+kz555BKdw!0aIkEM*7T^SA2p;Xc3<*NE-^liVNXSpn! z=uTBWtBO&RS7lm6T{%*wYxXPkq>lQPirVL*AMzS&qHl)7BF|Dt860%A2 zqTUQ=*d^;`WEU49Kv(@SjAYn3>sf+h-~LP1=Vrg1p40BK=e}eP>v`B&pxxU=l|30> zx+{mx?tWg&C_}i>mFQuWm(9{K<9Knl_zd+OG#jn{38rBUpRlPlu%?W&#vV8mJ^}m_ zTUh7;_bY$G5*IVxgior1poY5P7+yy&YAJhP8IK^R!kJcI+sZp7+RrJ4{jRaj4yTgtRFOzjTl! zd6_p!qOG#j9_8%1&@O>-O|RTen{2mRsXh4Y?ebbXah=B3V@Er+Ec&{Wm&ve5OIho% z8a4B=`E1N}CrPp*t?MNDgZ+JDYq#2yTiYs2tF1?JSk+s@gN8_THOu>{+S;pxENXN_ zyHYlr!vnQ~IM>kX5Qgz}ex9%LvwXa6QnUlVw8C#1U|ZulvTA!MD5!Ub^c0Fh@EP>!9cf;fRwU z>f$~nyqz^DY{Mul5S0_&9(n>knYlR}~MgEqAG9S8#cP=Gx77|$J8S+Ls9`a3#AZ7=I4EiK^0T1kY;1kz`V?1D6yUvslx2rzDJC6XF{evs+sB`2!Pv)qu&`x!j zKHEu#RaKzYI?r;skMfF2ueLBbuQx3k@6f~l0fOqmCR$Bwq-qlaiVfKDsUsK`b;nYK znTZed`|#NKqhsTEIp&Iv*Emo_L3N2tMCKY9qXEi;sY&WGjh4YJm~1K$NrpCFldc2n zqP6Y^9V4U+4s%gX=*+rd*U~tmr(t__kE($!-)qk+M^r$j&es zO4Ygjr{Y|yN{Y^7sWUN3a@|P^(fK+nWTHCv%c{y{A=6X+c~lSC!I;lVb*ekZrRqG$ z6Ror3EKRDcEI#UdDJz+hYDak@`)9JKtnapsKP#2&2w5dru6Lsumd_4Tl8eioEBX!; z$&eD(a|G|XFWE7YB-|Ha&l8^Tu~`2l^H?vyf&=5<&&%}m@*VknNb~;?9~o9B&oV8c z56i5mNKgIw@ByApH2R$WCq%_6K4w#A;7l23V{go2ckGV&7Mu8$H}J=N>|;;t6omIG zfIon}G4`kE7l$^+M~pxA1jV`{c-4TXxe^u4qp+aYMrzp@$Bxv7zBXm(xlSXUI_f8U z+c|aBPvo&9;C*$3MenIYy#zx-8izBERVfk=yPj#CN>#`_*4d>rfv!|0q-hKjA+n-x zyrV49Gs;ERR|HN)c~X=l6ZBO%8fw)b?=|D=5pbg%a)oi66HrzNxVS)!`alS8% zpW=DfwC*QWdU9B*GuYgt;%@EOCw;%x4@f^Sz~$2su#zRY3H3>-s&rIgUp+1JW7Euv zc$g=J)FvuN)k1ID-PV9xJ%gZ*>2lMIt0vO3@29bTvw*0_eIZ$U7=&I2dpJG{+rqT%U8{5tpe7QbA# z;M-lKBc<%nMH!`ZK0wpoLsV=4v<$FM17K@v?};(+j0C9 z6p??ITg`G%!3&*50kpvx7dU(rM?>Zpq*WQNFnd(+xdye=<0LA>dE z5K9Kx5^suaaYO8gx5UkU0|fgE%P8yO+KVO_<+^x-7)5M|O*{k3EDa>}mT9d}sZH>V zvO1$F%yvl7ZqGUpDkSY53EK7948>kUmFkkPn~9^2DXN+pcGpOwd;tMx$*GL<0Q_P?b` zxu^C3P5%JVXA|f7Bjl8Q$^P>2dvD@Syoo;v#z7xs-M93=!?xHBHfcP6f80RSEqI*) 
zuN)Axhr=S6dQ(n^$AmxK|BgLvf5&K7#?FsK$&2#zjhi0#O^^(SvJe)(%4Y3N%%U6G zW!qe<-@%}6l6VWkM3jmnI_7juca56I^;NahW%4Pazd)O3g(zx^M`&$+x$|09R<>k` zOhV+6w2tud|49p(!pexK=6)X*Oxd(7`OHMgc|RVOWsW1pJI~}ZoEbRf)Mw~0CpPsib-qnv zNi21$-h=IrX!BHUBTd^*n{VKGxw)FE1;KfCdg=oji!x;eO6Eh-KO*t*&#kt!2y520 zh}|QDT{LxASnrb6#?s&mD?z-ruErja^=b}W{g%3f>V2~Rio~x;d``mFCgH-et-3?n zG8K!uvK&jLOm0o*MCdsD>JFUsC~VqdPvNu|eIIO4*AcC>VpBH3&(wgp3;%1vouVGb zjE*Z{#RU$$DWf{E{No+g3A1aa{`do-DHz`#(8gEzohyt#RQQEcf1m)@xHzNjo*I5} zwG6wX#;c^R9wAQCkc`tbWbWk*widzeSW+wR8O8+ZN-hRU4s diff --git a/models/__pycache__/pnasnet.cpython-36.pyc b/models/__pycache__/pnasnet.cpython-36.pyc deleted file mode 100644 index 0148434237b551541da9e5ac6bc7e03aefee869f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4623 zcmb_gTXP&o74DwZI9($uV&Xh zv$mz$eOecOa(+bKsN#vAGp{`9FYrRXujeB9N}w23pYA!++m~~`@APTybvohR(|;U% zxgo?q#kt2p`!kg6uMkQMgp$gd$bmGqHMR$~X*pwi><+xog|e0NL?|b*$NtJ51kkw3 zgT|{gEogidKoeA&5So?>p$RL^np#usCt}c69o0qMQR`|0byrz;MDN1Mja&Weclt@b zJxaH49sgmfhxc~E@K!ua^vBz`^z=|CSvGo@Z113Xdrs>Ymtp;Y@Lg7T&XArr_v__J84QF>T72ctMEyhA;m&$3=n zxY-=m(d<_QQ8Y?Nc@z~^nid|Hx~mFzi27==wjbxid;O_Cg3dljulC%+$&$(4qBTmR z*(6SrtO%#`yl$`EPjs40qHJ`M6ke9=kxF{9XhqR*5@%Ty{Z0Jq>Xm!bqvXng9>&uv zx0Bg4yD~e@>9-Guqr0)bax_)R1j;nd(j?!R9c$XDjgnCa*_DBGWGLJ6WTP_oPQ8sG zcg2S_ZF>VH1C+%=j9KJDuJ8wY$$rn$U3}3T7VULZg?y~PL7fqosPXSk^`ls;ExcrA zczLp3&s%-#`LtvjA0@D47)98_e8P4+iXP14N%h5#A~hXGk=c>H%+WiQZHCEYf)CBy zJ*hdN<`RsnLQd~NaP#fJ=!ST09`ch<_C7=dS+4lnbguz_E=H!$&aO;Zi) z5m*N(RAzsKcpjU;q=@X?;8RJU(|YC*k!acXEqy!p#y+u&*)X5-l>f}b_W=4%w z%H`Msd%@Q+ET-&d&JuP++kwqKLJKw=!-hoX(nfEvdttvN3|7HyI>Q*IXTN9?&f4?! 
zHmXA2EW)GsaWtDwC%dY&O9S%4(aB_9Z~ra!w~nu(?4ALyL>#gCU6hPA6d>Cc%7#4Y zlpC*MFUu8qnr6~(Q+$iU_=IEVZ&SP))$|_wWCm}t8yJ5Mp=lm|;qyOWdjAcW{t`oe zJxo_9w;Pmum4;Ed#J!J}3m-nr1K^vu_lS8FsFuNf2>n`x`!@S1cVTUbm-=U($MA|@5SuW9i{Aw`!pgHA4Bw7Ww;_=^$#fChZtDL zdnnfm>i`Ts*e&Go8PCp0{1LjAmbXF~T3d$3q1A;SYC7-l{9ouyh%sBL>AbF~d5+GE zVSKV{5cLsd4k+YQ$f$6DFl9t$WfH}KTc(Et9Ae0@d>mM*Hh3u_ zCwIr5IkA|+AqFFt;>qMgA29Nw-GYxpGFo}hfU>Ccxvl&yAOr86>O0<>K3#(gTLo30 z^{Nlvn?4(u;{wK|uC?OGn>EK9487*GJ0%C0My2zkh!ZQ`}K4dA6(kwojO_0VJEiFF1t(?Uwy>>2E8)oIVLzDJTNS50G?Yc*2zZY@v{I?~g>{EzCv9urWY5K(V#_mCwVYW(tH$8dTyCMdgVsTotLJ|3VANC zE15}F{+WyBI06Y>7qKvuCl{Nop1Gw-X}N3e*zEQlG_a>Cr7ib`u*hBE2rBnNfWYL8 zHaJ6?#Up_*>CdmBFI?l^7bC~V_!Oa-gi|2v@y@U93?0v{jnkR6&Ctjhpc}iFA=89y zhrTf_9D`NMTx5*SZ*&OG{EBgqG%ATOOdo~|i-+mBjlhhn(5f7P`$dXmazcs8jTcY< zji=Ws!!D!ixrqC7zW##Z6N+C_5I;p2KRm2+r_ZQx9*&Ynxb@&hQ`?PkxUg~UdC_W2 zW)w=sbXLeq@M!(lX4be~d$*3a!7KA7fg{s6m!{_Nan8g|US8Bw?`P;yPkkHhW@;NR&8|PdODoG&{rwWEM<)3$S5EAv9nD1J3Ntjzffyv#|^zcxBl^M1w-BcdB-`duO|6 zt9#bo)vSaxmi&VKz;8fEydlJ|saKxng&)8Re5bl+cDyk-3G}F|s!vt*sXFI;U!9)K zxjBFH*Z1DDdV=V zlG@1Xc#t^Ug~Y8Sbx3O5gT$*OKCg5CDeE+NgEvt(`5d1|J;xXLBIP2ooVy!Eue|jLryHU4@kE0^a(%_-Uw?z@eY4G6W(@b{v+rIx$>>Z^^M@sev}Vr((v`PS z2nK7ij>!$!((Ks0_KfWs+K%;(Q(CX%K4UiP)Ua<)HL&jwyTw*7>mHo8E*Rlh$efIk z?FPlZ2)3d;?&^{4u>ob>iE64n+!pTq4=4&&7(+HP6LZ82R#=HWG)9nw4%+U}7-E4# zERYuV+=aZR$C!GIr^lEy1|!bL)M>_nmG>~8CH~M{W(SshfzgfXt(BCU7-@0mxuf6L z0Po$5HEaJ|oV8*=B zG*;*~uNt;-x;zsd?a6Mtw0XM7?R0;HJO30#!4h(fNzNV_XJ%myf$@RcIPf+@FJaT%mdkiIV%4~(Y$t0-o5{zK!0a|LV@>^H*dgz_53U6Pb zk+e7+U$my2!p?&$Y;(S;rCkW7*93sC#=fVqZ*%7uRyuV^$PBkqQvN zk0F_5!3zYC$#^eL!BAV%DM7jW_L!EU>wj_#3*v3@Z@cpWFYD5o{ovSi#Y(QJ-NA zzhVuRG>94WsPausngiZ=p?O31f;awx@pDt&sOAvEaJS-(dCeQO?TRwYk_^Zpk~WJN zJ7F}k<)iF#|v8m&xi zN$>_0#7>jgnMSwP)Iv{AlS*7KSJ-~D#P+p|-hU0i6TIjD25^>m)w}AifxR!k0mYI~ zTH5QI)S_Koz`UgG${(Pp&@bO6!MCW;!>33qAr_$HHtw7paE^VA{*^D-<{|~iZ)1}D z4i(>};yM*CBVB%vWK*pZA8CA3G*I^}zUOs}dp?&d3|k+vJU*k<2Z#p*6xG5tdkNZjbL^tr~ulgZxz> zl$4#o-UN;D@2H?7Ym8Kc#SG0>g~=a6T4GYeNdt2VN7V=qqXQB4q7xyNt1F!8y56Yj 
z&8ohs++W72h-3*`x2GQ_r;xKl*_HCs!C|PIxoS{bAKxOM1jftY-NM?Yhq7y~;Akp; zr&ng(dJbA5Bf5x+Xyf`+et^c%-h;FDDB&_z*fM*AojH5#%zbiW=-g$WUq7>lZo)J< z!lgrdWQ`1ip=Bf>QW)Pjdi&R966e04R0=1uh4;1`uS^={R>Gq6+rb!8{jBdUC z-=cvi!{(}F0~@SbNG__DHErn>1!;?e92IdtGARW?tec6)F`C<2!V=9{ReIuV%kM*r z4moX>@xO4j{F0s4scVCZO)73uL42nC=y)&eXIT&Vp?w^S$H@1PiOToTiM*4tml^(= zlE^uA{R$&;GC6~7Ccmb!g!UwDtE_ZRT|dHT_~in1=|hQ3uJe=s##)4z4{_(@MbDUT zc%CF#7x(m<#vUz>XaxkKe|s=r89nr+(lYf_ozu+te+mLMKO5%j??;%E6O5T5X$`N{ zNu*^|I_>R}Gssh8THsgHzIl^l&O|#!*4Nf93D*_X(l5r$}7FvA5`Nn+mCr7|!01a0N%7q`&)`b(d7N4Ow@l$2FvL~!^UThZ#1o?YfEn}1)f#? G?0*9tubE^3 diff --git a/models/__pycache__/regnet.cpython-36.pyc b/models/__pycache__/regnet.cpython-36.pyc deleted file mode 100644 index b5d985613079e5e1ad90b3f63acf8e342d941c2d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4572 zcmcIoNpBp-74Cg{dKOZY;zW*}5Jnb{6LBO;i4#S!6>G^QCIr#(GJ(-(b`^&`9QKT= znxaHBfdCXNz<_ed$%h z;YoMYICcjiB)$kB32I3bl7m`6zHwXXY?x>Ha1~t+CTe%J zlB{k{lT_+A(DlCDmP+QSyuP|q6jOb@-ydeh&i>X~I@#;*N|oqKg-qsbGM$dIG%2!4 zuKQc#$yR?a(S=l1&-hfc+}_U8Oy)%{i?!+FXa?O`8!6HG3`%f*;8U)yNV4Tf6hp%rw*q8E9UP^Lt>CU4xD|lI}SXH{amd=N! 
zGQdlvz7A4YgfNia0h-o}1v-B!9RrjH~4pBI`ZZcv_CPO*6~m z={U)y9yGPA*`95hah#45t>gGF)|Z$2JCi-x-%@EZ>ED;riSADy7i8^AX||oH{@z5$ zvFa+xuUzFXsDjUMa2ikzH81WBAFK1Lm4Lzhw*eW8AA`A zlH+|9;RDqIF%3BC-Xs$}SGB2bKQiuQzfiP=iY#-t?TI=_l(>M3rmro#Z7*YYp?w%( zcXcyN0rls}o`m|NIEHri$Hcee_`!ZMu3H*$EGB6jv$E7TX@X9z^;C|>Xjn|&vsD-E zidMn)rB$+SL)&xOKFdjMMF-E+pMT4vuW;yvm3!TVf04H;A7sW3=d-{Bx=@*r zrZG)~0OXlqs3!YU%{>)`EqseV{qFqx-g!mQp%F8%cQz(!j{pgCy!QAY6UB~M-kKD}SmrX_jTK=WMvyh<9>iNF%J=u;YPU3& zmUC}q_hpx*-O)A~XaNV#G613Ngm#4RX*-b}a7i+fcnGM-KJ2nTeg$UiU|7bCIaUBE zDzvDjaH5f4AxMoOfw}=%gJg^;A!^M9#t_OxCB}Fg*urB}{Rn03jb2OPbhgxaB0nHP z0jB5xQ9mToBk~rJ9~1czku{JSTGTg4@CuPdbXQ?b0Pu6XnlxtFuizK3e!8TKPNj=E z!UXDD=%g+Zd6fuhm@!1XM%-fTxh>cXWCCi|+beuW@$(tERJ_mCeM-YovOh{nG9>pSJkPkxwC?G=sIpc_c zNx(PBJR)Ge#k|Wr;Y1`Vtm!acML(g#66&r9FIu8`(So4~HI_N9qm9{3tVn%_2rck# z&pQ#Ot0bma=Lnwh$r^B2=-Gep^v+cwe2uzPA)IaxRfnpJ0Iodb`y?ZWF|Bwn*_H7) zc`TI)xVTc6>vegxF0Yy3H(4$d>c{2Wizt&YEebq7rKO(R`E<yYWQR$ZfP7v7Jwor$UJoxY< zi!nR=10_Yx4HF1CEq1i}8P5Sgb-nO5l#0wRF$o$y#X%~U7CLJB zoUoBK2imq*Ko7gM*5>eNtDn%YjWbb_O!1XaNfCV;Z=GayJK?e-*#a!dpD5xvLe{|m zwHUXCd}APR1V^$^gy5RsBGLL&a0IjA0$@X_?MkUL0^J!6HX~G@ksr?}=&~thQEyX~ z(S+vg1&XF*XIHes7u*m)K}MG9tj+!o^2KIdROn#4 zMV-lE3JZ6t>TU|CE%2v8<<7td?+6DkWkkq_0Nf*wz_`AVRwm`+lQ!A&*?EjVRh-N6 zB@rntF3CCWr#tv(09Q{Jw?s~ajQ#jU5A_PEi$Bt;D2cFeW`#h<&{h=xbg$Y=aAl2q z%j(+@s83nS)2!t=AG+rJyt=l%pQrR^0n*;+InQ8ag7^7a#tCzU{-g$8z9PR+q%WSe pLXkyERjru()Py&xyWKmK$ZHDTDs^qUE6XcyuDlht`4@)ve*qPbuCV|B diff --git a/models/__pycache__/resnet.cpython-36.pyc b/models/__pycache__/resnet.cpython-36.pyc deleted file mode 100644 index ad0b816d914e8b062f5e2da9c68e651a16134e28..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4423 zcmbtXOOG4J5$<`C96t7GTTUFCcAO{_R{PW@h7mZ5EXa>Igdhd5z`z*HvU_)j%blTm zhAZuciv*wmU84Mi90Me$Aiu-NA;-Sv#J`YB@>S1}oRxQNBc8!j_f*$(byw9_U9(fK zdppnnv3It}*uPofveCbdC;1}?XC1~3ZuX6ip`^oFYraQ4Zg&e(Jt^6zKV8{ zn-5v*m9sA&h@>sjW*9Z^pZ+G6-NSXyyBEvmeJO4R-84K2(lCyinb=x05u$m0AO(he 
zDUwK}>x0wl_~wDw7g9uB@xJ%ltuLCt3c}+sI%s|@)|!vPAUZf5HamxC*P0IxgZMFx zJrL1a^HU5q9}Xk0iR-Qq15y?8VbE`WCIT6e;6B##$#HNXFxHI^qL7xf-VEfU@Z|li z-L38Q&7Gb1cD)Jg%i3L{(~E3RS8@YHFxZ@ROm4v9?sK*eKXB_=rDJoOJFtrLoY}17 z!ZIc0-3gMgd#4|Fk0u5_?oOwowv5NZll%=NWvMY@W3y+DnZZ)4XOD~#l#DEzp4%_n z5%V4J-H|zlq+il9CdoizE>kYZRM7Xv4ANDa=g3MFzvgBWAT+cJ(aubpk zcV1+=a@nylHfa{lur0Sep=C=V_gc299tjzVzMq6=Lb*vQLoQTl5O5B=D|;^tl9nYM z`otwd`)<{hlMDw!N)vCY?E4`+;`_>sBISM&K@cTyF&1biqQ9>yVdM|`K_n98X02XM%TN{H z@AiWv@%_KD|7>m?#>Zk~Pj-WNW-oV@0$~+j>r~B>F_UR&HQc!$|C%FTXvK~1FX75=e0}jA_ zPH#3Ihk`)A6UU|TpRsdOe}fUo5i*T7LeRLUY$^IfxqwM!od_*fBEc}Z0s`N>f<_rn zl@$+D`8o|~Li=1=|aXb-IO-tO;Yg8?;d+N~ zy(5=NNXMtG<5Rv$jMs>e%6H*IU zop1SNtjZ>Y<=aFE<}blg{*ZXHv@o^21|#|S2A(+>SG3{d3~fIFH$$5S%2kk4RsT3R5`I586;ipn z*~;7Pyxqy$Tgv?`j8H)7fB;j!APea8!r7y;MtRZj*w@`eRjIF!Qjtu<*Yf*pA#?Q{l2kO%MahN?pijs9=#1aJ=#O&Cjzk%l#7-HD;FdT8?sq!9YYj%QyOf z(zc|0)GB4EXczq~75$WYv@%u73Lg{~+NQ}IFc~r__CLpyBXbp1frk{eXq+uicRhE^ zHmu9RDlH4S+MPwU9dBMqnKZxT#>Nfi%6i-lzp+=OqFql^zU!Tc* z2qt8B%4A-C*NkQK&KkMZ=DDmz)0SS&Wt27~8DEzeUzf`WL7(8sb6KNaE=%HF$YrxA zq-9akL3+{m0Zf?1(Hcftrv6kHCWT0tfkBnCdXw%#gnT{YGt6>KnN>yPf3^ z^3#Nt_HFg<+=?MR)%(luqF3H7=+$Tb?5N0guFJ@Lzq^}BLGPXKqIW{Iz;wS?>aFB< z-rRapP<;3QR6L=AXF|%pwc_3F-xU;pR8&k}pT*M+yrgW*&XJu`WU1+~HMRjrBgH>J z&mJ?%V*2=i=Pi7p-9Krf*K&0DC`bRRC`SenkCZczVU)@bNWfGUezlcxS_nw(8#@(A z`g@4z$Y+L&yd>N7$&xDhK99R7#t9j!tS4WOB3;s{mFesH{xHISejMQ^%$xUhrcmy$ zb@88JK|6|2rB^E{@+5_UB$r4skJemRVgWNh&we95ATK2JM_?OeHR;+*rlx?eSRo|+vx^?flr^*MdR&?;{ zA4h*}G4^kE>7(crTgoq&uxG-C+og zF9K+SN>dj#5x!)@NYq7yx*-~(iMlCTqK&#Gw!}8-ws4-Z?ybw>3#kY4tD-Z>I-g&B znXA!BFN&T@*(pwBXO_&Q>OA;(JXUg?6jF3Bd?Mya+WAr?nLf_dSyD{$On1=iJdtwN z8OXUp|3DTma&@X7*q*ao$&QdkGD&qWT4A?rFe15r$P@JF9)x7@02?}jWB<<36VWSn z%tat-FY7~Jgrbfe1h1IKhBfS>Vf;sFK03X;_o>R~Ggvvwv*&p_C(AlV)YV%#n0M77 zK})ST#K?!BCqSvAxG4Hx5Ctpvf-Rk?vt*ntoQ1o~uKdECdJDerDI~&u?JZbLx?tfi zF*>f4<8Lz`wib@kt{ zel_CMRsW=ZJKsi&bv>*bye=)<%(irB7Qx`I+Cb5UKj}KAaVk|N(^yX~rSXm?iC&Qp zy1uHRGJ!7CL`YMcB|=QHu?fcHV68$L@(=QE%S(JR#lq&_f!<+HiWib;~1P@m+g 
z7|n}rU_33;V^g1G@hnX;sZC>)DA=2%lj6c`ALT`n%1n+<;};WAoLGxZ!)~S2bh)X= z@hDBSj^n?x|LpIb(l!A(|P2B}w z+TWE^X#u^dzi`%Cyagz=Qgh`1uxuLAS0A$K8;m7A!p{fYy5XM~PswzyZeuBRi^Mlb z?2@=k;vEvS8N~UUs0=?hZay#6cd5gQ2c)hc@n68*-@%J+LNILg4&R0gB7WH_w_A$onMJsK1h4x9QM#0s8f%rdOVX?b#k(tC7#3sSChV11;^WY9*m=L@Wt3%kOi2c^AP^L9`OA$*d zvVLVV{+7Z4#?<>HY!tptt@|XtMPiG@HVKQU9cmH4Od~!^PGy`X7gCwPR{K@muj&I6 z{C1Mbg!*j_60*39viVtTNnESVdZs~Io3T=_!@U8eHLi<=HQ|}EF`%DA8M#A0=W@FW z*mWk-HtC_D5&Jzx(J92j8RI-2b2y&gWxMPhcIA%Qm3RJ&h5Lm4v5&m+ruM+7vX$YO zaF*VZb7+yL$Y8h~T9UEz?Eddjdw3VlN6bO+DeMdH&qU5{NzT^k$c`YH&15DlTuUEs zTu>&1b@{unvT~RD5bcuTY9)K?%E9_NKgz_|$4DBjDU=L!H!=3^91s3vBM=;siOz;0V>FRl5Fs~(1RYwAa+6d_Rkn1oFh^0(zJ^;2qXR$9Z? zvK8ePeXMDhA#bL@TN*=Ve=TDkk};oSmbEbynzuB@;_2&n z=I3P0(;LP-dV6Cwr{dd&>7al9Q2g$OF$9l|XI#8&Fhd9}*pgilInh}|sBxFx6;2qs zGT_4CA{o6UBLcIhgjbQ0g%sHevN=R$e0wP}KK=^`+|_1kk`?OLG{7}(CJUQbhF@HR z-KLdk7E%|nDz8s}z<7H&Gm!K)E>7wF(U$ Kc`tO!-~Rw2FWl+? diff --git a/models/__pycache__/senet.cpython-36.pyc b/models/__pycache__/senet.cpython-36.pyc deleted file mode 100644 index 5a771c603f7fa7879c1f4bcee787e783ee24ab60..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3906 zcmb7HTW=&s6|U;r^z>Zfc+JKMuuLulCTMN!4J05Gh20Po1jCAr1hPP_c30c(@wjKU zy1g4)GZJD}+NaGEe*zx!$}7L3UU{0ozzdx3bWhKCZ9w#>PFJ70pE}=n&fM#Cf_u-u zI{e!!Li|IlJr31Brjn0HC~+W^RMtoyNPb&G`@rU!Gqi{9f%kKvY~?%?%1P{DV{RY# zq;Zu;8n4ncNz+h1Y5YnPsHO^@iG!AEsW!da%6cUFoy*@n`q>~UdTG{sc=7pI$0s{M zP^t1>agy|&rdgKg-uS5Z*;#a)Q0Ke*-{1RSr}sHEPNE65rFkzK7d@Sfk|6P=sn~KmSy*IVy)H7^w z9}9h`$EuIN)~8AJU@oY43-g@X`2@3`+WOCE;Tf=sMXk1SU)1x@EsWuItl+&NXobcL zo3!mx3mryQ+Kv)*V1KZ{fi%RYeaEy;6P+caFi$TNGNU1bCHqK!h<$YVb z7~(;|B8v?YeaYH>*SPsPU6Zy*Y5Xuuv$P09V`Z7~9*nam`^va6z4uJ(epJLKgRwp% zoqd??l@0r-@0;LJ^7uT-iZmJ-KR+4kB0evSeH8CO?9*PIJQ*FCW}1bQQIsXQ@ya*v ztS?P74C7Ih=VAC4@z34elkr)yd#K}Ry!)GEGR}7=7X@7KZk!%PdiQLsl2N{!CmH$E zg3r0e#py1@Cd8{GOb|Ue4kzRBh%Uu}8|be=-N@79vvI2Q zZS>qC(RcJ~XpvtSdrVg&pX#rpjm0<6(l{FHr;%3Qq>mgc2zgs>*qda)4a>K>GLV;> 
zYX+>klPkd;`ySjUi_Q{qpCAnBg3d>%x5M!9c{D0p_?|Q%ZaUR{$H{0EhO5ocd~Z;@ z#-7+M1FGbVXW89kO-!2lK?|P$k2qQnb@J1=VCLZc-^?6e(Zn|~2Qx&8=Ibm{F$edl z*y0+CG%LP1s`%nh6<>JFN;O~b9N-Cdv1>d*+W){48fIDY#2N!MpaucF7AG zR@dKy0IYNyf?|^>y|9DYRaexvpo6Qi3$o``PNxA%{W=6_XnDi6>+uph)RJR3(977B z+ZM394diaE-Pd*Kg5hfDen}q{1g#Rb1TJD$f~Te5VfYfvGHRQVwgfW6+N(x2N@!Q$ z_8Htw1-M%?r|J_-9R@ePTQp01`Axl@3fwke&0w|%rC|WAen;P-m;M$6AZ;2aQ63eA zF8i=mf$b9IJ=ETW0I>C25CC=GWjyP*p?C*^P5*7wt^%9k_7UkT>uwM)2J&_dY@c{> z-MTO1T7L&~eixz-!TtaU>-Qk)#g^B~z=h}BrLqL>2K&;@z{Sni5#PTl;ww)zDDt}; z`8{ICrg1qDS-?$ihVEaf47KY|=L?Jsbf21JXCw^zBODBE7Yy@6cLZMJWTP0k3dqZx0q9(~kcq zZZ*;zGBtp-_#;NZsTP#eh*VI-=x>zJh4IU)V(>A3SE#L#Y+B)2bee>t=pxa^IX5&H3a8B^RtBw!}U&d)-w=B9-@$MYm#I>uGJKco#-VWulRtBlv% z@3^xH2{ zR~Z8Cq0{>iKY;ilM301Vo}|gslDs~oZwfg`LDnd%=8n=#AQeTW&OQP%5q@n-;K*2= z(pW2k0&~#4+(b|?IWymXV<~{-*H{p@=*w=~_ce4^c&c_djS+?h#=XL9Jw1-zN* z!zsO&>0{BOu@oOLxp6sbHm;5`l^J)U)2!et>08F85;4I z&oF1lG{R7gW6IY)!Q?(qzLRB~ADCOq&z+<5EH2VdKXDFJ diff --git a/models/__pycache__/shufflenet.cpython-36.pyc b/models/__pycache__/shufflenet.cpython-36.pyc deleted file mode 100644 index 703c5582453f2889b82dcc08f9d4ed97a783d79b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4019 zcmbtX&6C_l6<6zPG@9A9`*l+~W3A zgWJOD*QQp-gT>)4EN*3~!&2iOEM8^td7b-DjZT9%coTJ#ukcmWE8Kiwv@V^VJvba4 z93-MGiXcvdPfk9|WcP5>_a6um6o(=hMnfTk8;fpl2Y1ro{U?PK1Cg8r?;jk*-B_eW zu$QIBSu!f(ERB+&Ek;sT#p6sK1qYc7KF;>zLJ@o*Q2Bl*^Gi;;dU{Q{0R1XH!uQgw`}F)BdlD~TgnT=IE{-iRBlgZqha1Mm7R;^Kssn{ zF;x%4ZW85r82--q$6H&6*+6XV%WjlyeJX}ozBN242-jQP_#l#7gN%zL-^xqqQ&DUV zPb5V{10^TLV5{u(>&wg7oK0cAo*KHjD0?XRJ%};u8&l)w_Qaf66MN!}jXs;Wu$p6Y z?2fImd)*k@V`u90bzl*4oStm7Te`)@>fO^1_YR{p6-iLd@a^Dd?Tx*S`x_5G4{qJ2 zhu+5CR&Uurk7Rf}>rr->4D&iC49~Sj*q_q^;UzndPlX0h*~hVXtZKk}Fe-$qNs%8S z?pifT3smfhVr|9t6uYn3L&bX1gSixS>QF;?J&q*5gom77G0ZjQveOryL$K=9a><`* zG!XEoABOP8C?UNWhL1*3QnqM+ND33RQaQIPk|Ycl-)=EUK9Dp89Vb2%?LLn+ZEfko zGQVi?bp1D>?BC6bA`z)TAXBW-MIWVa(s=owAua^6MS(mvIeW=K5P(yTO!+#uQgdui 
z+`{g|Hg-6|f!u$FFbT<8Ltsz}ctss8?iuO6?(Lv=ZHz=iZFBFehL*n-?i+G*vD!+74Gv7xlJk+>MIvVLOup*sT3@yW)l_Nw&*=JJhsO>bs=xDsbte5g0O# z66NKGnJl`aLe=ja_ddz8WQWhfydJ0FFo{x;D?b|*m0nJjme}zJF`rpqgkctdIvaEp z(gLU@e;I;E#5Fy(#;&j??&~PfOrLq|^ztIGzDzAN43#~U{8tbKs04IkqF7@#)%S#+ zKpj&yF{^f>9G5I~>vqIS-zzQpxlV6S5TSjaGsMA=-YgpZCQ(q&_mnL~GLqlH^yIflyiVc`5-k!NB)&^xlLUEOIhux*)s2dlt;t2M zlidQ|$^z{yvb#)cia;7IUxhFLpO3Nzxyeq~mmr*Ti@b(@@;Zr^Na&E#aZD~*3_tUo zb{hrX6_goytm|;>!ZN=oNa@OHSzomemPdk)qxVA>z||0d^G$xfdr&|TQ_{Hk?;$Rb z2=>=&23!XEKnYb|I zRqjV|Dk5svHP}dRUsm^^rzb3bK!XzUGw(m2xo@E*r#PU@_2@QVK+d}Cs=iMzFZ{U3 zS(-duG!&IRl$=BK;I1d16$WMBd&cMYu@^Wx=U(Mz3a-Y{pnQH4@H1H0q&BwuW zut(Ic8gy*n_L)6p)d7h;T777Mx1F*5YdR`9?FF!9YF6B;SLrH8Mrlt-jiDP2ha%-A z)oQqJVVW(k%tP|%0-y@hT(MYZZpTN;&bUvbwt()3c=Ikwm7_MmTVw0&Wn}5;m1W2l zE}W+Ug+C#vQTrv7|HO2_#wj@un&vvqel^-}rzR4{B);p{!DNiv+5)bt-XIeg&UVR?$lISqz7vRj)3|)yh^k#ZX~_~p0O1n$5IodGtlef~%Ei+_Vx;{a|ePY8OII-#vErx<+_nRW-& z1vuuv2>9>$VCSdkkkd#8c1y}S`D5t+-(>%e5#qnnWdAUmto#tR@|^O&Pj>eg807iM zezZJUe9WNS+oNu8Kos~V0FL+zC^6>N)SiH=^+rs|jzDGj{RGB#Tc@y|;(35~u}2aT zl88yCGEbE=lyO?%JZ9m?rQAK)$wheWLgYmcZOAzE2_!k5|26GaSL&AR$@OgZ(J~@4vbtu=l)KfjTefgc$L_c-?`&mIkDZT4DD7G5;yW7@k+@IB$cQNNwt*tkjz9rBz`HWK~jtAkkm^_J$gPm6&?M| zie89L%l*aZjND&}&dUAS&z#n5^jy@qZ?xtj>zdJQ?*8uDR)1ro8?VRt2dj-FZM=K$ z{hn%XE&2YnIBw)yabqXkiB;p&WMbok)eDVxRoKpxcGzva-A|&po1}5p*yyRoTN@im zJBic0v9`Y6coXkR^0=M%Rou7|XUS%IN~Uf1Ac*2TOuE^UzYpCR>uu7}<&;cz225-~ zpGM1yyibh{=r^+O&p@wEzM zLD24oSr!C;HvaqC@>Xvw@7&9&)tB4JMyQszdr(-moN3jkaejMs zY3H7D@va(LCR;b>%-yG_ce6BF#CS(^JuYC44_yTrm>pwieB|s|d-k5Q=MIdHx#vM@ z4XlATum|2rW8e(jq1zFsq$@`3IT*KP#KHc(-5;-Qg=rdh8|4}=G(KEEw|4HGbJss^ zy!-|`o9EV+H>U;cQgCN9Lr+&G)}^TTeRBK4af3}4Dol1`QDNcSPU1U71sdP(=W$U{ zakd4&YgQCDS(tYVbG37N3gW!{X*wrIq7Daz4W=ku#<8C(yCQHEksu)*Xi8u@y zv0~Mnb41dHWPDKvyt30EBi9zkhHKxi!m%YpOgXk&_}6y2NiJ?Zw*S}jXhp57?~u^b z3Ri`B(ra3xVf8FTqkV{OWiy=k4BEsck51O{xI=PdwAUIRc^@RFOCk~u0j`eqlrc2- ztO1Z58Nan6V_=bLo^-=Gf%#f8z{umH8;n}kx*|-Pc3~%JuI9k&<=c8Iix3oMdtx7L 
z7@F`~Xysm>GItlJ_A+t}Po3cVPLgo;#Qf^p?2i3zq%6@BwR#M#GFMthwI_R_Q-#D=X!Y0{s3|?qUH9$qT&N5cM;vR`pw+yRM3;- zDY73p7_UM$GY}&M{&?KMvBh~-W4R#e# zvvw<1h${h+UpP0DFjI9byy>bJa4WnlS2#@&2*M~z(oG2tiZE#&(f<29cx@Bv98sFE zP|uOK(zNi_dg<-esBqi3uN1Y*VcyNe&+9l}DZ^b;ds0zL z(x9y)Vb-e2rviy- z!c<+ogwGb{)xuG6x3A8jr@l+FMDjAp_eqvP3Rh0G!fuDTT7dv&B>a^i(7L-)Sa+f7 zUO%6}W`ae&CL?&Mtz+hbG#{v(($EN%raFar&2I)ynx%nOf^%+OQPM^mUqleEuM?4H zuJrDtIv|f8r|rgo{5v`a0g0Gqo-|-b8}`%CG9w!h!yzmXb1{PBT0mPVXUSm@c_mOe z4$A{tGlz`(^1L3e$+$hR0bqVbj(Yh@eQMV1coxU?SLUY1=ko;t@(}@YWnB(Vfvm*f zt0i#Ni`cAUWSc^)C79OeUDW8kz!~&%b(W+_BHr~qa^h^_V&ZIcF>y8x)K>`9S2X`B z`Bw$B+VLFic&m7%sKenVu&r+=(YQAX+6Qs_Lp&vxrzcjeDZGMz1i{ws;@IwA0pQ=@ zwE!U-j4j}_aufvUYtSM(ACP(&L2NFioL5Dg(NKrm=x2S%YGe4@dA&2s>}?9zbB6$X zehSLxu`u-t$swRtuXBJZQE!m^h~!n0*GL|PdCP&eSA?#rA8>}~cA{V&yYHc83n0wO zj#&bBZ~*&}i84o6|7IYkXR(zauRo*MI~?Tnv(X8BSlXrQv6d@!g@@=ksVk&T8Yf`H zJ2OAxKl=yBLA>JtHcy})JYnovC2xVZbEo4<>TTmosQseK1DHPWAg@G-$>6II59S$| z8c{o{A}_K@hlu|{t(3YV1r=$%l+K70RHU=~7T!Q9swS+4U#yP5Imd765&7;s`U~Yu zU*4dRUt3ofvA@Dk``f`y7FYV<-luuXP~d31PeK9D?Wz2qnNqV11eZ9(Ku}b0Qg0-i z*^?uJfVviJhqvOO8{Ugm;mK{Kyswt`^ZIzAPZ!?LP>_b4Dfgz14@DKlmgF9?8}%lp z9v%`xjATss7!nWyEMM*(4k`W?{~xaAteQl;NkYSdS-nm24#_wa7JhhpGuY|%x~q|5J*g{kN9Cwi`rn3nlu7?)| z`DO1v7bQ7p>oFY^iGVeAy4nJdU~vsM#R69Sg5(2AZJqX9r=U)Oqkc*LS0pWxCr$g1gCCK6{AJUA&B1`= z1_?vZ;~$+bDnStS+W7xLwqBJKd?`&e^pB3ZOB?+Zf6(>Puv_>OUXW0DKbOBcbpFIT zN(T>~m|Ck>$Wa$lY8xNvMfG+4SIHa1e8$Y&f$mW7*o*Z=fAQqvnZ>h~={ab&Zguzn DR{%bz diff --git a/models/__pycache__/vgg.cpython-36.pyc b/models/__pycache__/vgg.cpython-36.pyc deleted file mode 100644 index 17dda4646985e66d0511dedbb14ef2f2590c61c7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1676 zcmZux-EQ1O6rLH6z23hxENw+gfm|UiqS>YqMU@aL1SvPoDr%vim5}9nW;fnA_Gau& zlgKMo+DN$yPrwy--0~DW!`yPUufPp_Gv53NI@X*sbIzIZ@pr!C&33!7dH%=i-&{ie zA~(hdSUID!io)5F6>4bM`J+VEB0cl&Ss&wqKt>c6%$_IR#h`MN;5z&y&*qu8APc%jAj0|dmy(HcC>HSyR+Z!9bjm_T1 zlitSDwK!edJt;C3?X55HrKG=j2C3=J;w|)oP(otD2)Oqy6RDJso%VwuPU9j7bSpRr 
z_hpcTCsO6wiH0NX4`o|eR<6$gSmua5MWH|7TMx!7N4`i3> zdJx!%Ao!j9{jj%}9mw8m6@^*vg`8x0Z&CsMFp7tv>K$Z4CV3C%*C!{ciJWd><{kv0 zD|D5fwgATZWtvy0Tq3e%qCbNG4y2@WI%6dpQ%okX6V9B$0*p#l`G5!SD-@J0toR3` z%hd`DnjgjT4L&J8mFhhhx{m4?J3ZD;HZ9a`Y$|cbm}|pKy$O{t=G?$ZXoGezPn#y^ zrS~djG#4>ZHs!ZijK~a0&Ya_)O8Oo7d3VN3F4#U(k4jdMF_Jg!bHPUROGF;7SSu?X zL{Y<<3yvu4<`mjAoqs_`j)^V#%sE{7k=PS1V$pz?_aN3Q*)N6|_5Q-(sEk+`o}r-4 zeW_BJ1O^1{<%NocRCldUeFCLxlTe5_9es~l*Wz@Ngpusllqs98?}W#@S(ZE&+S|&~ zBk1P0VG-^1Gj)Ise<7d$pzGfpjh0c3m-28b(;^O&u4}^Ikx{Ph z!}FgSs1|I-o|_a2<#-y@r*~NgauxG7Jzcd7xIB93gS0{03@6yX59*c!)cgQ z9?M4c0N2{L==PG>kxY^xxK5-?)%zyMaKd(#;gV&rPg}0su^YY>3XcC>4MW7<_=DpKuE zRGb#-o(XWYlgh%PsOiad{x)&V3z-*P#4VXzp{KquCw*ND0+B`dYV!lqe)X$NQ(N_A q#da)+HVJ#`#tUrU*p~6Xs<8gr^j^LS!BL|Ie?Zshr}Q!JeE2V<7 Date: Sun, 10 Jul 2022 11:46:53 -0400 Subject: [PATCH 29/54] Add resnet num_classes --- models/resnet.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/models/resnet.py b/models/resnet.py index b77694cef..aa08cbf08 100644 --- a/models/resnet.py +++ b/models/resnet.py @@ -104,24 +104,24 @@ def forward(self, x): return out -def ResNet18(): - return ResNet(BasicBlock, [2, 2, 2, 2]) +def ResNet18(num_classes=10): + return ResNet(BasicBlock, [2, 2, 2, 2], num_classes) -def ResNet34(): - return ResNet(BasicBlock, [3, 4, 6, 3]) +def ResNet34(num_classes=10): + return ResNet(BasicBlock, [3, 4, 6, 3], num_classes) -def ResNet50(): - return ResNet(Bottleneck, [3, 4, 6, 3]) +def ResNet50(num_classes=10): + return ResNet(Bottleneck, [3, 4, 6, 3], num_classes) -def ResNet101(): - return ResNet(Bottleneck, [3, 4, 23, 3]) +def ResNet101(num_classes=10): + return ResNet(Bottleneck, [3, 4, 23, 3], num_classes) -def ResNet152(): - return ResNet(Bottleneck, [3, 8, 36, 3]) +def ResNet152(num_classes=10): + return ResNet(Bottleneck, [3, 8, 36, 3], num_classes) def test(): From 861f0420c8ecb4da0d557381225ad7bba8035a13 Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 13:10:16 -0400 Subject: [PATCH 
30/54] Create sweep_n_cls_0.sh --- sweep_n_cls_0.sh | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 sweep_n_cls_0.sh diff --git a/sweep_n_cls_0.sh b/sweep_n_cls_0.sh new file mode 100644 index 000000000..38f63632d --- /dev/null +++ b/sweep_n_cls_0.sh @@ -0,0 +1,29 @@ +python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 2 +python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 3 +python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 4 +python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 5 +python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 6 +python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 7 +python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 8 +python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 9 +python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 10 + +python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 2 +python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 3 +python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 4 +python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 5 +python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 6 +python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 7 +python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 8 +python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 9 +python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 10 + +python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 2 +python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 3 +python3 main_n_cls.py --train --test --epochs 200 --net 
EfficientNetB0 --num_class 4 +python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 5 +python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 6 +python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 7 +python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 8 +python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 9 +python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 10 From 11139528bc9199384e223675c0b049133d8a96c6 Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 13:10:38 -0400 Subject: [PATCH 31/54] Create sweep_n_cls_1.sh --- sweep_n_cls_1.sh | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 sweep_n_cls_1.sh diff --git a/sweep_n_cls_1.sh b/sweep_n_cls_1.sh new file mode 100644 index 000000000..33c1e99e1 --- /dev/null +++ b/sweep_n_cls_1.sh @@ -0,0 +1,29 @@ +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 2 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 3 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 4 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 5 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 6 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 7 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 8 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 9 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 10 + +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 2 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 3 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 4 +python3 main_n_cls.py --train --test --epochs 
200 --net VGG19 --num_class 5 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 6 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 7 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 8 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 9 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 10 + +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 2 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 3 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 4 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 5 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 6 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 7 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 8 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 9 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 10 From 5ba5748ce1a770bf4f7407dc87fca54989a1bc61 Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 14:08:05 -0400 Subject: [PATCH 32/54] Add BasicBlock arg for simpleDLA --- main_n_cls.py | 2 +- main_n_cls_nohup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index e16d5b6df..a9b5bdd40 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -141,7 +141,7 @@ def prepare_dataset(num_class=args.num_class): elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1, args.num_class) elif args.net == 'EfficientNetB0': net = EfficientNetB0(args.num_class) elif args.net == 'RegNetX_200MF': net = RegNetX_200MF() -elif args.net == 'SimpleDLA': net = SimpleDLA(args.num_class) +elif args.net == 'SimpleDLA': net = SimpleDLA(BasicBlock, args.num_class) # Borrow sparsity() and prune() from # 
https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L174 diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index 6a8c08d50..40821e70a 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -140,7 +140,7 @@ def prepare_dataset(num_class=args.num_class): elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1, args.num_class) elif args.net == 'EfficientNetB0': net = EfficientNetB0(args.num_class) elif args.net == 'RegNetX_200MF': net = RegNetX_200MF() -elif args.net == 'SimpleDLA': net = SimpleDLA(args.num_class) +elif args.net == 'SimpleDLA': net = SimpleDLA(BasicBlock, args.num_class) # Borrow sparsity() and prune() from # https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L174 From 148b18bdcf2554b79fb4fb540f254e2b031c4e28 Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 14:15:56 -0400 Subject: [PATCH 33/54] Split works --- sweep_n_cls_ResNet18.sh | 9 +++++++++ sweep_n_cls_SimpleDLA.sh | 9 +++++++++ sweep_n_cls_VGG19.sh | 9 +++++++++ 3 files changed, 27 insertions(+) create mode 100644 sweep_n_cls_ResNet18.sh create mode 100644 sweep_n_cls_SimpleDLA.sh create mode 100644 sweep_n_cls_VGG19.sh diff --git a/sweep_n_cls_ResNet18.sh b/sweep_n_cls_ResNet18.sh new file mode 100644 index 000000000..4542ebd65 --- /dev/null +++ b/sweep_n_cls_ResNet18.sh @@ -0,0 +1,9 @@ +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 2 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 3 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 4 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 5 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 6 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 7 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 8 +python3 main_n_cls.py --train 
--test --epochs 200 --net ResNet18 --num_class 9 +python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 10 diff --git a/sweep_n_cls_SimpleDLA.sh b/sweep_n_cls_SimpleDLA.sh new file mode 100644 index 000000000..ee2a2d1a2 --- /dev/null +++ b/sweep_n_cls_SimpleDLA.sh @@ -0,0 +1,9 @@ +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 2 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 3 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 4 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 5 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 6 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 7 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 8 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 9 +python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 10 diff --git a/sweep_n_cls_VGG19.sh b/sweep_n_cls_VGG19.sh new file mode 100644 index 000000000..2e14dc6eb --- /dev/null +++ b/sweep_n_cls_VGG19.sh @@ -0,0 +1,9 @@ +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 2 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 3 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 4 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 5 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 6 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 7 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 8 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 9 +python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 10 From 1644d322af0792371b92f1c154b2d77c55956e81 Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 
19:26:01 -0400 Subject: [PATCH 34/54] training and testing devices --- main_n_cls.py | 3 ++- main_n_cls_nohup.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index a9b5bdd40..c326f7e7e 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -215,6 +215,7 @@ def train(epoch): total = 0 # for batch_idx, (inputs, targets) in enumerate(trainloader): for batch_idx in range(len(train_inputs_n_cls) // args.train_batch_size): + print('training device: ', device) inputs, targets = None, None if (batch_idx + 1) * args.train_batch_size < len(train_inputs_n_cls): inputs = train_inputs_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] @@ -261,7 +262,7 @@ def test(epoch): with torch.no_grad(): # for batch_idx, (inputs, targets) in enumerate(testloader): for batch_idx in range(len(test_inputs_n_cls) // args.test_batch_size + 1): - print('device: ', device) + print('testing device: ', device) inputs, targets = None, None if (batch_idx + 1) * args.train_batch_size < len(test_inputs_n_cls): inputs = test_inputs_n_cls[batch_idx * args.test_batch_size :] diff --git a/main_n_cls_nohup.py b/main_n_cls_nohup.py index 40821e70a..61c32057c 100644 --- a/main_n_cls_nohup.py +++ b/main_n_cls_nohup.py @@ -214,6 +214,7 @@ def train(epoch): total = 0 # for batch_idx, (inputs, targets) in enumerate(trainloader): for batch_idx in range(len(train_inputs_n_cls) // args.train_batch_size): + print('training device: ', device) inputs, targets = None, None if (batch_idx + 1) * args.train_batch_size < len(train_inputs_n_cls): inputs = train_inputs_n_cls[batch_idx * args.train_batch_size : (batch_idx + 1) * args.train_batch_size] @@ -257,7 +258,7 @@ def test(epoch): with torch.no_grad(): # for batch_idx, (inputs, targets) in enumerate(testloader): for batch_idx in range(len(test_inputs_n_cls) // args.test_batch_size + 1): - print('device: ', device) + print('testing device: ', device) inputs, targets = None, None if 
(batch_idx + 1) * args.train_batch_size < len(test_inputs_n_cls): inputs = test_inputs_n_cls[batch_idx * args.test_batch_size :] From bb0c72bce66061b30fa310a32e6530b3d3b1ed1d Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 22:05:09 -0400 Subject: [PATCH 35/54] Add more bash scripts for training --- sweep_n_cls_0_GoogLeNet_CUDA_2.sh | 9 +++++++++ sweep_n_cls_EfficientNetB0_CUDA_2.sh | 9 +++++++++ sweep_n_cls_MobileNet_CUDA_0.sh | 9 +++++++++ sweep_n_cls_ResNet18_CUDA_1.sh | 9 +++++++++ sweep_n_cls_SimpleDLA_CUDA_1.sh | 9 +++++++++ sweep_n_cls_VGG19_CUDA_1.sh | 9 +++++++++ 6 files changed, 54 insertions(+) create mode 100644 sweep_n_cls_0_GoogLeNet_CUDA_2.sh create mode 100644 sweep_n_cls_EfficientNetB0_CUDA_2.sh create mode 100644 sweep_n_cls_MobileNet_CUDA_0.sh create mode 100644 sweep_n_cls_ResNet18_CUDA_1.sh create mode 100644 sweep_n_cls_SimpleDLA_CUDA_1.sh create mode 100644 sweep_n_cls_VGG19_CUDA_1.sh diff --git a/sweep_n_cls_0_GoogLeNet_CUDA_2.sh b/sweep_n_cls_0_GoogLeNet_CUDA_2.sh new file mode 100644 index 000000000..cb033af5f --- /dev/null +++ b/sweep_n_cls_0_GoogLeNet_CUDA_2.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 2 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 3 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 4 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 5 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 6 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 7 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 8 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 9 +CUDA_VISIBLE_DEVICES=2 python3
main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 10 diff --git a/sweep_n_cls_EfficientNetB0_CUDA_2.sh b/sweep_n_cls_EfficientNetB0_CUDA_2.sh new file mode 100644 index 000000000..b8c3e5675 --- /dev/null +++ b/sweep_n_cls_EfficientNetB0_CUDA_2.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 2 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 3 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 4 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 5 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 6 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 7 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 8 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 9 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 10 diff --git a/sweep_n_cls_MobileNet_CUDA_0.sh b/sweep_n_cls_MobileNet_CUDA_0.sh new file mode 100644 index 000000000..dcc184746 --- /dev/null +++ b/sweep_n_cls_MobileNet_CUDA_0.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 2 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 3 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 4 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 5 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 6 
+CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 7 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 8 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 9 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net MobileNet --num_class 10 diff --git a/sweep_n_cls_ResNet18_CUDA_1.sh b/sweep_n_cls_ResNet18_CUDA_1.sh new file mode 100644 index 000000000..4653dee41 --- /dev/null +++ b/sweep_n_cls_ResNet18_CUDA_1.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 2 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 3 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 4 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 5 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 6 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 7 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 8 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 9 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net ResNet18 --num_class 10 diff --git a/sweep_n_cls_SimpleDLA_CUDA_1.sh b/sweep_n_cls_SimpleDLA_CUDA_1.sh new file mode 100644 index 000000000..2bf9a209c --- /dev/null +++ b/sweep_n_cls_SimpleDLA_CUDA_1.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 2 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 3 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train 
--test --epochs 200 --net SimpleDLA --num_class 4 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 5 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 6 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 7 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 8 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 9 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net SimpleDLA --num_class 10 diff --git a/sweep_n_cls_VGG19_CUDA_1.sh b/sweep_n_cls_VGG19_CUDA_1.sh new file mode 100644 index 000000000..b3cd40963 --- /dev/null +++ b/sweep_n_cls_VGG19_CUDA_1.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 2 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 3 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 4 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 5 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 6 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 7 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 8 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 9 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net VGG19 --num_class 10 From fa96e891a17aeaa042419cec3617e38f98557c0e Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 22:36:57 -0400 Subject: [PATCH 36/54] Add CUDA_3 --- sweep_n_cls_EfficientNetB0_CUDA_3.sh | 9 +++++++++ 1 file changed, 9 insertions(+) 
create mode 100644 sweep_n_cls_EfficientNetB0_CUDA_3.sh diff --git a/sweep_n_cls_EfficientNetB0_CUDA_3.sh b/sweep_n_cls_EfficientNetB0_CUDA_3.sh new file mode 100644 index 000000000..b84a66a2d --- /dev/null +++ b/sweep_n_cls_EfficientNetB0_CUDA_3.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 2 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 3 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 4 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 5 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 6 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 7 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 8 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 9 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 10 From f048194ce59b9206dcbe333b52232fc92bb75772 Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 22:37:58 -0400 Subject: [PATCH 37/54] Add CUDA_3 --- sweep_n_cls_0_GoogLeNet_CUDA_3.sh | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 sweep_n_cls_0_GoogLeNet_CUDA_3.sh diff --git a/sweep_n_cls_0_GoogLeNet_CUDA_3.sh b/sweep_n_cls_0_GoogLeNet_CUDA_3.sh new file mode 100644 index 000000000..e85acf91c --- /dev/null +++ b/sweep_n_cls_0_GoogLeNet_CUDA_3.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 2 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 3 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py 
--train --test --epochs 200 --net GoogLeNet --num_class 4 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 5 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 6 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 7 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 8 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 9 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 10 From d6aca367393b9e4e1e1d9119831c03d585d1b58b Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 22:40:24 -0400 Subject: [PATCH 38/54] Add arg num_classes in EfficientNetB0 --- models/efficientnet.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/efficientnet.py b/models/efficientnet.py index 53d8c7b05..de58265d5 100644 --- a/models/efficientnet.py +++ b/models/efficientnet.py @@ -151,7 +151,7 @@ def forward(self, x): return out -def EfficientNetB0(): +def EfficientNetB0(num_classes=10): cfg = { 'num_blocks': [1, 2, 2, 3, 3, 4, 1], 'expansion': [1, 6, 6, 6, 6, 6, 6], @@ -161,7 +161,7 @@ def EfficientNetB0(): 'dropout_rate': 0.2, 'drop_connect_rate': 0.2, } - return EfficientNet(cfg) + return EfficientNet(cfg, num_classes=num_classes) def test(): From e349cf08a9be72d9f32e099a7851737541281b02 Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 22:45:06 -0400 Subject: [PATCH 39/54] Correct bash scripts --- sweep_n_cls_EfficientNetB0_CUDA_0.sh | 9 +++++++++ sweep_n_cls_GoogLeNet_CUDA_2.sh | 9 +++++++++ sweep_n_cls_GoogLeNet_CUDA_3.sh | 9 +++++++++ 3 files changed, 27 insertions(+) create mode 100644 sweep_n_cls_EfficientNetB0_CUDA_0.sh create mode 100644 sweep_n_cls_GoogLeNet_CUDA_2.sh create mode 100644 sweep_n_cls_GoogLeNet_CUDA_3.sh diff --git
a/sweep_n_cls_EfficientNetB0_CUDA_0.sh b/sweep_n_cls_EfficientNetB0_CUDA_0.sh new file mode 100644 index 000000000..3b5fcfcc7 --- /dev/null +++ b/sweep_n_cls_EfficientNetB0_CUDA_0.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 2 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 3 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 4 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 5 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 6 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 7 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 8 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 9 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net EfficientNetB0 --num_class 10 diff --git a/sweep_n_cls_GoogLeNet_CUDA_2.sh b/sweep_n_cls_GoogLeNet_CUDA_2.sh new file mode 100644 index 000000000..cb033af5f --- /dev/null +++ b/sweep_n_cls_GoogLeNet_CUDA_2.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 2 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 3 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 4 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 5 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 6 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 7 
+CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 8 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 9 +CUDA_VISIBLE_DEVICES=2 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 10 diff --git a/sweep_n_cls_GoogLeNet_CUDA_3.sh b/sweep_n_cls_GoogLeNet_CUDA_3.sh new file mode 100644 index 000000000..e85acf91c --- /dev/null +++ b/sweep_n_cls_GoogLeNet_CUDA_3.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 2 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 3 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 4 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 5 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 6 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 7 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 8 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 9 +CUDA_VISIBLE_DEVICES=3 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 10 From 6bc10986774e9875781b3dbe8db7f3770b1e0df6 Mon Sep 17 00:00:00 2001 From: BBC Date: Sun, 10 Jul 2022 22:48:31 -0400 Subject: [PATCH 40/54] GoogLeNet CUDA_0 --- sweep_n_cls_GoogLeNet_CUDA_0.sh | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 sweep_n_cls_GoogLeNet_CUDA_0.sh diff --git a/sweep_n_cls_GoogLeNet_CUDA_0.sh b/sweep_n_cls_GoogLeNet_CUDA_0.sh new file mode 100644 index 000000000..3cbcbd62d --- /dev/null +++ b/sweep_n_cls_GoogLeNet_CUDA_0.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test 
--epochs 200 --net GoogLeNet --num_class 2 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 3 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 4 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 5 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 6 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 7 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 8 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 9 +CUDA_VISIBLE_DEVICES=0 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 10 From 9adf370dcc0686ed85824d85bfdfbb0677f9d61e Mon Sep 17 00:00:00 2001 From: BBC Date: Mon, 11 Jul 2022 22:10:24 -0400 Subject: [PATCH 41/54] Add CUDA --- sweep_n_cls_GoogLeNet_CUDA_1.sh | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 sweep_n_cls_GoogLeNet_CUDA_1.sh diff --git a/sweep_n_cls_GoogLeNet_CUDA_1.sh b/sweep_n_cls_GoogLeNet_CUDA_1.sh new file mode 100644 index 000000000..2d5e7097f --- /dev/null +++ b/sweep_n_cls_GoogLeNet_CUDA_1.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 2 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 3 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 4 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 5 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 6 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 7 
+CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 8 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 9 +CUDA_VISIBLE_DEVICES=1 python3 main_n_cls.py --train --test --epochs 200 --net GoogLeNet --num_class 10 From aa20d8e7c62c3b56efd4f0b3aa29db03865be41d Mon Sep 17 00:00:00 2001 From: BBC Date: Wed, 13 Jul 2022 15:43:15 -0400 Subject: [PATCH 42/54] Add num_class --- test_sim.py | 6 +- test_sim_v0.py | 302 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 307 insertions(+), 1 deletion(-) create mode 100644 test_sim_v0.py diff --git a/test_sim.py b/test_sim.py index b3eb3c207..e70eb2f94 100644 --- a/test_sim.py +++ b/test_sim.py @@ -31,6 +31,9 @@ parser.add_argument('--pruning_rate', type=float, default=0.30) parser.add_argument('--test_batch_size', type=int, default=100) parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') +parser.add_argument('--num_class', type=int, default=10) +parser.add_argument('--save_model_epoch_interval', type=int, default=10) +parser.add_argument('--load_epoch', type=str, default='best', help='best | ') args = parser.parse_args() @@ -135,7 +138,8 @@ def count_layer_params(model, layer_name=nn.Conv2d): assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' 
print('\n\ndevice: ', device) - checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net), map_location=device) + checkpoint = torch.load('./checkpoint/{}_n_cls_{}_epoch_{}_ckpt.pth'.\ + format(args.net, args.num_class, args.load_epoch), map_location=device) net.load_state_dict(checkpoint['net'], strict=False) print('\n model weights loaded!') best_acc = checkpoint['acc'] diff --git a/test_sim_v0.py b/test_sim_v0.py new file mode 100644 index 000000000..b3eb3c207 --- /dev/null +++ b/test_sim_v0.py @@ -0,0 +1,302 @@ +'''Train CIFAR10 with PyTorch.''' +import torch +import torch.nn as nn +import torch.optim as optim +import torch.nn.functional as F +import torch.backends.cudnn as cudnn +from torchinfo import summary + +import torchvision +import torchvision.transforms as transforms + +import os +import argparse + +from models import * +from utils import progress_bar +import time +import numpy as np +import matplotlib.pyplot as plt +import cv2 + +parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training') +parser.add_argument('--lr', default=0.1, type=float, help='learning rate') +parser.add_argument('--resume', '-r', action='store_true', + help='resume from checkpoint') +parser.add_argument('--net', default='SimpleDLA') +parser.add_argument('--train', action='store_true') +parser.add_argument('--test', action='store_true') +parser.add_argument('--epochs', type=int, default=200) +parser.add_argument('--prune', action='store_true') +parser.add_argument('--pruning_rate', type=float, default=0.30) +parser.add_argument('--test_batch_size', type=int, default=100) +parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') + +args = parser.parse_args() + +device = 'cuda' if torch.cuda.is_available() and args.select_device == 'gpu' else 'cpu' +best_acc = 0 # best test accuracy +start_epoch = 0 # start from epoch 0 or last checkpoint epoch + +# Data +print('==> Preparing data..') +transform_train = transforms.Compose([ + 
transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +transform_test = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), +]) + +trainset = torchvision.datasets.CIFAR10( + root='./data', train=True, download=True, transform=transform_train) +trainloader = torch.utils.data.DataLoader( + trainset, batch_size=128, shuffle=True, num_workers=2) + +testset = torchvision.datasets.CIFAR10( + root='./data', train=False, download=True, transform=transform_test) +testloader = torch.utils.data.DataLoader( + testset, batch_size=args.test_batch_size, shuffle=False, num_workers=1) + +classes = ('plane', 'car', 'bird', 'cat', 'deer', + 'dog', 'frog', 'horse', 'ship', 'truck') + +# Model +print('==> Building model..') +if args.net == 'VGG19': net = VGG('VGG19') +elif args.net == 'ResNet18': net = ResNet18() +elif args.net == 'PreActResNet18': net = PreActResNet18() +elif args.net == 'GoogLeNet': net = GoogLeNet() +elif args.net == 'DenseNet121': net = DenseNet121() +elif args.net == 'ResNeXt29_2x64d': net = ResNeXt29_2x64d() +elif args.net == 'MobileNet': net = MobileNet() +elif args.net == 'MobileNetV2': net = MobileNetV2() +elif args.net == 'DPN92': net = DPN92() +elif args.net == 'ShuffleNetG2': net = ShuffleNetG2() +elif args.net == 'SENet18': net = SENet18() +elif args.net == 'ShuffleNetV2': net = ShuffleNetV2(1) +elif args.net == 'EfficientNetB0': net = EfficientNetB0() +elif args.net == 'RegNetX_200MF': net = RegNetX_200MF() +elif args.net == 'SimpleDLA': net = SimpleDLA() + +# Borrow sparsity() and prune() from +# https://github.com/ultralytics/yolov5/blob/a2a1ed201d150343a4f9912d644be2b210206984/utils/torch_utils.py#L174 +def sparsity(model): + # Return global model sparsity + a, b = 0, 0 + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + 
+def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... ', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def count_layer_params(model, layer_name=nn.Conv2d): + print('\n\n layer_name: ', layer_name) + total_params = 0 + total_traina_params = 0 + n_layers = 0 + for name, m in model.named_modules(): + if isinstance(m, layer_name): + # print('\nm:', m) + # print('\ndir(m): ', dir(m)) + + for name, parameter in m.named_parameters(): + params = parameter.numel() + total_params += params + if not parameter.requires_grad: continue + n_layers += 1 + total_traina_params += params + print('\n\nlayer_name: {}, total_params: {}, total_traina_params: {}, n_layers: {}'.\ + format(layer_name, total_params, total_traina_params, n_layers)) + # time.sleep(100) + +net = net.to(device) +if device == 'cuda': + if args.train: net = torch.nn.DataParallel(net) + cudnn.benchmark = True + +if args.resume: + # Load checkpoint. + print('==> Resuming from checkpoint..') + assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' 
+ + print('\n\ndevice: ', device) + checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net), map_location=device) + net.load_state_dict(checkpoint['net'], strict=False) + print('\n model weights loaded!') + best_acc = checkpoint['acc'] + start_epoch = checkpoint['epoch'] + +criterion = nn.CrossEntropyLoss() +optimizer = optim.SGD(net.parameters(), lr=args.lr, + momentum=0.9, weight_decay=5e-4) +scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200) + + +# Training +def train(epoch): + print('\nEpoch: %d' % epoch) + net.train() + train_loss = 0 + correct = 0 + total = 0 + for batch_idx, (inputs, targets) in enumerate(trainloader): + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() + + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' + % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) + + +def test(epoch): + global best_acc + if args.prune: + prune(net, args.pruning_rate) + input_size = (1, 3, 32, 32) + summary(net, input_size) + count_layer_params(net) + + # print('------------') + # print(net) + + def extract_features_labels(my_net, my_data): + intermediate_outputs = [] + + def hook(module, input, output): + for output_each in output: + intermediate_outputs.append(output_each.detach().cpu().tolist()) # intermediate outputs + + # print('np.shape(my_data): ', np.shape(my_data)) + # print('\n net: ', net) + # print('\n dir(my_net): ', dir(my_net)) + # print('\n dir(my_net.children()): ', dir(my_net.children())) + my_net.layer4[1].shortcut.register_forward_hook(hook) # intermediate outputs from the third last fc layer + + features = [] + labels = [] + # print('\n np.shape(my_data): ', np.shape(my_data)) + ''' + e.g. 
+ np.shape(my_data): torch.Size([1, 3, 32, 32]) + ''' + # e.g. torch.Size([1, 3, 32, 32]) + # for step, (x, y) in enumerate(my_data): + # batch_x = Variable(x) + # batch_y = Variable(y) + # output = my_net(batch_x) + # labels.extend(batch_y.numpy().tolist()) + + output = my_net(my_data) + features = torch.from_numpy(np.array(intermediate_outputs)) + # print('\n np.shape(output): ', np.shape(output)) + # print('\n np.shape(np.array(features)): ', np.shape(np.array(features))) + ''' + e.g. + np.shape(output): torch.Size([1, 10]) + np.shape(np.array(features)): (1, 512, 4, 4) + ''' + # features = torch.flatten(features) + # print('\n after flattened - np.shape(features): ', np.shape(features)) + # return np.array(features) + # print('\n np.shape(features): ', np.shape(features)) + return features + + def vis_compute_sim(net, input_0, input_1): + feats_0 = extract_features_labels(net, input_0) + feats_1 = extract_features_labels(net, input_1) + input_0 = (input_0 / 2 + 0.5) # * 255 + input_1 = (input_1 / 2 + 0.5) # * 255 + + plt.imshow(np.transpose(torch.squeeze(input_0).cpu(), (1, 2, 0))); plt.show() + plt.imshow(np.transpose(torch.squeeze(input_1).cpu(), (1, 2, 0))); plt.show() + # sim_score = np.cos(np.array(feats_0).flatten(), np.array(feats_1).flatten()) + print('\n np.shape(feats_0): ', np.shape(feats_0)) + # torch.Size([1, 512, 4, 4]) + cos = nn.CosineSimilarity(dim=1, eps=1e-6) + sim_score = cos(feats_0, feats_1) + print('\n np.shape(sim_score): ', np.shape(sim_score)) + sim_score = sim_score[0][0][0] # torch.mean(sim_score) + return sim_score + + net.eval() + test_loss = 0 + correct = 0 + total = 0 + with torch.no_grad(): + for batch_idx, (inputs, targets) in enumerate(testloader): + # print('device: ', device) + # print('\n np.shape(inputs): ', np.shape(inputs)) + # e.g. 
torch.Size([5, 3, 32, 32]) + inputs, targets = inputs.to(device), targets.to(device) + outputs = net(inputs) + loss = criterion(outputs, targets) + + # print('targets: ', targets) + # print('\n np.shape(targets): ', np.shape(targets)) + # print('\n np.shape(inputs): ', np.shape(inputs)) + # e.g. + # np.shape(targets): torch.Size([5]) + # np.shape(inputs): torch.Size([5, 3, 32, 32]) # [batch_size, channels, , ] + + for i in range(args.test_batch_size): + for j in range(args.test_batch_size): + input_0, input_1 = torch.unsqueeze(inputs[i], dim=0), torch.unsqueeze(inputs[j], dim=0) + sim_score = vis_compute_sim(net, input_0, input_1) + print('\n =====================') + print('\n i: ', i, ', j: ', j) + print('\n targets[i]: ', targets[i], ', targets[j]: ', targets[j]) + print('\n sim_score: ', sim_score) + print('\n ---------------------') + + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' + % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) + + # Save checkpoint. 
+ acc = 100.*correct/total + if acc > best_acc: + print('Saving..') + state = { + 'net': net.state_dict(), + 'acc': acc, + 'epoch': epoch, + } + if not os.path.isdir('checkpoint'): + os.mkdir('checkpoint') + torch.save(state, './checkpoint/{}_ckpt.pth'.format(args.net)) + best_acc = acc + +print('\n\nargs.train: ', args.train, ', args.test:', args.test) +for epoch in range(args.epochs): + if args.train: train(epoch) + if args.test: + test(epoch) + if not args.train: break + scheduler.step() From ecac0bf54dcebc8d4c53f9a72b8bba8d7dbf6038 Mon Sep 17 00:00:00 2001 From: BBC Date: Wed, 13 Jul 2022 16:07:36 -0400 Subject: [PATCH 43/54] Save 10 classes by default --- main.py | 31 ++++++++++++++++++++++--------- main_nohup.py | 20 ++++++++++++++++---- 2 files changed, 38 insertions(+), 13 deletions(-) diff --git a/main.py b/main.py index 7301c5cff..3e18fcbad 100644 --- a/main.py +++ b/main.py @@ -34,6 +34,7 @@ device = 'cuda' if torch.cuda.is_available() and args.select_device == 'gpu' else 'cpu' best_acc = 0 # best test accuracy start_epoch = 0 # start from epoch 0 or last checkpoint epoch +num_class = 10 # Data print('==> Preparing data..') @@ -107,13 +108,13 @@ def count_layer_params(model, layer_name=nn.Conv2d): total_traina_params = 0 n_layers = 0 for name, m in model.named_modules(): - if isinstance(m, layer_name): + if isinstance(m, layer_name): # print('\nm:', m) # print('\ndir(m): ', dir(m)) - + for name, parameter in m.named_parameters(): params = parameter.numel() - total_params += params + total_params += params if not parameter.requires_grad: continue n_layers += 1 total_traina_params += params @@ -130,8 +131,8 @@ def count_layer_params(model, layer_name=nn.Conv2d): # Load checkpoint. print('==> Resuming from checkpoint..') assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' 
- - print('\n\ndevice: ', device) + + print('\n\ndevice: ', device) checkpoint = torch.load('./checkpoint/{}_ckpt.pth'.format(args.net), map_location=device) net.load_state_dict(checkpoint['net'], strict=False) best_acc = checkpoint['acc'] @@ -172,7 +173,7 @@ def test(epoch): if args.prune: prune(net, args.pruning_rate) input_size = (1, 3, 32, 32) - summary(net, input_size) + summary(net, input_size) count_layer_params(net) @@ -191,12 +192,24 @@ def test(epoch): _, predicted = outputs.max(1) total += targets.size(0) correct += predicted.eq(targets).sum().item() - + progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (test_loss/(batch_idx+1), 100.*correct/total, correct, total)) # Save checkpoint. acc = 100.*correct/total + if epoch % args.save_model_epoch_interval == 0: + print('Saving..') + state = { + 'net': net.state_dict(), + 'acc': acc, + 'epoch': epoch, + } + if not os.path.isdir('checkpoint'): + os.mkdir('checkpoint') + torch.save(state, './checkpoint/{}_n_cls_{}_epoch_{}_ckpt.pth'.\ + format(args.net, num_class, str(epoch))) + best_acc = acc if acc > best_acc: print('Saving..') state = { @@ -206,7 +219,8 @@ def test(epoch): } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') - torch.save(state, './checkpoint/{}_ckpt.pth'.format(args.net)) + torch.save(state, './checkpoint/{}_n_cls_{}_epoch_best_ckpt.pth'.\ + format(args.net, num_class)) best_acc = acc print('\n\nargs.train: ', args.train, ', args.test:', args.test) @@ -216,4 +230,3 @@ def test(epoch): test(epoch) if not args.train: break scheduler.step() - diff --git a/main_nohup.py b/main_nohup.py index 0ac30f9e5..68f8884f0 100644 --- a/main_nohup.py +++ b/main_nohup.py @@ -32,6 +32,7 @@ device = 'cuda' if torch.cuda.is_available() else 'cpu' best_acc = 0 # best test accuracy start_epoch = 0 # start from epoch 0 or last checkpoint epoch +num_class = 10 # Data print('==> Preparing data..') @@ -158,10 +159,22 @@ def test(epoch): _, predicted = outputs.max(1) total += 
targets.size(0) correct += predicted.eq(targets).sum().item() - + # Save checkpoint. acc = 100.*correct/total + if epoch % args.save_model_epoch_interval == 0: + print('Saving..') + state = { + 'net': net.state_dict(), + 'acc': acc, + 'epoch': epoch, + } + if not os.path.isdir('checkpoint'): + os.mkdir('checkpoint') + torch.save(state, './checkpoint/{}_n_cls_{}_epoch_{}_ckpt.pth'.\ + format(args.net, num_class, str(epoch))) + best_acc = acc if acc > best_acc: print('Saving..') state = { @@ -171,14 +184,13 @@ def test(epoch): } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') - torch.save(state, './checkpoint/{}_ckpt.pth'.format(args.net)) + torch.save(state, './checkpoint/{}_n_cls_{}_epoch_best_ckpt.pth'.\ + format(args.net, num_class)) best_acc = acc - for epoch in range(args.epochs): if args.train: train(epoch) if args.test: test(epoch) if not args.train: break scheduler.step() - From c3a1e13b640e71675d1a551029a8fc7e2176c377 Mon Sep 17 00:00:00 2001 From: BBC Date: Wed, 13 Jul 2022 16:10:48 -0400 Subject: [PATCH 44/54] Correct args --- main.py | 2 ++ main_nohup.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/main.py b/main.py index 3e18fcbad..ce9f211f8 100644 --- a/main.py +++ b/main.py @@ -28,6 +28,8 @@ parser.add_argument('--pruning_rate', type=float, default=0.30) parser.add_argument('--test_batch_size', type=int, default=100) parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') +parser.add_argument('--save_model_epoch_interval', type=int, default=10) +parser.add_argument('--load_epoch', type=str, default='best', help='best | ') args = parser.parse_args() diff --git a/main_nohup.py b/main_nohup.py index 68f8884f0..0f527c516 100644 --- a/main_nohup.py +++ b/main_nohup.py @@ -26,6 +26,8 @@ parser.add_argument('--pruning_rate', type=float, default=0.30) parser.add_argument('--test_batch_size', type=int, default=100) parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') 
+parser.add_argument('--save_model_epoch_interval', type=int, default=10) +parser.add_argument('--load_epoch', type=str, default='best', help='best | ') args = parser.parse_args() From 848a806ba1e8b5d3b973ed3f45200452cd67f6bf Mon Sep 17 00:00:00 2001 From: BBC Date: Mon, 18 Jul 2022 16:09:37 -0400 Subject: [PATCH 45/54] D and S groups --- main_n_cls.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index c326f7e7e..9763f8077 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -30,7 +30,7 @@ parser.add_argument('--train_batch_size', type=int, default=128) parser.add_argument('--test_batch_size', type=int, default=100) parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') -parser.add_argument('--num_class', type=int, default=10) +parser.add_argument('--num_class', type=str, default='10, D4, S4') parser.add_argument('--save_model_epoch_interval', type=int, default=10) parser.add_argument('--load_epoch', type=str, default='best', help='best | ') @@ -65,7 +65,13 @@ def prepare_dataset(num_class=args.num_class): testloader_all_cls = torch.utils.data.DataLoader( testset, batch_size=args.test_batch_size, shuffle=False, num_workers=1) - n_cls_ls = list(range(num_class)) + # class labels: https://www.cs.toronto.edu/~kriz/cifar.html + if num_class == 'D2G0': n_cls_ls = [0, 6] # airplane, frog + elif num_class == 'S2G0': n_cls_ls = [4, 7] # deer, horse + elif num_class == 'S2G1': n_cls_ls = [1, 9] # automobile, truck + elif num_class == 'D4': n_cls_ls = [0, 1, 3, 8] # airplane, automobile, cat, ship + elif num_class == 'S4': n_cls_ls = [3, 4, 5, 6] # cat, deer, dog, horse + else: n_cls_ls = list(range(int(num_class))) # Prepare n_cls data for train set train_inputs_n_cls, train_targets_n_cls = None, None From 0f86d7f0fb2e130ef60ab57515bb5aa58e2c2513 Mon Sep 17 00:00:00 2001 From: BBC Date: Mon, 18 Jul 2022 16:11:29 -0400 Subject: [PATCH 46/54] Add num_class groups --- main_n_cls.py | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/main_n_cls.py b/main_n_cls.py index 9763f8077..bc71a6999 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -30,7 +30,7 @@ parser.add_argument('--train_batch_size', type=int, default=128) parser.add_argument('--test_batch_size', type=int, default=100) parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') -parser.add_argument('--num_class', type=str, default='10, D4, S4') +parser.add_argument('--num_class', type=str, default='10, D2G0, S2G0, S2G1, D4, S4') parser.add_argument('--save_model_epoch_interval', type=int, default=10) parser.add_argument('--load_epoch', type=str, default='best', help='best | ') From c792b84f1d2d87bc831872eb7bfcfded63167499 Mon Sep 17 00:00:00 2001 From: BBC Date: Mon, 18 Jul 2022 16:13:57 -0400 Subject: [PATCH 47/54] Add D2G1 --- main_n_cls.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/main_n_cls.py b/main_n_cls.py index bc71a6999..ef4e6d068 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -30,7 +30,7 @@ parser.add_argument('--train_batch_size', type=int, default=128) parser.add_argument('--test_batch_size', type=int, default=100) parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') -parser.add_argument('--num_class', type=str, default='10, D2G0, S2G0, S2G1, D4, S4') +parser.add_argument('--num_class', type=str, default='10, D2G0, D2G1, S2G0, S2G1, D4, S4') parser.add_argument('--save_model_epoch_interval', type=int, default=10) parser.add_argument('--load_epoch', type=str, default='best', help='best | ') @@ -67,6 +67,7 @@ def prepare_dataset(num_class=args.num_class): # class labels: https://www.cs.toronto.edu/~kriz/cifar.html if num_class == 'D2G0': n_cls_ls = [0, 6] # airplane, frog + elif num_class == 'D2G1': n_cls_ls = [4, 8] # deer, ship elif num_class == 'S2G0': n_cls_ls = [4, 7] # deer, horse elif num_class == 'S2G1': n_cls_ls = [1, 9] # automobile, truck elif num_class == 'D4': n_cls_ls = [0, 
1, 3, 8] # airplane, automobile, cat, ship From 2a98e5d6c8c6e6bfee9c81885314b01972d9ea52 Mon Sep 17 00:00:00 2001 From: BBC Date: Mon, 18 Jul 2022 16:20:11 -0400 Subject: [PATCH 48/54] Correct class group --- main_n_cls.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index ef4e6d068..71cc3182a 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -30,7 +30,8 @@ parser.add_argument('--train_batch_size', type=int, default=128) parser.add_argument('--test_batch_size', type=int, default=100) parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') -parser.add_argument('--num_class', type=str, default='10, D2G0, D2G1, S2G0, S2G1, D4, S4') +parser.add_argument('--class_group', type=str, default='ORI, D2G0, D2G1, S2G0, S2G1, D4, S4') +parser.add_argument('--num_class', type=int, default=10) parser.add_argument('--save_model_epoch_interval', type=int, default=10) parser.add_argument('--load_epoch', type=str, default='best', help='best | ') @@ -39,6 +40,8 @@ device = 'cuda' if torch.cuda.is_available() and args.select_device == 'gpu' else 'cpu' best_acc = 0 # best test accuracy start_epoch = 0 # start from epoch 0 or last checkpoint epoch +if args.class_group != 'ORI': + args.num_class = int(args.class_group[1]) # Data print('==> Preparing data..') @@ -66,13 +69,13 @@ def prepare_dataset(num_class=args.num_class): testset, batch_size=args.test_batch_size, shuffle=False, num_workers=1) # class labels: https://www.cs.toronto.edu/~kriz/cifar.html - if num_class == 'D2G0': n_cls_ls = [0, 6] # airplane, frog - elif num_class == 'D2G1': n_cls_ls = [4, 8] # deer, ship - elif num_class == 'S2G0': n_cls_ls = [4, 7] # deer, horse - elif num_class == 'S2G1': n_cls_ls = [1, 9] # automobile, truck - elif num_class == 'D4': n_cls_ls = [0, 1, 3, 8] # airplane, automobile, cat, ship - elif num_class == 'S4': n_cls_ls = [3, 4, 5, 6] # cat, deer, dog, horse - else: n_cls_ls = 
list(range(int(num_class))) + if args.class_group == 'D2G0': n_cls_ls = [0, 6] # airplane, frog + elif args.class_group == 'D2G1': n_cls_ls = [4, 8] # deer, ship + elif args.class_group == 'S2G0': n_cls_ls = [4, 7] # deer, horse + elif args.class_group == 'S2G1': n_cls_ls = [1, 9] # automobile, truck + elif args.class_group == 'D4': n_cls_ls = [0, 1, 3, 8] # airplane, automobile, cat, ship + elif args.class_group == 'S4': n_cls_ls = [3, 4, 5, 6] # cat, deer, dog, horse + else: n_cls_ls = list(range(num_class)) # Prepare n_cls data for train set train_inputs_n_cls, train_targets_n_cls = None, None From 1c8bc3d229e68eb1d963d36a5c2681851c9be5e5 Mon Sep 17 00:00:00 2001 From: BBC Date: Mon, 18 Jul 2022 16:25:36 -0400 Subject: [PATCH 49/54] Convert target into [0..n] range --- main_n_cls.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index 71cc3182a..65c27a9a4 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -95,10 +95,10 @@ def prepare_dataset(num_class=args.num_class): # e.g. torch.Size([3, 32, 32]) if train_inputs_n_cls is None and train_targets_n_cls is None: train_inputs_n_cls = torch.unsqueeze(input_, axis=0) - train_targets_n_cls = torch.unsqueeze(targets[in_i], axis=0) + train_targets_n_cls = torch.unsqueeze(n_cls_ls.index(targets[in_i]), axis=0) else: train_inputs_n_cls = torch.cat((train_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) - train_targets_n_cls = torch.cat((train_targets_n_cls, torch.unsqueeze(targets[in_i], axis=0)), 0) + train_targets_n_cls = torch.cat((train_targets_n_cls, torch.unsqueeze(n_cls_ls(targets[in_i]), axis=0)), 0) # train_inputs_n_cls.append(input_) print('\n prepare_dataset() - train_inputs_n_cls.shape: ', train_inputs_n_cls.shape) # e.g. torch.Size([128, 3, 32, 32]) @@ -121,10 +121,10 @@ def prepare_dataset(num_class=args.num_class): # e.g. 
torch.Size([3, 32, 32]) if test_inputs_n_cls is None and test_targets_n_cls is None: test_inputs_n_cls = torch.unsqueeze(input_, axis=0) - test_targets_n_cls = torch.unsqueeze(targets[in_i], axis=0) + test_targets_n_cls = torch.unsqueeze(n_cls_ls(targets[in_i]), axis=0) else: test_inputs_n_cls = torch.cat((test_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) - test_targets_n_cls = torch.cat((test_targets_n_cls, torch.unsqueeze(targets[in_i], axis=0)), 0) + test_targets_n_cls = torch.cat((test_targets_n_cls, torch.unsqueeze(n_cls_ls(targets[in_i]), axis=0)), 0) # test_inputs_n_cls.append(input_) print('\n prepare_dataset() - test_inputs_n_cls.shape: ', test_inputs_n_cls.shape) # e.g. torch.Size([128, 3, 32, 32]) From f55c39d221f7b18c91dea2085fea17cbafaa2c31 Mon Sep 17 00:00:00 2001 From: BBC Date: Mon, 18 Jul 2022 16:31:35 -0400 Subject: [PATCH 50/54] Add tensor --- main_n_cls.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index 65c27a9a4..1661e6bef 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -95,10 +95,10 @@ def prepare_dataset(num_class=args.num_class): # e.g. torch.Size([3, 32, 32]) if train_inputs_n_cls is None and train_targets_n_cls is None: train_inputs_n_cls = torch.unsqueeze(input_, axis=0) - train_targets_n_cls = torch.unsqueeze(n_cls_ls.index(targets[in_i]), axis=0) + train_targets_n_cls = torch.unsqueeze(torch.tensor(n_cls_ls.index(targets[in_i])), axis=0) else: train_inputs_n_cls = torch.cat((train_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) - train_targets_n_cls = torch.cat((train_targets_n_cls, torch.unsqueeze(n_cls_ls(targets[in_i]), axis=0)), 0) + train_targets_n_cls = torch.cat((train_targets_n_cls, torch.unsqueeze(torch.tensor(n_cls_ls(targets[in_i])), axis=0)), 0) # train_inputs_n_cls.append(input_) print('\n prepare_dataset() - train_inputs_n_cls.shape: ', train_inputs_n_cls.shape) # e.g. 
torch.Size([128, 3, 32, 32]) @@ -121,10 +121,10 @@ def prepare_dataset(num_class=args.num_class): # e.g. torch.Size([3, 32, 32]) if test_inputs_n_cls is None and test_targets_n_cls is None: test_inputs_n_cls = torch.unsqueeze(input_, axis=0) - test_targets_n_cls = torch.unsqueeze(n_cls_ls(targets[in_i]), axis=0) + test_targets_n_cls = torch.unsqueeze(torch.tensor(n_cls_ls(targets[in_i])), axis=0) else: test_inputs_n_cls = torch.cat((test_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) - test_targets_n_cls = torch.cat((test_targets_n_cls, torch.unsqueeze(n_cls_ls(targets[in_i]), axis=0)), 0) + test_targets_n_cls = torch.cat((test_targets_n_cls, torch.unsqueeze(torch.tensor(n_cls_ls(targets[in_i])), axis=0)), 0) # test_inputs_n_cls.append(input_) print('\n prepare_dataset() - test_inputs_n_cls.shape: ', test_inputs_n_cls.shape) # e.g. torch.Size([128, 3, 32, 32]) From 00fd6fb812e4096240d985c5b73180afb7755b54 Mon Sep 17 00:00:00 2001 From: BBC Date: Mon, 18 Jul 2022 16:32:37 -0400 Subject: [PATCH 51/54] Correct the bug of index --- main_n_cls.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index 1661e6bef..31d9d4854 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -98,7 +98,7 @@ def prepare_dataset(num_class=args.num_class): train_targets_n_cls = torch.unsqueeze(torch.tensor(n_cls_ls.index(targets[in_i])), axis=0) else: train_inputs_n_cls = torch.cat((train_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) - train_targets_n_cls = torch.cat((train_targets_n_cls, torch.unsqueeze(torch.tensor(n_cls_ls(targets[in_i])), axis=0)), 0) + train_targets_n_cls = torch.cat((train_targets_n_cls, torch.unsqueeze(torch.tensor(n_cls_ls.intex(targets[in_i])), axis=0)), 0) # train_inputs_n_cls.append(input_) print('\n prepare_dataset() - train_inputs_n_cls.shape: ', train_inputs_n_cls.shape) # e.g. 
torch.Size([128, 3, 32, 32]) @@ -124,7 +124,7 @@ def prepare_dataset(num_class=args.num_class): test_targets_n_cls = torch.unsqueeze(torch.tensor(n_cls_ls(targets[in_i])), axis=0) else: test_inputs_n_cls = torch.cat((test_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) - test_targets_n_cls = torch.cat((test_targets_n_cls, torch.unsqueeze(torch.tensor(n_cls_ls(targets[in_i])), axis=0)), 0) + test_targets_n_cls = torch.cat((test_targets_n_cls, torch.unsqueeze(torch.tensor(n_cls_ls.intex(targets[in_i])), axis=0)), 0) # test_inputs_n_cls.append(input_) print('\n prepare_dataset() - test_inputs_n_cls.shape: ', test_inputs_n_cls.shape) # e.g. torch.Size([128, 3, 32, 32]) From 4fd97283e800d777319ac9744490b64768ec50ea Mon Sep 17 00:00:00 2001 From: BBC Date: Mon, 18 Jul 2022 16:33:10 -0400 Subject: [PATCH 52/54] Correct the bug of index --- main_n_cls.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index 31d9d4854..6aabf1c13 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -98,7 +98,7 @@ def prepare_dataset(num_class=args.num_class): train_targets_n_cls = torch.unsqueeze(torch.tensor(n_cls_ls.index(targets[in_i])), axis=0) else: train_inputs_n_cls = torch.cat((train_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) - train_targets_n_cls = torch.cat((train_targets_n_cls, torch.unsqueeze(torch.tensor(n_cls_ls.intex(targets[in_i])), axis=0)), 0) + train_targets_n_cls = torch.cat((train_targets_n_cls, torch.unsqueeze(torch.tensor(n_cls_ls.index(targets[in_i])), axis=0)), 0) # train_inputs_n_cls.append(input_) print('\n prepare_dataset() - train_inputs_n_cls.shape: ', train_inputs_n_cls.shape) # e.g. 
torch.Size([128, 3, 32, 32]) @@ -124,7 +124,7 @@ def prepare_dataset(num_class=args.num_class): test_targets_n_cls = torch.unsqueeze(torch.tensor(n_cls_ls(targets[in_i])), axis=0) else: test_inputs_n_cls = torch.cat((test_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) - test_targets_n_cls = torch.cat((test_targets_n_cls, torch.unsqueeze(torch.tensor(n_cls_ls.intex(targets[in_i])), axis=0)), 0) + test_targets_n_cls = torch.cat((test_targets_n_cls, torch.unsqueeze(torch.tensor(n_cls_ls.index(targets[in_i])), axis=0)), 0) # test_inputs_n_cls.append(input_) print('\n prepare_dataset() - test_inputs_n_cls.shape: ', test_inputs_n_cls.shape) # e.g. torch.Size([128, 3, 32, 32]) From 5535497e12e0dc3df2ffc0ced97c243d48b0b365 Mon Sep 17 00:00:00 2001 From: BBC Date: Mon, 18 Jul 2022 16:34:06 -0400 Subject: [PATCH 53/54] Correct the bug of index --- main_n_cls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main_n_cls.py b/main_n_cls.py index 6aabf1c13..07837f287 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -121,7 +121,7 @@ def prepare_dataset(num_class=args.num_class): # e.g. 
torch.Size([3, 32, 32]) if test_inputs_n_cls is None and test_targets_n_cls is None: test_inputs_n_cls = torch.unsqueeze(input_, axis=0) - test_targets_n_cls = torch.unsqueeze(torch.tensor(n_cls_ls(targets[in_i])), axis=0) + test_targets_n_cls = torch.unsqueeze(torch.tensor(n_cls_ls.index(targets[in_i])), axis=0) else: test_inputs_n_cls = torch.cat((test_inputs_n_cls, torch.unsqueeze(input_, axis=0)), 0) test_targets_n_cls = torch.cat((test_targets_n_cls, torch.unsqueeze(torch.tensor(n_cls_ls.index(targets[in_i])), axis=0)), 0) From 39b2f6245c70a668e76e6c375054a10bd43662d3 Mon Sep 17 00:00:00 2001 From: BBC Date: Mon, 18 Jul 2022 20:05:05 -0400 Subject: [PATCH 54/54] Add class group in checkpoint path --- main_n_cls.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/main_n_cls.py b/main_n_cls.py index 07837f287..51d3f1d1d 100644 --- a/main_n_cls.py +++ b/main_n_cls.py @@ -30,7 +30,7 @@ parser.add_argument('--train_batch_size', type=int, default=128) parser.add_argument('--test_batch_size', type=int, default=100) parser.add_argument('--select_device', type=str, default='gpu', help='gpu | cpu') -parser.add_argument('--class_group', type=str, default='ORI, D2G0, D2G1, S2G0, S2G1, D4, S4') +parser.add_argument('--class_group', type=str, default='ORI', help='ORI, D2G0, D2G1, S2G0, S2G1, D4, S4') parser.add_argument('--num_class', type=int, default=10) parser.add_argument('--save_model_epoch_interval', type=int, default=10) parser.add_argument('--load_epoch', type=str, default='best', help='best | ') @@ -204,8 +204,8 @@ def count_layer_params(model, layer_name=nn.Conv2d): assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!' 
print('\n\ndevice: ', device) - checkpoint = torch.load('./checkpoint/{}_n_cls_{}_epoch_{}_ckpt.pth'.\ - format(args.net, args.num_class, args.load_epoch), map_location=device) + checkpoint = torch.load('./checkpoint/{}_{}_n_cls_{}_epoch_{}_ckpt.pth'.\ + format(args.net, args.class_group, args.num_class, args.load_epoch), map_location=device) net.load_state_dict(checkpoint['net'], strict=False) best_acc = checkpoint['acc'] start_epoch = checkpoint['epoch'] @@ -305,8 +305,8 @@ def test(epoch): } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') - torch.save(state, './checkpoint/{}_n_cls_{}_epoch_{}_ckpt.pth'.\ - format(args.net, args.num_class, str(epoch))) + torch.save(state, './checkpoint/{}_{}_n_cls_{}_epoch_{}_ckpt.pth'.\ + format(args.net, args.class_group, args.num_class, str(epoch))) best_acc = acc if acc > best_acc: print('Saving..') @@ -317,8 +317,8 @@ def test(epoch): } if not os.path.isdir('checkpoint'): os.mkdir('checkpoint') - torch.save(state, './checkpoint/{}_n_cls_{}_epoch_best_ckpt.pth'.\ - format(args.net, args.num_class)) + torch.save(state, './checkpoint/{}_{}_n_cls_{}_epoch_best_ckpt.pth'.\ + format(args.net, args.class_group, args.num_class)) best_acc = acc print('\n\nargs.train: ', args.train, ', args.test:', args.test)