eval-models.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 22:38:30 2018
@author: Taha Emara @email: [email protected]
"""
import time
import numpy as np
import argparse
from addict import Dict
import yaml
import cv2
# PyTorch includes
import torch
from torchvision import transforms
from torch.utils.data import DataLoader
# Custom includes
from dataloaders import cityscapes
from dataloaders import utils as dataloaders_utils
from models.liteseg import LiteSeg
from utils import iou_eval
from dataloaders import augmentation as augment
from utils.flops_counter import add_flops_counting_methods, flops_to_string, get_model_parameters_number
#from models.backbone_networks.darknet import Darknet19
ap = argparse.ArgumentParser()
ap.add_argument('--backbone_network', required=False, default='shufflenet',
                help='name of backbone network: shufflenet, mobilenet, or darknet')
ap.add_argument('-modpth', '--model_path', required=False,
                default='pretrained_models/liteseg-shufflenet-cityscapes.pth',
                help='path to pretrained model')
args = ap.parse_args()

# yaml.safe_load avoids the unsafe-loader warning/error that bare yaml.load
# raises on PyYAML >= 5.1.
with open("config/training.yaml") as f:
    CONFIG = Dict(yaml.safe_load(f))

backbone_network = args.backbone_network
modelpath = args.model_path
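
# Example invocation (defaults shown; adjust the paths to your setup):
#   python eval-models.py --backbone_network shufflenet \
#       --model_path pretrained_models/liteseg-shufflenet-cityscapes.pth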
#Net1=Darknet19(weights_file=CONFIG.PRETRAINED_DarkNET19)
#Net1.cuda()
#Net1.eval()
net = LiteSeg.build(backbone_network, modelpath, CONFIG, is_train=False)
net.eval()

if CONFIG.USING_GPU:
    torch.cuda.set_device(device=CONFIG.GPU_ID)
    net.cuda()

# Burn-in with 200 random images so GPU clocks and caches are warmed up
# before timing (assumes a CUDA device is available).
for x in range(200):
    image = torch.randn(1, 3, 360, 640).cuda()
    with torch.no_grad():
        output = net(image)

# Report inference speed in FPS; torch.cuda.synchronize() ensures the
# asynchronous GPU work has finished before the timer is read.
total = 0
for x in range(200):
    image = torch.randn(1, 3, 360, 640).cuda()
    with torch.no_grad():
        a = time.perf_counter()
        output = net(image)
        torch.cuda.synchronize()
        b = time.perf_counter()
        total += b - a
print("FPS: {:.2f}".format(200 / total))
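
# FLOPs and parameter count, measured at the 512x1024 evaluation resolution.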
batch = torch.randn(1, 3, 512, 1024).cuda()  # randn instead of an uninitialized FloatTensor
model = add_flops_counting_methods(net)
model.eval().start_flops_count()
_ = model(batch)
print('Flops: {}'.format(flops_to_string(model.compute_average_flops_cost())))
print('Params: ' + get_model_parameters_number(model))
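
# mIoU evaluation on the Cityscapes validation set.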
composed_transforms_ts = transforms.Compose([
    augment.RandomHorizontalFlip(),
    augment.CenterCrop((512, 1024)),
    augment.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),  # ImageNet statistics
    augment.ToTensor()])
cityscapes_val = cityscapes.Cityscapes(root=CONFIG.DATASET_FINE, extra=CONFIG.USING_COARSE,
                                       split='val', transform=composed_transforms_ts)
valloader = DataLoader(cityscapes_val, batch_size=1, shuffle=True, num_workers=0)
num_img_vl = len(valloader)
iev = iou_eval.Eval(20, 19)  # 20 classes, with index 19 treated as the ignore class
net.eval()
for ii, sample_batched in enumerate(valloader):
    inputs, labels = sample_batched['image'], sample_batched['label']
    if CONFIG.USING_GPU:
        inputs, labels = inputs.cuda(), labels.cuda()
    with torch.no_grad():
        outputs = net(inputs)
    predictions = torch.max(outputs, 1)[1]
    # Remap the 255 "ignore" label to 19 so it matches the evaluator's ignore index.
    y = torch.ones(labels.size()[2], labels.size()[3]).mul(19).cuda()
    labels = labels.where(labels != 255, y)
    iev.addBatch(predictions.unsqueeze(1).data, labels)
    if ii == num_img_vl - 1:  # last batch: report the accumulated score
        print('Validation Result:')
        print("MIOU", iev.getIoU())