-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathinference_openvino.py
78 lines (53 loc) · 2.58 KB
/
inference_openvino.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import torch
import argparse
from torch.nn.functional import sigmoid, softmax
import onnx
from openvino.runtime import Core
from object_detection import models_inference
from object_detection.utils import load_pretrained, img_preprocess_inference, nms_img, show_box
from core.settings import train_config,model_config
device = train_config.device
# NOTE(review): device must be set to "cpu" in the config for this script —
# the OpenVINO path below converts tensors with .numpy(), which requires CPU.
def convert_onnx(model, onnx_path, input_img):
    """Export *model* to ONNX at *onnx_path*, then validate the exported graph.

    *input_img* is a sample input tensor used by the exporter to trace the model.
    """
    torch.onnx.export(
        model,
        input_img,
        onnx_path,
        input_names=['input'],
        output_names=['output'],
        export_params=True,
    )
    # Read the file back and run the ONNX structural checker on it.
    exported_graph = onnx.load(onnx_path)
    onnx.checker.check_model(exported_graph)
def convert_openvino(onnx_path):
    """Load the ONNX model at *onnx_path* and compile it with OpenVINO.

    Uses the "AUTO" device plugin, letting OpenVINO pick the best target.
    Returns the compiled (executable) model.
    """
    runtime = Core()
    onnx_graph = runtime.read_model(model=onnx_path)
    compiled = runtime.compile_model(model=onnx_graph, device_name="AUTO")
    return compiled
def inference_test(img_path : str, model_path : str):
    """Run single-image inference through the OpenVINO backend.

    Loads the pretrained VitModel, exports it to ONNX, compiles the ONNX
    graph with OpenVINO, runs one forward pass on the preprocessed image,
    and post-processes the raw network output.

    Returns a tuple of numpy arrays for the first batch element:
    (objectness scores, per-class probabilities, clamped box coordinates).
    """
    # Load model and pretrained weights (device should be "cpu" in config).
    model = models_inference.VitModel().to(device)
    model, step_all, epo, lr = load_pretrained(model, model_path, device)
    model.eval()

    # Prepare the input image.
    img = img_preprocess_inference(img_path)
    img = img.to(device)

    # OpenVINO path: export to ONNX first, then compile the ONNX graph.
    onnx_path = "./model_onnx.onnx"
    convert_onnx(model, onnx_path, img)
    model_openvino = convert_openvino(onnx_path)

    # OpenVINO consumes numpy arrays; .cpu() guards against a non-CPU
    # device setting (a bare .numpy() raises on CUDA tensors).
    out = model_openvino([img.cpu().numpy()])[0]
    out = torch.tensor(out)

    # Postprocessing: sigmoid for the objectness channel ...
    obj_out = sigmoid(out[:, :, 0])
    # ... softmax over the class logits ...
    class_out = softmax(out[:, :, 1:model_config.class_num + 1], dim=-1)
    # ... and clamp box coordinates into [0, 1] (idiomatic replacement for
    # the nested torch.minimum/torch.maximum construction).
    box_out = torch.clamp(out[:, :, model_config.class_num + 1:], 0.0, 1.0)
    return obj_out[0].detach().cpu().numpy(), class_out[0].detach().cpu().numpy(), box_out[0].detach().cpu().numpy()
if __name__ == "__main__":
    # CLI entry point: run single-image inference, NMS-filter the raw
    # detections, then render the surviving boxes onto the image.
    cli = argparse.ArgumentParser()
    cli.add_argument("--img_path", type=str, required=True)
    cli.add_argument("--model_path", type=str, required=True)
    cli.add_argument("--out_path", type=str, required=True)
    opts = cli.parse_args()

    obj_out, class_out, box_out = inference_test(opts.img_path, opts.model_path)
    (obj_score_list_final, class_list_final, class_score_list_final,
     box_list_final, xy_list_final) = nms_img(obj_out, class_out, box_out)
    print(obj_score_list_final, class_list_final, class_score_list_final, box_list_final)
    show_box(opts.img_path, class_list_final, box_list_final, opts.out_path)