"""
use a pose detector (aruco, checkboard) and compute the pose on the whole dataset
"""
import os

import cv2
import numpy as np

from deeptracking.data.dataset import Dataset
from deeptracking.data.dataset_utils import image_blend
from deeptracking.data.modelrenderer import ModelRenderer, InitOpenGL
from deeptracking.detector.detector_aruco import ArucoDetector
from deeptracking.utils.camera import Camera
from deeptracking.utils.transform import Transform
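
# The docstring mentions both ArUco and checkerboard detectors, but only the ArUco
# detector is wired up below. As a rough, unused sketch (not part of this repository),
# a checkerboard pose could be estimated with plain OpenCV; the board dimensions,
# square size and intrinsics arguments are illustrative assumptions.
def detect_checkerboard_pose(bgr_image, camera_matrix, dist_coeffs,
                             board_size=(7, 5), square_size=0.03):
    """Return (rvec, tvec) of a checkerboard, or None if the board is not found."""
    gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray, board_size)
    if not found:
        return None
    # 3D corner positions on the board plane (z = 0), in the same order as the detection
    object_points = np.zeros((board_size[0] * board_size[1], 3), np.float32)
    object_points[:, :2] = np.mgrid[0:board_size[0], 0:board_size[1]].T.reshape(-1, 2) * square_size
    success, rvec, tvec = cv2.solvePnP(object_points, corners, camera_matrix, dist_coeffs)
    return (rvec, tvec) if success else None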


if __name__ == '__main__':
    # Hard-coded paths to the recorded sequence, the ArUco board layout, the 3D model
    # (with its ambient occlusion map) and the OpenGL shaders.
    dataset_path = "/media/mathieu/e912e715-2be7-4fa2-8295-5c3ef1369dd0/dataset/deeptracking/sequences/skull"
    detector_path = "../deeptracking/detector/aruco_layout.xml"
    model_path = "/home/mathieu/Dataset/3D_models/skull/skull.ply"
    model_ao_path = "/home/mathieu/Dataset/3D_models/skull/skull_ao.ply"
    shader_path = "../deeptracking/data/shaders"
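    # Load the sequence, the camera calibration saved with it, and the pose offset
    # ("offset.npy") that relates the detected marker to the object.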
    dataset = Dataset(dataset_path)
    offset = Transform.from_matrix(np.load(os.path.join(dataset.path, "offset.npy")))
    camera = Camera.load_from_json(dataset_path)
    dataset.camera = camera
    # Keep only the RGB frames; depth frames carry a 'd' in their file name and are skipped.
    files = [f for f in os.listdir(dataset_path) if os.path.splitext(f)[-1] == ".png" and 'd' not in os.path.splitext(f)[0]]
    detector = ArucoDetector(camera, detector_path)
    # OpenGL context and renderer used to overlay the model on each frame for visual inspection.
    window = InitOpenGL(camera.width, camera.height)
    vpRender = ModelRenderer(model_path, shader_path, camera, window, (camera.width, camera.height))
    vpRender.load_ambiant_occlusion_map(model_ao_path)
    ground_truth_pose = None
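    # Detect the marker in every frame; when detection fails, fall back on the pose from
    # the previous frame (the first frame must be detected, otherwise the pose is still None).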
    for i in range(len(files)):
        img = cv2.imread(os.path.join(dataset.path, "{}.png".format(i)))
        detection = detector.detect(img)
        if detection is not None:
            ground_truth_pose = detection
            # Convert the raw marker detection to the object pose using the stored offset.
            ground_truth_pose.combine(offset.inverse(), copy=False)
        else:
            print("[WARN]: frame {} was not detected, reusing the previous detection".format(i))
        dataset.add_pose(None, None, ground_truth_pose)
        # Render the model at the estimated pose and blend it over the frame as a sanity check.
        rgb_render, depth_render = vpRender.render(ground_truth_pose.transpose())
        bgr_render = rgb_render[:, :, ::-1].copy()
        img = image_blend(bgr_render, img)
        cv2.imshow("view", img)
        cv2.waitKey(1)
    dataset.save_json_files({"save_type": "png"})