generate_synthetic_example.json
{
  # This is a list of all 3D models to render (supports more than one)
  # [WARNING]: remove all the # comments to parse the JSON correctly!
  "models": [
    {
      "name": "model_name",
      "model_path": "path/to/geometry.ply",
      "ambiant_occlusion_model": "path/to/ao.ply",
      "object_width": "250"           # cropping width of the model (mm)
    }
  ],
  "camera_path": "/path/to/camera.json",
  "shader_path": "path/to/shader",
  "output_path": "path/to/output",
  "preload": "False",                 # True or False: if True, appends to the data already in the output path; otherwise overwrites it
  "save_type": "numpy",               # numpy or png, a trade-off between load speed and disk space
  "sample_quantity": "100000",        # number of samples per model
  "image_size": "150",                # pixel width/height of the samples
  "translation_range": "0.02",        # max translation (m)
  "rotation_range": "10",             # max rotation (degrees)
  "sphere_min_radius": "0.4",         # min distance from the camera
  "sphere_max_radius": "1.5"          # max distance from the camera
}
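
# Because of the "#" comments above, the file is not valid JSON as written. Below is a
# minimal Python sketch (not part of the original repo) of one way to strip the comments
# before parsing; the helper name load_commented_json is hypothetical, and it assumes no
# string value in the config contains a "#" character.

import json
import re

def load_commented_json(path):
    """Read a JSON file, dropping everything from an unquoted '#' to the end of each line."""
    with open(path) as f:
        # Remove '# ...' comments; safe here because no config value contains '#'.
        lines = [re.sub(r'\s*#.*$', '', line) for line in f]
    return json.loads("".join(lines))

# Example usage with the file above:
config = load_commented_json("generate_synthetic_example.json")
print(config["models"][0]["model_path"])      # "path/to/geometry.ply"
print(float(config["translation_range"]))     # 0.02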