Merge branch 'main' into feat/ground_seg_annotation
nanoshimarobot committed Jan 18, 2025
2 parents 29553ae + 1845352 commit 15762bc
Showing 49 changed files with 2,838 additions and 664 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -4,6 +4,7 @@ tests/scale_to_t4/data_test_scale_to_t4_converter/output_base
/data*/
/notebooks/data/
/tests/data
/label_data*

# ros
/src
24 changes: 24 additions & 0 deletions config/convert_fastlabel_to_t4.yaml
@@ -0,0 +1,24 @@
task: convert_fastlabel_to_t4
description:
visibility:
full: "No occlusion of the object."
most: "Object is occluded, but by less than 50%."
partial: "The object is occluded by more than 50% (but not completely)."
none: "The object is 90-100% occluded and no points/pixels are visible in the label."
camera_index:
CAM_FRONT_NARROW: 0
CAM_FRONT_WIDE: 1
CAM_FRONT_RIGHT: 2
CAM_BACK_RIGHT: 3
CAM_BACK_NARROW: 4
CAM_BACK_WIDE: 5
CAM_BACK_LEFT: 6
CAM_FRONT_LEFT: 7

conversion:
make_t4_dataset_dir: false # If true, the output directory includes t4_dataset directory (such as "scene_dir"/t4_dataset/data|annotation). If false, "scene_dir"/data|annotation.
input_base: ./data/non_annotated_t4_format
input_anno_base: ./data/fastlabel/pcd_annotation
output_base: ./data/t4_format
input_bag_base: null #optional
topic_list: null #necessary if input_bag_base is not null
@@ -4,33 +4,40 @@ description:
 conversion:
   input_base: ./data/rosbag2
   output_base: ./data/non_annotated_t4_format
-  world_frame_id: "map"
+  world_frame_id: "base_link"
   start_timestamp_sec: 0 # Enter here if there is a timestamp for the start time. If not used, enter 0.
-  skip_timestamp: 2.0 # Do not load data for the first point cloud timestamp for skip_timestamp seconds.
+  skip_timestamp: 0.5 # Do not load data for the first point cloud timestamp for skip_timestamp seconds.
   num_load_frames: 0 # Maximum number of frames to save as t4 data. Set to 0 to automatically set it based on the number of lidar topics.
-  accept_frame_drop: False # If true, the conversion will continue even if the LiDAR frame is dropped.
+  accept_frame_drop: true # If true, the conversion will continue even if the LiDAR frame is dropped.
+  undistort_image: true # If true, the camera image will be undistorted.
   with_ins: true # whether to use INS messages as a ego state instead of `/tf`
-  with_vehicle_status: true # whether to generate `vehicle_state.json`
+  with_vehicle_status: false # whether to generate `vehicle_state.json`
   # The following configuration is generally not modified unless there are changes to the vehicle sensor configuration.
   lidar_sensor:
     topic: /sensing/lidar/concatenated/pointcloud
     channel: LIDAR_CONCAT
   camera_sensors: # Keep the same order as each camera exposure timing
-    - topic: /sensing/camera/camera3/image_raw/compressed
-      channel: CAM_BACK_LEFT
-      delay_msec: 48.33
-    - topic: /sensing/camera/camera2/image_raw/compressed
-      channel: CAM_FRONT_LEFT
-      delay_msec: 65.0
     - topic: /sensing/camera/camera0/image_raw/compressed
-      channel: CAM_FRONT
-      delay_msec: 81.67
-    - topic: /sensing/camera/camera4/image_raw/compressed
+      channel: CAM_FRONT_NARROW
+      delay_msec: 16.0
+    - topic: /sensing/camera/camera1/image_raw/compressed
+      channel: CAM_FRONT_WIDE
+      delay_msec: 16.0
+    - topic: /sensing/camera/camera2/image_raw/compressed
       channel: CAM_FRONT_RIGHT
-      delay_msec: 98.33
-    - topic: /sensing/camera/camera5/image_raw/compressed
+      delay_msec: -9.0
+    - topic: /sensing/camera/camera3/image_raw/compressed
       channel: CAM_BACK_RIGHT
-      delay_msec: 115.0
-    - topic: /sensing/camera/camera1/image_raw/compressed
-      channel: CAM_BACK
-      delay_msec: 131.67
+      delay_msec: -59.0
+    - topic: /sensing/camera/camera4/image_raw/compressed
+      channel: CAM_BACK_NARROW
+      delay_msec: -34.0
+    - topic: /sensing/camera/camera5/image_raw/compressed
+      channel: CAM_BACK_WIDE
+      delay_msec: -84.0
+    - topic: /sensing/camera/camera6/image_raw/compressed
+      channel: CAM_BACK_LEFT
+      delay_msec: -59.0
+    - topic: /sensing/camera/camera7/image_raw/compressed
+      channel: CAM_FRONT_LEFT
+      delay_msec: -59.0
16 changes: 16 additions & 0 deletions config/download_fastlabel_annotations.yaml
@@ -0,0 +1,16 @@
task: donwload_fastlabel
conversion:
output: label_data
access_token:
target_project_slug_keyword:
- 202405-lidar-3d-bbox
- 202405-panoptic-segmentation
- 202405-license-plate-person-head-bbox
- 202405-traffic-light-recognition
- 3d-lidar-bbox
- 3d-lidar-bbox
- 3d-lidar-bbox
- 3d-lidar-bbox
- panoptic-segmentation
- traffic-light-recognition
- license-plate-person-head
38 changes: 38 additions & 0 deletions config/label/object.yaml
@@ -4,6 +4,7 @@ bus: [bus, BUS, vehicle.bus, vehicle.bus (bendy & rigid)]
car:
[
car,
cars,
CAR,
vehicle.car,
vehicle.construction,
@@ -26,6 +27,8 @@ pedestrian:
pedestrian.adult,
pedestrian.child,
]
other_pedestrian: [other_pedestrian]
other_vehicle: [other_vehicle]
truck: [truck, TRUCK, vehicle.truck]
trailer: [trailer, TRAILER, vehicle.trailer]
ambulance: [ambulance, AMBULANCE, vehicle.ambulance]
@@ -53,3 +56,38 @@ stroller: [stroller, STROLLER, pedestrian.stroller]
police_officer: [police_officer, POLICE_OFFICER, pedestrian.police_officer]
wheelchair: [wheelchair, WHEELCHAIR, pedestrian.wheelchair]
forklift: [forklift, FORKLIFT]
train: [train, TRAIN]
cone: [cone]
curb: [curb]
gate: [gate]
guide_post: [guide_post]
construction: [constructions, construction]
traffic_sign: [traffic_sign]
road_debris: [road_debris]
other_obstacle: [other_obstacle]
obstacle_others: [obstacle_others]
laneline_solid_white: [laneline_solid_white]
laneline_dash_white: [laneline_dash_white]
laneline_solid_yellow: [laneline_solid_yellow]
laneline_dash_yellow: [laneline_dash_yellow]
laneline_solid_green: [laneline_solid_green]
laneline_solid_red: [laneline_solid_red]
deceleration_line: [deceleration_line]
dashed_lane_marking: [dashed_lane_markings, dash_lane_markings, dashed_lane_marking, dash_white_merge, dash_white_branch]
stopline: [stopline]
crosswalk: [crosswalk]
marking_character: [marking_character]
marking_arrow: [marking_arrow]
striped_road_marking: [striped_road_markings, striped_road_marking]
parking_lot: [parking_lot]
marking_other: [marking_other]
road: [road]
road_paint_lane_solid_white: [road_paint_lane_solid_white]
road_paint_lane_dash_white: [road_paint_lane_dash_white]
sidewalk: [sidewalk]
building: [building, buildling] # typo
wall/fence: [wall_fence, wall/fence]
pole: [pole]
vegetation/terrain: [vegetation_terrain, vegetation/terrain]
sky: [sky]
traffic_light: [traffic_light]
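
Each key in `config/label/object.yaml` is a canonical category, and each list entry is a raw spelling accepted for it (including the intentional `buildling` typo synonym above). A minimal sketch of how such a mapping can be inverted to normalize raw annotation labels — illustrative only, not the converter's own code, and assuming PyYAML is available and the script runs from the repository root:

```python
# Illustrative sketch: invert a label-synonym map like config/label/object.yaml
# so raw annotation labels can be normalized to their canonical category name.
import yaml  # PyYAML


def build_label_normalizer(path: str) -> dict[str, str]:
    with open(path) as f:
        synonyms_by_category = yaml.safe_load(f)  # e.g. {"car": ["car", "cars", ...], ...}
    return {
        raw_label: category
        for category, raw_labels in synonyms_by_category.items()
        for raw_label in raw_labels
    }


# Usage (assumes the repository's config/label/object.yaml is present):
normalize = build_label_normalizer("config/label/object.yaml")
print(normalize.get("buildling", "unknown"))  # -> "building" (typo synonym maps to the canonical name)
```
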
2 changes: 2 additions & 0 deletions config/label/surface.yaml
@@ -4,8 +4,10 @@
- sidewalk
- building
- wall_fence
- wall/fence
- pole
- vegetation_terrain
- vegetation/terrain
- sky
- road_paint_lane_solid_white
- road_paint_lane_dash_white
23 changes: 23 additions & 0 deletions config/update_t4_with_fastlabel_sample.yaml
@@ -0,0 +1,23 @@
task: update_t4_with_fastlabel
description:
visibility:
full: "No occlusion of the object."
most: "Object is occluded, but by less than 50%."
partial: "The object is occluded by more than 50% (but not completely)."
none: "The object is 90-100% occluded and no points/pixels are visible in the label."
camera_index:
CAM_FRONT_NARROW: 0
CAM_FRONT_WIDE: 1
CAM_FRONT_RIGHT: 2
CAM_BACK_RIGHT: 3
CAM_BACK_NARROW: 4
CAM_BACK_WIDE: 5
CAM_BACK_LEFT: 6
CAM_FRONT_LEFT: 7
surface_categories: ./config/label/surface.yaml

conversion:
make_t4_dataset_dir: false # If true, the output directory includes t4_dataset directory (such as "scene_dir"/t4_dataset/data|annotation). If false, "scene_dir"/data|annotation.
input_base: ./data/input_t4_format # could be non_annotated_t4_format or t4_format_3d_annotated
input_anno_base: ./data/fastlabel
output_base: ./data/output_t4_format # currently, this only includes the 2D annotations
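
As an illustration of the `make_t4_dataset_dir` option described in the comment above (a sketch only, based on that comment; the converter's real path handling may differ, and the scene directory name is hypothetical):

```python
# Illustrative sketch of the two output layouts controlled by `make_t4_dataset_dir`.
import os


def output_dirs(scene_dir: str, make_t4_dataset_dir: bool) -> tuple[str, str]:
    # If enabled, data/annotation live under an extra "t4_dataset" directory.
    base = os.path.join(scene_dir, "t4_dataset") if make_t4_dataset_dir else scene_dir
    return os.path.join(base, "data"), os.path.join(base, "annotation")


print(output_dirs("./data/output_t4_format/scene_0", make_t4_dataset_dir=False))
# ('./data/output_t4_format/scene_0/data', './data/output_t4_format/scene_0/annotation')
```
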
47 changes: 47 additions & 0 deletions docs/t4_format_3d_detailed.md
@@ -355,6 +355,53 @@ Ego_pose represents the pose of the ego vehicle at a specific timestamp.
This includes both the vehicle's position and orientation in space, typically referenced in a global coordinate system such as the map or odometry frame.
The concept is based on [nuScenes](https://arxiv.org/pdf/1903.11027) and [nuPlan](https://nuplan-devkit.readthedocs.io/en/latest/nuplan_schema.html#ego-pose).

#### Recording Options

There are two options for creating the corresponding ego pose records: using `/tf` messages or using INS messages.

Note that, in either case, each record is the result of interpolating the ego pose at the timestamp of the corresponding sensor data.
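
As a rough illustration of that interpolation step (a sketch only, not the converter's actual code; the `timestamp`, `translation`, and `rotation` field names simply mirror the ego_pose record described above), a pose at a sensor timestamp can be derived from the two nearest pose samples like this:

```python
# Illustrative only: interpolate an ego pose at a sensor-data timestamp from the
# two nearest pose samples (linear interpolation for translation, slerp for rotation).
import numpy as np


def slerp(q0: np.ndarray, q1: np.ndarray, t: float) -> np.ndarray:
    """Spherical linear interpolation between two unit quaternions [w, x, y, z]."""
    dot = float(np.dot(q0, q1))
    if dot < 0.0:  # take the shorter arc
        q1, dot = -q1, -dot
    if dot > 0.9995:  # nearly parallel: fall back to normalized linear interpolation
        q = (1.0 - t) * q0 + t * q1
        return q / np.linalg.norm(q)
    theta = np.arccos(dot)
    return (np.sin((1.0 - t) * theta) * q0 + np.sin(t * theta) * q1) / np.sin(theta)


def interpolate_ego_pose(pose_before: dict, pose_after: dict, stamp_us: int) -> dict:
    """Interpolate between two ego_pose-style records at timestamp `stamp_us`."""
    t0, t1 = pose_before["timestamp"], pose_after["timestamp"]
    alpha = 0.0 if t1 == t0 else (stamp_us - t0) / (t1 - t0)
    trans = (1.0 - alpha) * np.array(pose_before["translation"]) + alpha * np.array(
        pose_after["translation"]
    )
    rot = slerp(np.array(pose_before["rotation"]), np.array(pose_after["rotation"]), alpha)
    return {"timestamp": stamp_us, "translation": trans.tolist(), "rotation": rot.tolist()}
```
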

##### With `/tf` message

This option assumes that the input RosBag contains `/tf` messages.

> [!WARNING]
> This option does not record `velocity`, `acceleration` and `geocoordinate` fields.

To create ego pose records from `/tf` messages, set `with_ins: false` in your configuration:

```yaml
task: convert_rosbag2_to_non_annotated_t4
description:
scene: ""
conversion:
...
with_ins: false # use `/tf` message
...
```

##### With INS messages

This option assumes that the input RosBag contains the following INS-related messages.

| Topic | Type | Description |
| :---------------------: | :-------------------------: | -------------------------------------------------------------------- |
| `/ins/oxts/odometry` | `nav_msgs/msg/Odometry` | An estimate of a position and velocity in free space. |
| `/ins/oxts/imu`         | `sensor_msgs/msg/Imu`       | IMU (Inertial Measurement Unit) data.                                 |
| `/ins/oxts/nav_sat_fix` | `sensor_msgs/msg/NavSatFix` | Navigation Satellite fix for any Global Navigation Satellite System. |

To create ego pose records from INS messages, set `with_ins: true` in your configuration:

```yaml
task: convert_rosbag2_to_non_annotated_t4
description:
scene: ""
conversion:
...
with_ins: true # use INS messages
...
```
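
For reference, below is a minimal sketch of how these three topics could be combined into one ego pose record, including the `velocity`, `acceleration`, and `geocoordinate` fields that the `/tf`-based option leaves out. It is illustrative only: it assumes plain ROS 2 message objects sampled at roughly the same time, and the converter's actual field layout may differ.

```python
# Illustrative only: build an ego_pose-style record from the three INS topics.
# `odom`, `imu`, and `nav_sat_fix` are assumed to be messages taken from
# /ins/oxts/odometry, /ins/oxts/imu, and /ins/oxts/nav_sat_fix respectively.
def ego_pose_from_ins(odom, imu, nav_sat_fix, stamp_us: int) -> dict:
    pose = odom.pose.pose  # nav_msgs/Odometry: position + orientation
    twist = odom.twist.twist  # linear/angular velocity
    accel = imu.linear_acceleration  # sensor_msgs/Imu
    return {
        "timestamp": stamp_us,
        "translation": [pose.position.x, pose.position.y, pose.position.z],
        "rotation": [
            pose.orientation.w,
            pose.orientation.x,
            pose.orientation.y,
            pose.orientation.z,
        ],
        "velocity": [twist.linear.x, twist.linear.y, twist.linear.z],
        "acceleration": [accel.x, accel.y, accel.z],
        "geocoordinate": [
            nav_sat_fix.latitude,  # sensor_msgs/NavSatFix
            nav_sat_fix.longitude,
            nav_sat_fix.altitude,
        ],
    }
```
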

#### Items

- ego_pose
56 changes: 29 additions & 27 deletions docs/tools_overview.md
@@ -177,18 +177,20 @@ output: T4 format data

This function is for converting a synthetic bag to T4 format.
Synthetic bag must contain ground truth objects, pointclouds and tf.
-`ground_truth/objects` can be either `autoware_auto_perception_msgs/msg/DetectedObjects` or `autoware_auto_perception_msgs/msg/TrackedObjects`.
+`ground_truth/objects` can be either `autoware_perception_msgs/msg/DetectedObjects` or `autoware_perception_msgs/msg/TrackedObjects`.

Note that `autoware_auto_perception_msgs` is going to be deprecated (timeline TBD).

#### Messages

-| Topic Name | Required | Message Type |
-| ----------------------------------------------------------- | -------- | --------------------------------------------------- |
-| `/ground_truth/filtered/objects` or `/ground_truth/objects` | o | `autoware_auto_perception_msgs/msg/DetectedObjects` |
-| `/sensing/lidar/concatenated/pointcloud` | o | `sensor_msgs/msg/PointCloud2` |
-| `/tf` | o | `tf2_msgs/msg/TFMessage` |
-| `/tf_static` | o | `tf2_msgs/msg/TFMessage` |
-| | | `sensor_msgs/msg/CompressedImage` |
-| | | `sensor_msgs/msg/CameraInfo` |
+| Topic Name | Required | Message Type |
+| ----------------------------------------------------------- | -------- | ---------------------------------------------- |
+| `/ground_truth/filtered/objects` or `/ground_truth/objects` | o | `autoware_perception_msgs/msg/DetectedObjects` |
+| `/sensing/lidar/concatenated/pointcloud` | o | `sensor_msgs/msg/PointCloud2` |
+| `/tf` | o | `tf2_msgs/msg/TFMessage` |
+| `/tf_static` | o | `tf2_msgs/msg/TFMessage` |
+| | | `sensor_msgs/msg/CompressedImage` |
+| | | `sensor_msgs/msg/CameraInfo` |

#### script

@@ -203,15 +205,15 @@ output: T4 format data

#### Messages

-| Topic Name | Required | Message Type |
-| --------------------------------------------- | -------- | -------------------------------------------------- |
-| `/ground_truth/objects` | o | `autoware_auto_perception_msgs/msg/TrackedObjects` |
-| `/sensing/camera/camera{ID}/camera_info` | o | `visualization_msgs/msg/MarkerArray` |
-| `/sensing/lidar/concatenated/pointcloud` | o | `sensor_msgs/msg/PointCloud2` |
-| `/tf` | o | `tf2_msgs/msg/TFMessage` |
-| `/tf_static` | o | `tf2_msgs/msg/TFMessage` |
-| `/sensing/camera/camera{ID}/image_rect_color` | o | `sensor_msgs/msg/Image` |
-| `/sensing/camera/camera{ID}/camera_info` | o | `sensor_msgs/msg/CameraInfo` |
+| Topic Name | Required | Message Type |
+| --------------------------------------------- | -------- | --------------------------------------------- |
+| `/ground_truth/objects` | o | `autoware_perception_msgs/msg/TrackedObjects` |
+| `/sensing/camera/camera{ID}/camera_info` | o | `visualization_msgs/msg/MarkerArray` |
+| `/sensing/lidar/concatenated/pointcloud` | o | `sensor_msgs/msg/PointCloud2` |
+| `/tf` | o | `tf2_msgs/msg/TFMessage` |
+| `/tf_static` | o | `tf2_msgs/msg/TFMessage` |
+| `/sensing/camera/camera{ID}/image_rect_color` | o | `sensor_msgs/msg/Image` |
+| `/sensing/camera/camera{ID}/camera_info` | o | `sensor_msgs/msg/CameraInfo` |

#### script

@@ -224,21 +226,21 @@ python -m perception_dataset.convert --config config/rosbag2_to_t4/convert_synth
#### Description

This function is for converting a pseudo-labeled bag to T4 format.
-The pseudo-labeled bag contains either detection output or tracking output from Autoware. The detection output is a message of `autoware_auto_perception_msgs/msg/DetectedObjects`, and the tracking output is a message of `autoware_auto_perception_msgs/msg/TrackedObjects`.
+The pseudo-labeled bag contains either detection output or tracking output from Autoware. The detection output is a message of `autoware_perception_msgs/msg/DetectedObjects`, and the tracking output is a message of `autoware_perception_msgs/msg/TrackedObjects`.

input: rosbag2
output: T4 format data

#### Messages

-| Topic Name | Required | Message Type |
-| -------------------------------------------------------------------------------------------------------------------------- | -------- | --------------------------------------------------------------------------------------------------------- |
-| `/perception/object_recognition/detection/objects` or `/perception/object_recognition/tracking/objects` or other any value | o | `autoware_auto_perception_msgs/msg/TrackedObjects` or `autoware_auto_perception_msgs/msg/DetectedObjects` |
-| `/sensing/lidar/concatenated/pointcloud` or other any value | o | `sensor_msgs/msg/PointCloud2` |
-| `/tf` | o | `tf2_msgs/msg/TFMessage` |
-| `/tf_static` | o | `tf2_msgs/msg/TFMessage` |
-| `/sensing/camera/camera{ID}/image_rect_color/compressed` | | `sensor_msgs/msg/CompressedImage` |
-| `/sensing/camera/camera{ID}/camera_info` | | `sensor_msgs/msg/CameraInfo` |
+| Topic Name | Required | Message Type |
+| -------------------------------------------------------------------------------------------------------------------------- | -------- | ----------------------------------------------------------------------------------------------- |
+| `/perception/object_recognition/detection/objects` or `/perception/object_recognition/tracking/objects` or other any value | o | `autoware_perception_msgs/msg/TrackedObjects` or `autoware_perception_msgs/msg/DetectedObjects` |
+| `/sensing/lidar/concatenated/pointcloud` or other any value | o | `sensor_msgs/msg/PointCloud2` |
+| `/tf` | o | `tf2_msgs/msg/TFMessage` |
+| `/tf_static` | o | `tf2_msgs/msg/TFMessage` |
+| `/sensing/camera/camera{ID}/image_rect_color/compressed` | | `sensor_msgs/msg/CompressedImage` |
+| `/sensing/camera/camera{ID}/camera_info` | | `sensor_msgs/msg/CameraInfo` |

#### script

8 changes: 8 additions & 0 deletions perception_dataset/constants.py
@@ -43,6 +43,14 @@ class SENSOR_ENUM(Enum):
"channel": "CAM_BACK",
"modality": SENSOR_MODALITY_ENUM.CAMERA.value,
}
CAM_BACK_NARROW = {
"channel": "CAM_BACK_NARROW",
"modality": SENSOR_MODALITY_ENUM.CAMERA.value,
}
CAM_BACK_WIDE = {
"channel": "CAM_BACK_WIDE",
"modality": SENSOR_MODALITY_ENUM.CAMERA.value,
}
CAM_FRONT_LEFT = {
"channel": "CAM_FRONT_LEFT",
"modality": SENSOR_MODALITY_ENUM.CAMERA.value,
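
A small usage sketch for the newly added channels (illustrative only; it assumes nothing beyond the `SENSOR_ENUM` definition shown above):

```python
# Illustrative only: look up channel/modality for one of the newly added cameras.
from perception_dataset.constants import SENSOR_ENUM

sensor = SENSOR_ENUM.CAM_BACK_NARROW.value
print(sensor["channel"])   # "CAM_BACK_NARROW"
print(sensor["modality"])  # camera modality, e.g. "camera"
```
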