From 6c1f98c18179eb95ac79f9942ade70135ec76597 Mon Sep 17 00:00:00 2001
From: Danryanh7
Date: Fri, 31 May 2024 00:22:25 +0000
Subject: [PATCH 1/6] WIP camera post synchronization

---
 .../post_synchronizer.py                      | 54 +++++++++++++++++++
 1 file changed, 54 insertions(+)
 create mode 100644 src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py

diff --git a/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py b/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
new file mode 100644
index 00000000..1e7b764f
--- /dev/null
+++ b/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
@@ -0,0 +1,54 @@
+import rclpy
+from rclpy.node import Node
+from message_filters import Subscriber, ApproximateTimeSynchronizer
+from vision_msgs.msg import Detection2DArray, Detection2D
+from std_msgs.msg import Header
+
+class CameraSyncNode(Node):
+    def __init__(self):
+        super().__init__('camera_sync_node')
+
+        self.camera1_sub = Subscriber(self, Detection2DArray, '/camera/left/camera_detections')
+        self.camera2_sub = Subscriber(self, Detection2DArray, '/camera/center/camera_detections')
+        self.camera3_sub = Subscriber(self, Detection2DArray, '/camera/right/camera_detections')
+
+        self.ts = ApproximateTimeSynchronizer(
+            [self.camera1_sub, self.camera2_sub, self.camera3_sub],
+            queue_size=10,
+            slop=0.1)
+
+        self.ts.registerCallback(self.callback)
+
+        self.combined_detection_publisher = self.create_publisher(Detection2DArray, '/combined_detections', 10)
+
+    def callback(self, camera1_msg, camera2_msg, camera3_msg):
+        combined_detections = Detection2DArray()
+        combined_detections.header = Header()
+        combined_detections.header.stamp = self.get_clock().now().to_msg()
+        combined_detections.header.frame_id = camera1_msg.header.frame_id
+
+        for detection in camera1_msg.detections:
+            combined_detections.detections.append(detection)
+
+        for detection in camera2_msg.detections:
+            combined_detections.detections.append(detection)
+
+        for detection in camera3_msg.detections:
+            combined_detections.detections.append(detection)
+
+        self.combined_detection_publisher.publish(combined_detections)
+
+def main(args=None):
+    rclpy.init(args=args)
+    node = CameraSyncNode()
+
+    try:
+        rclpy.spin(node)
+    except KeyboardInterrupt:
+        pass
+
+    node.destroy_node()
+    rclpy.shutdown()
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file

From 4882626c84de45d8ee3bbfb0ee807e16a0ad74b8 Mon Sep 17 00:00:00 2001
From: Danryanh7
Date: Fri, 31 May 2024 00:23:57 +0000
Subject: [PATCH 2/6] removing ctr

---
 .../camera_object_detection/post_synchronizer.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py b/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
index 1e7b764f..e2539542 100644
--- a/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
+++ b/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
@@ -42,10 +42,7 @@ def main(args=None):
     rclpy.init(args=args)
     node = CameraSyncNode()
 
-    try:
-        rclpy.spin(node)
-    except KeyboardInterrupt:
-        pass
+    rclpy.spin(node)
 
     node.destroy_node()
     rclpy.shutdown()
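The two patches above already give a runnable synchronizer. A quick way to exercise it (not part of the series; the script name and the 10 Hz timer are assumptions) is to publish identically stamped, empty Detection2DArray messages on the three subscribed topics and watch /combined_detections:

    # sync_test_publisher.py -- hypothetical helper, not included in these patches.
    # Publishes time-stamped Detection2DArray messages on the three input topics so
    # that the ApproximateTimeSynchronizer in CameraSyncNode fires.
    import rclpy
    from rclpy.node import Node
    from vision_msgs.msg import Detection2DArray


    class SyncTestPublisher(Node):
        def __init__(self):
            super().__init__('sync_test_publisher')
            topics = [
                '/camera/left/camera_detections',
                '/camera/center/camera_detections',
                '/camera/right/camera_detections',
            ]
            self.pubs = [self.create_publisher(Detection2DArray, t, 10) for t in topics]
            self.timer = self.create_timer(0.1, self.tick)

        def tick(self):
            stamp = self.get_clock().now().to_msg()
            for pub in self.pubs:
                msg = Detection2DArray()
                msg.header.stamp = stamp  # identical stamps stay well inside the 0.1 s slop
                msg.header.frame_id = 'camera'
                pub.publish(msg)


    def main():
        rclpy.init()
        rclpy.spin(SyncTestPublisher())


    if __name__ == '__main__':
        main()

With CameraSyncNode running, `ros2 topic hz /combined_detections` should then report roughly the publisher rate.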
From 7e4aaa34d628c0ff761fa060357579b106ac42e0 Mon Sep 17 00:00:00 2001
From: Mark Chiu
Date: Tue, 4 Jun 2024 00:20:10 +0000
Subject: [PATCH 3/6] Use correct traffic signs model

---
 modules/docker-compose.perception.yaml                        | 4 ++--
 .../camera_object_detection/config/traffic_signs_config.yaml  | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml
index d30474f9..b81eac61 100644
--- a/modules/docker-compose.perception.yaml
+++ b/modules/docker-compose.perception.yaml
@@ -29,11 +29,11 @@ services:
             - driver: nvidia
               count: 1
               capabilities: [ gpu ]
-    command: /bin/bash -c "ros2 launch camera_object_detection eve_launch.py"
+    command: /bin/bash -c "ros2 launch camera_object_detection eve.launch.py"
     volumes:
       - /mnt/wato-drive2/perception_models/yolov8m.pt:/perception_models/yolov8m.pt
       - /mnt/wato-drive2/perception_models/traffic_light.pt:/perception_models/traffic_light.pt
-      - /mnt/wato-drive2/perception_models/traffic_signs_v0.pt:/perception_models/traffic_signs_v1.pt
+      - /mnt/wato-drive2/perception_models/traffic_signs_v3.pt:/perception_models/traffic_signs.pt
 
   lidar_object_detection:
     build:
diff --git a/src/perception/camera_object_detection/config/traffic_signs_config.yaml b/src/perception/camera_object_detection/config/traffic_signs_config.yaml
index 8a143a77..ac312d1a 100644
--- a/src/perception/camera_object_detection/config/traffic_signs_config.yaml
+++ b/src/perception/camera_object_detection/config/traffic_signs_config.yaml
@@ -3,7 +3,7 @@ traffic_signs_node:
     camera_topic: /camera/left/image_color
     publish_vis_topic: /traffic_signs_viz
     publish_detection_topic: /traffic_signs
-    model_path: /perception_models/traffic_signs_v1.pt
+    model_path: /perception_models/traffic_signs.pt
     crop_mode: CenterCrop
     image_size: 1024
     save_detections: false
From c218956f7aaa2bc1a1a7caffcadaa3655e1fb923 Mon Sep 17 00:00:00 2001
From: Mark Chiu
Date: Tue, 4 Jun 2024 00:21:00 +0000
Subject: [PATCH 4/6] Add PoC work for Synchronizer node

---
 .../post_synchronizer.py                      | 68 ++++++++++++++++++-
 .../yolov8_detection.py                       | 14 ++--
 2 files changed, 72 insertions(+), 10 deletions(-)

diff --git a/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py b/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
index e2539542..46d5ac62 100644
--- a/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
+++ b/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
@@ -1,31 +1,85 @@
 import rclpy
 from rclpy.node import Node
 from message_filters import Subscriber, ApproximateTimeSynchronizer
+
+from sensor_msgs.msg import Image
 from vision_msgs.msg import Detection2DArray, Detection2D
 from std_msgs.msg import Header
+
+from ultralytics.utils.plotting import Annotator, colors
+
+from cv_bridge import CvBridgeError
 
-class CameraSyncNode(Node):
+class CameraSyncNode(Node):  # synchronizes visualizations
     def __init__(self):
         super().__init__('camera_sync_node')
+
+        self.camera_img_sub = Subscriber(self, Image, '/camera/right/image_color')
 
         self.camera1_sub = Subscriber(self, Detection2DArray, '/camera/left/camera_detections')
         self.camera2_sub = Subscriber(self, Detection2DArray, '/camera/center/camera_detections')
         self.camera3_sub = Subscriber(self, Detection2DArray, '/camera/right/camera_detections')
 
         self.ts = ApproximateTimeSynchronizer(
-            [self.camera1_sub, self.camera2_sub, self.camera3_sub],
+            [self.camera_img_sub, self.camera1_sub, self.camera2_sub, self.camera3_sub],
             queue_size=10,
             slop=0.1)
 
         self.ts.registerCallback(self.callback)
 
         self.combined_detection_publisher = self.create_publisher(Detection2DArray, '/combined_detections', 10)
+        self.vis_publisher = self.create_publisher(Image, '/annotated_img')
+
+    def process_img(self, image):
+        try:
+            cv_image = self.cv_bridge.imgmsg_to_cv2(image, desired_encoding="passthrough")
+        except CvBridgeError as e:
+            self.get_logger().error(str(e))
+            return
+        return cv_image
+
+    def postprocess_detections(self, detections, annotator):
+        """
+        Post-process draws bounding boxes on the camera image.
+
+        Parameters:
+            detections: A list of dict with the format
+                {
+                    "label": str,
+                    "bbox": [float],
+                    "conf": float
+                }
+            annotator: An ultralytics.utils.plotting.Annotator for the current image
+
+        Returns:
+            processed_detections: filtered detections
+            annotator_img: image with bounding boxes drawn on
+        """
+        processed_detections = detections
+
+        for det in detections:
+            label = f'{det["label"]} {det["conf"]:.2f}'
+            x1, y1, w1, h1 = det["bbox"]
+            xyxy = [x1, y1, x1 + w1, y1 + h1]
+            annotator.box_label(xyxy, label, color=colors(1, True))
+
+        annotator_img = annotator.result()
+        return (processed_detections, annotator_img)
+
+    def publish_vis(self, annotated_img, msg):
+        # Publish visualizations
+        imgmsg = self.cv_bridge.cv2_to_imgmsg(annotated_img, "bgr8")
+        imgmsg.header.stamp = msg.header.stamp
+        imgmsg.header.frame_id = msg.header.frame_id
+        self.vis_publisher.publish(imgmsg)
 
-    def callback(self, camera1_msg, camera2_msg, camera3_msg):
+    def callback(self, camera_img_sub, camera1_msg, camera2_msg, camera3_msg):
         combined_detections = Detection2DArray()
         combined_detections.header = Header()
         combined_detections.header.stamp = self.get_clock().now().to_msg()
         combined_detections.header.frame_id = camera1_msg.header.frame_id
+
+        cv_image = self.process_img(camera_img_sub)
 
         for detection in camera1_msg.detections:
             combined_detections.detections.append(detection)
@@ -35,8 +89,16 @@
         for detection in camera2_msg.detections:
             combined_detections.detections.append(detection)
 
         for detection in camera3_msg.detections:
             combined_detections.detections.append(detection)
+
+        annotator = Annotator(
+            cv_image,
+            line_width=self.line_thickness,
+            example=str(self.names),
+        )
+        (combined_detections, annotated_img) = self.postprocess_detections(combined_detections, annotator)
 
         self.combined_detection_publisher.publish(combined_detections)
+        self.publish_vis(annotated_img, camera_img_sub)
 
 def main(args=None):
     rclpy.init(args=args)
diff --git a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
index 05ceb431..c01e408e 100755
--- a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
+++ b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
@@ -281,16 +281,16 @@ def image_callback(self, msg):
                 )
                 self.get_logger().debug(f"{label}: {bbox}")
 
-        annotator = Annotator(
-            cv_image,
-            line_width=self.line_thickness,
-            example=str(self.names),
-        )
-        (detections, annotated_img) = self.postprocess_detections(detections, annotator)
+        # annotator = Annotator(
+        #     cv_image,
+        #     line_width=self.line_thickness,
+        #     example=str(self.names),
+        # )
+        # (detections, annotated_img) = self.postprocess_detections(detections, annotator)
 
         # Currently we support a single camera so we pass an empty string
         feed = ""
-        self.publish_vis(annotated_img, msg, feed)
+        # self.publish_vis(annotated_img, msg, feed)
         self.publish_detections(detections, msg, feed)
 
         if self.save_detections:
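The PoC above leans on two third-party pieces: cv_bridge for Image/ndarray conversion and the Ultralytics Annotator for drawing. A standalone sketch of that round trip, using the same [x, y, w, h] bbox convention as postprocess_detections (the dummy image and detection values are made up for illustration):

    # Standalone sketch of the cv_bridge / Annotator round trip; values are illustrative.
    import numpy as np
    from cv_bridge import CvBridge
    from ultralytics.utils.plotting import Annotator, colors

    bridge = CvBridge()
    cv_image = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in for a decoded camera frame
    annotator = Annotator(cv_image, line_width=1, example='traffic_light')

    det = {"label": "traffic_light", "bbox": [100.0, 120.0, 40.0, 80.0], "conf": 0.87}
    x, y, w, h = det["bbox"]
    annotator.box_label([x, y, x + w, y + h], f'{det["label"]} {det["conf"]:.2f}',
                        color=colors(1, True))

    annotated = annotator.result()                       # ndarray with the box drawn on it
    img_msg = bridge.cv2_to_imgmsg(annotated, "bgr8")    # ready to publish as sensor_msgs/Image

Note that this sketch constructs its own CvBridge; as of this patch the node still expects self.cv_bridge, self.line_thickness and self.names to be provided elsewhere.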
From f42f5f7c373dccb7a595dae51048c130a6e45553 Mon Sep 17 00:00:00 2001
From: Mark Chiu
Date: Fri, 14 Jun 2024 01:53:17 +0000
Subject: [PATCH 5/6] Add proper image annotations for post sync node

---
 .../post_synchronizer.py                      |  9 ++++----
 .../config/post_synchronizer_config.yaml      |  4 ++++
 .../launch/eve.launch.py                      |  7 +++++++
 .../include/post_synchronizer.launch.py       | 21 +++++++++++++++++++
 .../camera_object_detection/setup.py          |  3 ++-
 5 files changed, 39 insertions(+), 5 deletions(-)
 create mode 100644 src/perception/camera_object_detection/config/post_synchronizer_config.yaml
 create mode 100644 src/perception/camera_object_detection/launch/include/post_synchronizer.launch.py

diff --git a/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py b/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
index 46d5ac62..88537e0b 100644
--- a/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
+++ b/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
@@ -13,12 +13,13 @@
 class CameraSyncNode(Node):  # synchronizes visualizations
     def __init__(self):
         super().__init__('camera_sync_node')
+        self.get_logger().info("Camera Sync Node")
 
         self.camera_img_sub = Subscriber(self, Image, '/camera/right/image_color')
 
-        self.camera1_sub = Subscriber(self, Detection2DArray, '/camera/left/camera_detections')
-        self.camera2_sub = Subscriber(self, Detection2DArray, '/camera/center/camera_detections')
-        self.camera3_sub = Subscriber(self, Detection2DArray, '/camera/right/camera_detections')
+        self.camera1_sub = Subscriber(self, Detection2DArray, '/camera/right/camera_detections')
+        self.camera2_sub = Subscriber(self, Detection2DArray, '/traffic_signs')
+        self.camera3_sub = Subscriber(self, Detection2DArray, '/traffic_lights')
 
         self.ts = ApproximateTimeSynchronizer(
             [self.camera_img_sub, self.camera1_sub, self.camera2_sub, self.camera3_sub],
@@ -28,7 +29,7 @@ def __init__(self):
         self.ts.registerCallback(self.callback)
 
         self.combined_detection_publisher = self.create_publisher(Detection2DArray, '/combined_detections', 10)
-        self.vis_publisher = self.create_publisher(Image, '/annotated_img')
+        self.vis_publisher = self.create_publisher(Image, '/annotated_img', 10)
 
     def process_img(self, image):
         try:
diff --git a/src/perception/camera_object_detection/config/post_synchronizer_config.yaml b/src/perception/camera_object_detection/config/post_synchronizer_config.yaml
new file mode 100644
index 00000000..81bb91ef
--- /dev/null
+++ b/src/perception/camera_object_detection/config/post_synchronizer_config.yaml
@@ -0,0 +1,4 @@
+post_synchronizer_node:
+  ros__parameters:
+    camera_topic: /camera/right/image_color
+    publish_vis_topic: /annotated_img
diff --git a/src/perception/camera_object_detection/launch/eve.launch.py b/src/perception/camera_object_detection/launch/eve.launch.py
index 9ff4992e..19af41a0 100755
--- a/src/perception/camera_object_detection/launch/eve.launch.py
+++ b/src/perception/camera_object_detection/launch/eve.launch.py
@@ -47,11 +47,18 @@ def generate_launch_description():
         condition=LaunchConfigurationEquals("launch_traffic_signs", "True"),
     )
 
+    post_synchronizer_launch = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource(
+            [camera_object_detection_launch_include_dir, "/post_synchronizer.launch.py"]
+        )
+    )
+
     return LaunchDescription(
         launch_args
         + [
             pretrained_yolov8_launch,
             traffic_light_launch,
             traffic_signs_launch,
+            post_synchronizer_launch,
         ]
     )
diff --git a/src/perception/camera_object_detection/launch/include/post_synchronizer.launch.py b/src/perception/camera_object_detection/launch/include/post_synchronizer.launch.py
new file mode 100644
index 00000000..606aeaf0
--- /dev/null
+++ b/src/perception/camera_object_detection/launch/include/post_synchronizer.launch.py
@@ -0,0 +1,21 @@
+from launch import LaunchDescription
+from launch_ros.actions import Node
+from ament_index_python.packages import get_package_share_directory
+import os
+
+
+def generate_launch_description():
+    config = os.path.join(
+        get_package_share_directory('camera_object_detection'),
+        'config',
+        'post_synchronizer_config.yaml'
+    )
+
+    camera_object_detection_node = Node(
+        package='camera_object_detection',
+        executable='camera_sync_node',
+        name='post_synchronizer_node',
+        parameters=[config]
+    )
+
+    return LaunchDescription([camera_object_detection_node])
diff --git a/src/perception/camera_object_detection/setup.py b/src/perception/camera_object_detection/setup.py
index c9665f79..cddea271 100755
--- a/src/perception/camera_object_detection/setup.py
+++ b/src/perception/camera_object_detection/setup.py
@@ -25,7 +25,8 @@
     tests_require=['pytest'],
    entry_points={
         'console_scripts': [
-            'camera_object_detection_node = camera_object_detection.yolov8_detection:main'
+            'camera_object_detection_node = camera_object_detection.yolov8_detection:main',
+            'camera_sync_node = camera_object_detection.post_synchronizer:main'
         ],
     },
 )
From e119e011440f4ad2d9ae4a299998fd04e2f77e97 Mon Sep 17 00:00:00 2001
From: Mark Chiu
Date: Tue, 25 Jun 2024 00:59:57 +0000
Subject: [PATCH 6/6] create launch files and fix post synchronizer for combined detections

---
 .../post_synchronizer.py                      | 114 +++++++++++++-----
 .../config/post_synchronizer_config.yaml      |  28 ++++-
 .../config/traffic_light_config.yaml          |  28 ++++-
 .../config/traffic_signs_config.yaml          |  24 +++-
 .../include/post_synchronizer.launch.py       |  26 +++-
 .../launch/include/traffic_light.launch.py    |  26 +++-
 .../launch/include/traffic_signs.launch.py    |  26 +++-
 7 files changed, 227 insertions(+), 45 deletions(-)

diff --git a/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py b/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
index 88537e0b..d591c6d8 100644
--- a/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
+++ b/src/perception/camera_object_detection/camera_object_detection/post_synchronizer.py
@@ -3,33 +3,53 @@
 from message_filters import Subscriber, ApproximateTimeSynchronizer
 
 from sensor_msgs.msg import Image
-from vision_msgs.msg import Detection2DArray, Detection2D
+from vision_msgs.msg import Detection2DArray, Detection2D, ObjectHypothesisWithPose
 from std_msgs.msg import Header
 
 from ultralytics.utils.plotting import Annotator, colors
 
-from cv_bridge import CvBridgeError
+from cv_bridge import CvBridge, CvBridgeError
 
 class CameraSyncNode(Node):  # synchronizes visualizations
     def __init__(self):
         super().__init__('camera_sync_node')
         self.get_logger().info("Camera Sync Node")
 
-        self.camera_img_sub = Subscriber(self, Image, '/camera/right/image_color')
-
-        self.camera1_sub = Subscriber(self, Detection2DArray, '/camera/right/camera_detections')
-        self.camera2_sub = Subscriber(self, Detection2DArray, '/traffic_signs')
-        self.camera3_sub = Subscriber(self, Detection2DArray, '/traffic_lights')
+        self.declare_parameter("camera_img_topic", "/camera/right/image_color")
+        self.declare_parameter("camera_detection_topic", "/camera/right/camera_detections")
+        self.declare_parameter("traffic_signs_topic", "/traffic_signs/right")
+        self.declare_parameter("traffic_lights_topic", "/traffic_lights/right")
+        self.declare_parameter("combined_detection_publisher", "/camera/right/combined_detections")
+        self.declare_parameter("publish_vis_topic", "/camera/right/annotated_img")
+
+        self.camera_img_topic = self.get_parameter("camera_img_topic").value
+        self.camera_detection_topic = self.get_parameter("camera_detection_topic").value
+        self.traffic_signs_topic = self.get_parameter("traffic_signs_topic").value
+        self.traffic_lights_topic = self.get_parameter("traffic_lights_topic").value
+        self.combined_detection_topic = self.get_parameter("combined_detection_publisher").value
+        self.publish_vis_topic = self.get_parameter("publish_vis_topic").value
+
+        self.camera_img_sub = Subscriber(self, Image, self.camera_img_topic)
+        self.camera_detection_sub = Subscriber(self, Detection2DArray, self.camera_detection_topic)
+        self.traffic_signs_sub = Subscriber(self, Detection2DArray, self.traffic_signs_topic)
+        self.traffic_lights_sub = Subscriber(self, Detection2DArray, self.traffic_lights_topic)
+
+        self.cv_bridge = CvBridge()
+        self.line_thickness = 1
+        self.names = []
 
         self.ts = ApproximateTimeSynchronizer(
-            [self.camera_img_sub, self.camera1_sub, self.camera2_sub, self.camera3_sub],
+            [self.camera_img_sub,
+             self.camera_detection_sub,
+             self.traffic_signs_sub,
+             self.traffic_lights_sub],
             queue_size=10,
             slop=0.1)
 
         self.ts.registerCallback(self.callback)
 
-        self.combined_detection_publisher = self.create_publisher(Detection2DArray, '/combined_detections', 10)
-        self.vis_publisher = self.create_publisher(Image, '/annotated_img', 10)
+        self.combined_detection_publisher = self.create_publisher(Detection2DArray, self.combined_detection_topic, 10)
+        self.vis_publisher = self.create_publisher(Image, self.publish_vis_topic, 10)
 
     def process_img(self, image):
         try:
@@ -67,38 +87,76 @@ def postprocess_detections(self, detections, annotator):
         annotator_img = annotator.result()
         return (processed_detections, annotator_img)
 
+    def publish_detections(self, detections, msg, feed):
+        # Publish detections to a Detection2DArray message
+        detection2darray = Detection2DArray()
+
+        # fill header for detection list
+        detection2darray.header.stamp = msg.header.stamp
+        detection2darray.header.frame_id = msg.header.frame_id
+        # populate detection list
+        if detections is not None and len(detections):
+            for detection in detections:
+                detection2d = Detection2D()
+                detection2d.header.stamp = self.get_clock().now().to_msg()
+                detection2d.header.frame_id = msg.header.frame_id
+                detected_object = ObjectHypothesisWithPose()
+                detected_object.hypothesis.class_id = detection["label"]
+                detected_object.hypothesis.score = detection["conf"]
+                detection2d.results.append(detected_object)
+                detection2d.bbox.center.position.x = detection["bbox"][0]
+                detection2d.bbox.center.position.y = detection["bbox"][1]
+                detection2d.bbox.size_x = detection["bbox"][2]
+                detection2d.bbox.size_y = detection["bbox"][3]
+
+                # append detection to detection list
+                detection2darray.detections.append(detection2d)
+
+        self.combined_detection_publisher.publish(detection2darray)
+
     def publish_vis(self, annotated_img, msg):
         # Publish visualizations
         imgmsg = self.cv_bridge.cv2_to_imgmsg(annotated_img, "bgr8")
         imgmsg.header.stamp = msg.header.stamp
         imgmsg.header.frame_id = msg.header.frame_id
+        self.get_logger().info("Publish img")
         self.vis_publisher.publish(imgmsg)
 
-    def callback(self, camera_img_sub, camera1_msg, camera2_msg, camera3_msg):
-        combined_detections = Detection2DArray()
-        combined_detections.header = Header()
-        combined_detections.header.stamp = self.get_clock().now().to_msg()
-        combined_detections.header.frame_id = camera1_msg.header.frame_id
-
+    def callback(
+        self,
+        camera_img_sub,
+        camera_detection_sub,
+        traffic_signs_sub,
+        traffic_lights_sub
+    ):
         cv_image = self.process_img(camera_img_sub)
-
-        for detection in camera1_msg.detections:
-            combined_detections.detections.append(detection)
-
-        for detection in camera2_msg.detections:
-            combined_detections.detections.append(detection)
-
-        for detection in camera3_msg.detections:
-            combined_detections.detections.append(detection)
+
+        combined_detections = camera_detection_sub.detections + traffic_lights_sub.detections + traffic_signs_sub.detections
 
         annotator = Annotator(
             cv_image,
             line_width=self.line_thickness,
             example=str(self.names),
         )
-        (combined_detections, annotated_img) = self.postprocess_detections(combined_detections, annotator)
-        self.combined_detection_publisher.publish(combined_detections)
+
+        detections = []
+        for det in combined_detections:
+            obj_with_pose = det.results[0]
+            detections.append(
+                {
+                    "label": obj_with_pose.hypothesis.class_id,
+                    "conf": obj_with_pose.hypothesis.score,
+                    "bbox": [
+                        det.bbox.center.position.x,
+                        det.bbox.center.position.y,
+                        det.bbox.size_x,
+                        det.bbox.size_y
+                    ],
+                }
+            )
+        (combined_detections, annotated_img) = self.postprocess_detections(detections, annotator)
+
+        self.publish_detections(combined_detections, camera_detection_sub, "")
         self.publish_vis(annotated_img, camera_img_sub)
self.declare_parameter("combined_detection_publisher", "/camera/right/combined_detections") + self.declare_parameter("publish_vis_topic", "/camera/right/annotated_img") + + self.camera_img_topic = self.get_parameter("camera_img_topic").value + self.camera_detection_topic = self.get_parameter("camera_detection_topic").value + self.traffic_signs_topic = self.get_parameter("traffic_signs_topic").value + self.traffic_lights_topic = self.get_parameter("traffic_lights_topic").value + self.combined_detection_topic = self.get_parameter("combined_detection_publisher").value + self.publish_vis_topic = self.get_parameter("publish_vis_topic").value + + self.camera_img_sub = Subscriber(self, Image, self.camera_img_topic) + self.camera_detection_sub = Subscriber(self, Detection2DArray, self.camera_detection_topic) + self.traffic_signs_sub = Subscriber(self, Detection2DArray, self.traffic_signs_topic) + self.traffic_lights_sub = Subscriber(self, Detection2DArray, self.traffic_lights_topic) + + self.cv_bridge = CvBridge() + self.line_thickness = 1 + self.names = [] self.ts = ApproximateTimeSynchronizer( - [self.camera_img_sub, self.camera1_sub, self.camera2_sub, self.camera3_sub], + [self.camera_img_sub, + self.camera_detection_sub, + self.traffic_signs_sub, + self.traffic_lights_sub], queue_size=10, slop=0.1) self.ts.registerCallback(self.callback) - self.combined_detection_publisher = self.create_publisher(Detection2DArray, '/combined_detections', 10) - self.vis_publisher = self.create_publisher(Image, '/annotated_img', 10) + self.combined_detection_publisher = self.create_publisher(Detection2DArray, self.combined_detection_topic, 10) + self.vis_publisher = self.create_publisher(Image, self.publish_vis_topic, 10) def process_img(self, image): try: @@ -67,38 +87,76 @@ def postprocess_detections(self, detections, annotator): annotator_img = annotator.result() return (processed_detections, annotator_img) + def publish_detections(self, detections, msg, feed): + # Publish detections to an detectionList message + detection2darray = Detection2DArray() + + # fill header for detection list + detection2darray.header.stamp = msg.header.stamp + detection2darray.header.frame_id = msg.header.frame_id + # populate detection list + if detections is not None and len(detections): + for detection in detections: + detection2d = Detection2D() + detection2d.header.stamp = self.get_clock().now().to_msg() + detection2d.header.frame_id = msg.header.frame_id + detected_object = ObjectHypothesisWithPose() + detected_object.hypothesis.class_id = detection["label"] + detected_object.hypothesis.score = detection["conf"] + detection2d.results.append(detected_object) + detection2d.bbox.center.position.x = detection["bbox"][0] + detection2d.bbox.center.position.y = detection["bbox"][1] + detection2d.bbox.size_x = detection["bbox"][2] + detection2d.bbox.size_y = detection["bbox"][3] + + # append detection to detection list + detection2darray.detections.append(detection2d) + + self.combined_detection_publisher.publish(detection2darray) + def publish_vis(self, annotated_img, msg): # Publish visualizations imgmsg = self.cv_bridge.cv2_to_imgmsg(annotated_img, "bgr8") imgmsg.header.stamp = msg.header.stamp imgmsg.header.frame_id = msg.header.frame_id + self.get_logger().info("Publish img") self.vis_publisher.publish(imgmsg) - def callback(self, camera_img_sub, camera1_msg, camera2_msg, camera3_msg): - combined_detections = Detection2DArray() - combined_detections.header = Header() - combined_detections.header.stamp = 
diff --git a/src/perception/camera_object_detection/config/traffic_light_config.yaml b/src/perception/camera_object_detection/config/traffic_light_config.yaml
index 9a3e00c8..6306cff3 100755
--- a/src/perception/camera_object_detection/config/traffic_light_config.yaml
+++ b/src/perception/camera_object_detection/config/traffic_light_config.yaml
@@ -1,9 +1,31 @@
-traffic_light_node:
+left_traffic_light_node:
   ros__parameters:
-    camera_topic: /camera/left/image_color
+    camera_topic: /camera/left/image_color
+    publish_vis_topic: /traffic_lights_viz
+    publish_detection_topic: /traffic_lights/left
+    model_path: /perception_models/traffic_light.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
+
+center_traffic_light_node:
+  ros__parameters:
+    camera_topic: /camera/center/image_color
+    publish_vis_topic: /traffic_lights_viz
+    publish_detection_topic: /traffic_lights/center
+    model_path: /perception_models/traffic_light.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
+
+right_traffic_light_node:
+  ros__parameters:
+    camera_topic: /camera/right/image_color
     publish_vis_topic: /traffic_lights_viz
-    publish_detection_topic: /traffic_lights
+    publish_detection_topic: /traffic_lights/right
     model_path: /perception_models/traffic_light.pt
     crop_mode: CenterCrop
     image_size: 1024
     save_detections: false
+
+
diff --git a/src/perception/camera_object_detection/config/traffic_signs_config.yaml b/src/perception/camera_object_detection/config/traffic_signs_config.yaml
index ac312d1a..f0ba0a80 100644
--- a/src/perception/camera_object_detection/config/traffic_signs_config.yaml
+++ b/src/perception/camera_object_detection/config/traffic_signs_config.yaml
@@ -1,8 +1,28 @@
-traffic_signs_node:
+left_traffic_signs_node:
   ros__parameters:
     camera_topic: /camera/left/image_color
     publish_vis_topic: /traffic_signs_viz
-    publish_detection_topic: /traffic_signs
+    publish_detection_topic: /traffic_signs/left
+    model_path: /perception_models/traffic_signs.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
+
+center_traffic_signs_node:
+  ros__parameters:
+    camera_topic: /camera/center/image_color
+    publish_vis_topic: /traffic_signs_viz
+    publish_detection_topic: /traffic_signs/center
+    model_path: /perception_models/traffic_signs.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
+
+right_traffic_signs_node:
+  ros__parameters:
+    camera_topic: /camera/right/image_color
+    publish_vis_topic: /traffic_signs_viz
+    publish_detection_topic: /traffic_signs/right
     model_path: /perception_models/traffic_signs.pt
     crop_mode: CenterCrop
     image_size: 1024
     save_detections: false
diff --git a/src/perception/camera_object_detection/launch/include/post_synchronizer.launch.py b/src/perception/camera_object_detection/launch/include/post_synchronizer.launch.py
index 606aeaf0..3da3aee5 100644
--- a/src/perception/camera_object_detection/launch/include/post_synchronizer.launch.py
+++ b/src/perception/camera_object_detection/launch/include/post_synchronizer.launch.py
@@ -11,11 +11,31 @@ def generate_launch_description():
         'post_synchronizer_config.yaml'
     )
 
-    camera_object_detection_node = Node(
+    left_camera_object_detection_node = Node(
         package='camera_object_detection',
         executable='camera_sync_node',
-        name='post_synchronizer_node',
+        name='left_post_synchronizer_node',
         parameters=[config]
     )
 
-    return LaunchDescription([camera_object_detection_node])
+    center_camera_object_detection_node = Node(
+        package='camera_object_detection',
+        executable='camera_sync_node',
+        name='center_post_synchronizer_node',
+        parameters=[config]
+    )
+
+    right_camera_object_detection_node = Node(
+        package='camera_object_detection',
+        executable='camera_sync_node',
+        name='right_post_synchronizer_node',
+        parameters=[config]
+    )
+
+    return LaunchDescription(
+        [
+            left_camera_object_detection_node,
+            center_camera_object_detection_node,
+            right_camera_object_detection_node
+        ]
+    )
diff --git a/src/perception/camera_object_detection/launch/include/traffic_light.launch.py b/src/perception/camera_object_detection/launch/include/traffic_light.launch.py
index 234844b8..fcf7454a 100755
--- a/src/perception/camera_object_detection/launch/include/traffic_light.launch.py
+++ b/src/perception/camera_object_detection/launch/include/traffic_light.launch.py
@@ -11,11 +11,31 @@ def generate_launch_description():
         'traffic_light_config.yaml'
     )
 
-    camera_object_detection_node = Node(
+    left_camera_object_detection_node = Node(
         package='camera_object_detection',
         executable='camera_object_detection_node',
-        name='traffic_light_node',
+        name='left_traffic_light_node',
         parameters=[config]
     )
 
-    return LaunchDescription([camera_object_detection_node])
+    center_camera_object_detection_node = Node(
+        package='camera_object_detection',
+        executable='camera_object_detection_node',
+        name='center_traffic_light_node',
+        parameters=[config]
+    )
+
+    right_camera_object_detection_node = Node(
+        package='camera_object_detection',
+        executable='camera_object_detection_node',
+        name='right_traffic_light_node',
+        parameters=[config]
+    )
+
+    return LaunchDescription(
+        [
+            left_camera_object_detection_node,
+            center_camera_object_detection_node,
+            right_camera_object_detection_node
+        ]
+    )
diff --git a/src/perception/camera_object_detection/launch/include/traffic_signs.launch.py b/src/perception/camera_object_detection/launch/include/traffic_signs.launch.py
index cdb0134f..a8c0c05c 100644
--- a/src/perception/camera_object_detection/launch/include/traffic_signs.launch.py
+++ b/src/perception/camera_object_detection/launch/include/traffic_signs.launch.py
@@ -11,11 +11,31 @@ def generate_launch_description():
         'traffic_signs_config.yaml'
     )
 
-    camera_object_detection_node = Node(
+    left_camera_object_detection_node = Node(
         package='camera_object_detection',
         executable='camera_object_detection_node',
-        name='traffic_signs_node',
+        name='left_traffic_signs_node',
         parameters=[config]
     )
 
-    return LaunchDescription([camera_object_detection_node])
+    center_camera_object_detection_node = Node(
+        package='camera_object_detection',
+        executable='camera_object_detection_node',
+        name='center_traffic_signs_node',
+        parameters=[config]
+    )
+
+    right_camera_object_detection_node = Node(
+        package='camera_object_detection',
+        executable='camera_object_detection_node',
+        name='right_traffic_signs_node',
+        parameters=[config]
+    )
+
+    return LaunchDescription(
+        [
+            left_camera_object_detection_node,
+            center_camera_object_detection_node,
+            right_camera_object_detection_node
+        ]
+    )
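With the whole series applied, each camera gets its own combined detection and annotated image topics (e.g. /camera/right/combined_detections per the new config), so the stack can be brought up with `ros2 launch camera_object_detection eve.launch.py` and inspected with `ros2 topic echo /camera/right/combined_detections`. A minimal downstream consumer, sketched here for illustration only (the node name is made up):

    # combined_detection_logger.py -- illustrative consumer, not part of the patches.
    import rclpy
    from rclpy.node import Node
    from vision_msgs.msg import Detection2DArray


    class CombinedDetectionLogger(Node):
        def __init__(self):
            super().__init__('combined_detection_logger')
            self.create_subscription(
                Detection2DArray, '/camera/right/combined_detections',
                self.on_detections, 10)

        def on_detections(self, msg):
            # Each Detection2D carries its class id and score in results[0].hypothesis
            labels = [d.results[0].hypothesis.class_id for d in msg.detections if d.results]
            self.get_logger().info(f'{len(msg.detections)} detections: {labels}')


    def main():
        rclpy.init()
        rclpy.spin(CombinedDetectionLogger())


    if __name__ == '__main__':
        main()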