From c1692d58d590ccbce49941458cbd1a0716554125 Mon Sep 17 00:00:00 2001
From: Momin Chaudhry
Date: Mon, 3 Jun 2024 21:37:36 +0000
Subject: [PATCH 1/6] working on combined

---
 .../config/combined_config.yaml               | 55 +++++++++++++++++++
 .../launch/include/combined_model.launch.py   | 40 ++++++++++++++
 2 files changed, 95 insertions(+)
 create mode 100644 src/perception/camera_object_detection/config/combined_config.yaml
 create mode 100644 src/perception/camera_object_detection/launch/include/combined_model.launch.py

diff --git a/src/perception/camera_object_detection/config/combined_config.yaml b/src/perception/camera_object_detection/config/combined_config.yaml
new file mode 100644
index 00000000..6cfb621c
--- /dev/null
+++ b/src/perception/camera_object_detection/config/combined_config.yaml
@@ -0,0 +1,55 @@
+# left_combined_detection_node:
+#   ros__parameters:
+#     camera_topic: /camera/left/image_color
+#     publish_vis_topic: /camera/left/camera_detections_viz
+#     publish_detection_topic: /camera/left/camera_detections
+#     model_path: /perception_models/yolov8m.pt
+#     image_size: 1024
+
+left_combined_detection_node:
+  ros__parameters:
+    camera_topic: /camera/left/image_color
+    publish_vis_topic: /camera/left/combined_detection_viz
+    publish_detection_topic: /camera/left/combined_detection
+    models:
+      - name: traffic_signs
+        model_path: /perception_models/traffic_signs_v1.pt
+      - name: traffic_lights
+        model_path: /perception_models/traffic_light.pt
+      - name: pretrained_yolov8
+        model_path: /perception_models/yolov8m.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
+
+center_combined_detection_node:
+  ros__parameters:
+    camera_topic: /camera/center/image_color
+    publish_vis_topic: /camera/center/combined_detection_viz
+    publish_detection_topic: /camera/center/combined_detection
+    models:
+      - name: traffic_signs
+        model_path: /perception_models/traffic_signs_v1.pt
+      - name: traffic_lights
+        model_path: /perception_models/traffic_light.pt
+      - name: pretrained_yolov8
+        model_path: /perception_models/yolov8m.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
+
+right_combined_detection_node:
+  ros__parameters:
+    camera_topic: /camera/right/image_color
+    publish_vis_topic: /camera/right/combined_detection_viz
+    publish_detection_topic: /camera/right/combined_detection
+    models:
+      - name: traffic_signs
+        model_path: /perception_models/traffic_signs_v1.pt
+      - name: traffic_lights
+        model_path: /perception_models/traffic_light.pt
+      - name: pretrained_yolov8
+        model_path: /perception_models/yolov8m.pt
+    crop_mode: CenterCrop
+    image_size: 1024
+    save_detections: false
\ No newline at end of file
diff --git a/src/perception/camera_object_detection/launch/include/combined_model.launch.py b/src/perception/camera_object_detection/launch/include/combined_model.launch.py
new file mode 100644
index 00000000..33c4274e
--- /dev/null
+++ b/src/perception/camera_object_detection/launch/include/combined_model.launch.py
@@ -0,0 +1,40 @@
+from launch import LaunchDescription
+from launch_ros.actions import Node
+from ament_index_python.packages import get_package_share_directory
+import os
+
+def generate_launch_description():
+    config = os.path.join(
+        get_package_share_directory("camera_object_detection"),
+        "config",
+        "combined_config.yaml"
+    )
+
+    left_combined_detection_node = Node(
+        package="camera_object_detection",
+        executable="camera_object_detection_node",
+        name="left_combined_detection_node",
+        parameters=[config],
+    )
+
+    center_combined_detection_node = Node(
+        package="camera_object_detection",
+        executable="camera_object_detection_node",
+        name="center_combined_detection_node",
+        parameters=[config],
+    )
+
+    right_combined_detection_node = Node(
+        package="camera_object_detection",
+        executable="camera_object_detection_node",
+        name="right_combined_detection_node",
+        parameters=[config],
+    )
+
+    return LaunchDescription(
+        [
+            left_combined_detection_node,
+            center_combined_detection_node,
+            right_combined_detection_node,
+        ]
+    )

From 29f3d420b5767bad5b680e3fcc0c431c33a453c2 Mon Sep 17 00:00:00 2001
From: Momin Chaudhry
Date: Tue, 4 Jun 2024 23:41:13 +0000
Subject: [PATCH 2/6] added model iterator

---
 modules/docker-compose.perception.yaml        |  4 +-
 .../yolov8_detection.py                       | 98 +++++++++++--------
 .../config/combined_config.yaml               | 41 ++++----
 .../config/traffic_signs_config.yaml          |  2 +-
 watod-config.sh                               |  4 +-
 5 files changed, 83 insertions(+), 66 deletions(-)

diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml
index d30474f9..b81eac61 100644
--- a/modules/docker-compose.perception.yaml
+++ b/modules/docker-compose.perception.yaml
@@ -29,11 +29,11 @@ services:
             - driver: nvidia
               count: 1
               capabilities: [ gpu ]
-    command: /bin/bash -c "ros2 launch camera_object_detection eve_launch.py"
+    command: /bin/bash -c "ros2 launch camera_object_detection eve.launch.py"
     volumes:
       - /mnt/wato-drive2/perception_models/yolov8m.pt:/perception_models/yolov8m.pt
      - /mnt/wato-drive2/perception_models/traffic_light.pt:/perception_models/traffic_light.pt
-      - /mnt/wato-drive2/perception_models/traffic_signs_v0.pt:/perception_models/traffic_signs_v1.pt
+      - /mnt/wato-drive2/perception_models/traffic_signs_v3.pt:/perception_models/traffic_signs.pt
 
   lidar_object_detection:
     build:
diff --git a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
index 05ceb431..29ce05d8 100755
--- a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
+++ b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
@@ -23,6 +23,13 @@
 import torch
 
+class Model():
+    def __init__(self, name, model_path, device):
+        self.name = name
+        self.model_path = model_path
+        self.model = AutoBackend(self.model_path, device=device, dnn=False, fp16=False)
+        self.names = self.model.module.names if hasattr(self.model, "module") else self.model.names
+        self.stride = int(self.model.stride)
 
 class CameraDetectionNode(Node):
 
     def __init__(self):
@@ -44,7 +51,6 @@ def __init__(self):
         self.camera_topic = self.get_parameter("camera_topic").value
         self.publish_vis_topic = self.get_parameter("publish_vis_topic").value
         self.publish_detection_topic = self.get_parameter("publish_detection_topic").value
-        self.model_path = self.get_parameter("model_path").value
         self.image_size = self.get_parameter("image_size").value
         self.compressed = self.get_parameter("compressed").value
         self.crop_mode = self.get_parameter("crop_mode").value
@@ -82,11 +88,7 @@ def __init__(self):
         self.cv_bridge = CvBridge()
 
         # load yolov8 model
-        self.model = AutoBackend(self.model_path, device=self.device, dnn=False, fp16=False)
-
-        self.names = self.model.module.names if hasattr(self.model, "module") else self.model.names
-
-        self.stride = int(self.model.stride)
+        self.models = self.load_models()
 
         # setup vis publishers
         self.vis_publisher = self.create_publisher(Image, self.publish_vis_topic, 10)
@@ -98,9 +100,22 @@ def __init__(self):
             f"Successfully created node listening on camera topic: {self.camera_topic}..."
         )
 
-    def crop_image(self, cv_image):
+    def load_models(self):
+        model_param = self.get_parameter("models").value
+        models = {}
+
+        for key, value in model_param.items():
+            model_name = value["name"]
+            model_path = value["model_path"]
+
+            if model_name and model_path:
+                models[key] = Model(model_name, model_path, self.device)
+
+        return models
+
+    def crop_image(self, cv_image, model):
         if self.crop_mode == "LetterBox":
-            img = LetterBox(self.image_size, stride=self.stride)(image=cv_image)
+            img = LetterBox(self.image_size, stride=model.stride)(image=cv_image)
         elif self.crop_mode == "CenterCrop":
             img = CenterCrop(self.image_size)(cv_image)
         else:
@@ -144,7 +159,7 @@ def convert_bboxes_to_orig_frame(self, bbox):
             bbox[3] * height_scale,
         ]
 
-    def crop_and_convert_to_tensor(self, cv_image):
+    def crop_and_convert_to_tensor(self, cv_image, model):
         """
         Preprocess the image by resizing, padding and rearranging the dimensions.
 
@@ -154,7 +169,7 @@ def crop_and_convert_to_tensor(self, cv_image):
         Returns:
             torch.Tensor image for model input of shape (1,3,w,h)
         """
-        img = self.crop_image(cv_image)
+        img = self.crop_image(cv_image, model)
 
         # Convert
         img = img.transpose(2, 0, 1)
@@ -249,42 +264,43 @@ def image_callback(self, msg):
         except CvBridgeError as e:
             self.get_logger().error(str(e))
             return
-
-        # preprocess image and run through prediction
-        img = self.crop_and_convert_to_tensor(cv_image)
-        pred = self.model(img)
-
-        # nms function used same as yolov8 detect.py
-        pred = non_max_suppression(pred)
+        
         detections = []
-        for i, det in enumerate(pred):  # per image
-            if len(det):
-                # Write results
-                for *xyxy, conf, cls in reversed(det):
-                    label = self.names[int(cls)]
-
-                    bbox = [
-                        xyxy[0],
-                        xyxy[1],
-                        xyxy[2] - xyxy[0],
-                        xyxy[3] - xyxy[1],
-                    ]
-                    bbox = [b.item() for b in bbox]
-                    bbox = self.convert_bboxes_to_orig_frame(bbox)
-
-                    detections.append(
-                        {
-                            "label": label,
-                            "conf": conf.item(),
-                            "bbox": bbox,
-                        }
-                    )
-                    self.get_logger().debug(f"{label}: {bbox}")
+        for model in self.models:
+            # preprocess image and run through prediction
+            img = self.crop_and_convert_to_tensor(cv_image, model)
+            pred = model.model(img)
+
+            # nms function used same as yolov8 detect.py
+            pred = non_max_suppression(pred)
+            for i, det in enumerate(pred):  # per image
+                if len(det):
+                    # Write results
+                    for *xyxy, conf, cls in reversed(det):
+                        label = model.names[int(cls)]
+
+                        bbox = [
+                            xyxy[0],
+                            xyxy[1],
+                            xyxy[2] - xyxy[0],
+                            xyxy[3] - xyxy[1],
+                        ]
+                        bbox = [b.item() for b in bbox]
+                        bbox = self.convert_bboxes_to_orig_frame(bbox)
+
+                        detections.append(
+                            {
+                                "label": label,
+                                "conf": conf.item(),
+                                "bbox": bbox,
+                            }
+                        )
+                        self.get_logger().debug(f"{label}: {bbox}")
 
         annotator = Annotator(
             cv_image,
             line_width=self.line_thickness,
-            example=str(self.names),
+            example=str(model.names),
         )
 
         (detections, annotated_img) = self.postprocess_detections(detections, annotator)
diff --git a/src/perception/camera_object_detection/config/combined_config.yaml b/src/perception/camera_object_detection/config/combined_config.yaml
index 6cfb621c..d05751bb 100644
--- a/src/perception/camera_object_detection/config/combined_config.yaml
+++ b/src/perception/camera_object_detection/config/combined_config.yaml
@@ -1,22 +1,17 @@
-# left_combined_detection_node:
-#   ros__parameters:
-#     camera_topic: /camera/left/image_color
-#     publish_vis_topic: /camera/left/camera_detections_viz
-#     publish_detection_topic: /camera/left/camera_detections
-#     model_path: /perception_models/yolov8m.pt
-#     image_size: 1024
-
 left_combined_detection_node:
   ros__parameters:
     camera_topic: /camera/left/image_color
     publish_vis_topic: /camera/left/combined_detection_viz
     publish_detection_topic: /camera/left/combined_detection
     models:
-      - name: traffic_signs
-        model_path: /perception_models/traffic_signs_v1.pt
-      - name: traffic_lights
+      traffic_signs:
+        name: traffic_signs
+        model_path: /perception_models/traffic_signs.pt
+      traffic_lights:
+        name: traffic_lights
         model_path: /perception_models/traffic_light.pt
-      - name: pretrained_yolov8
+      pretrained_yolov8:
+        name: pretrained_yolov8
         model_path: /perception_models/yolov8m.pt
     crop_mode: CenterCrop
     image_size: 1024
@@ -28,11 +23,14 @@ center_combined_detection_node:
     publish_vis_topic: /camera/center/combined_detection_viz
     publish_detection_topic: /camera/center/combined_detection
     models:
-      - name: traffic_signs
-        model_path: /perception_models/traffic_signs_v1.pt
-      - name: traffic_lights
+      traffic_signs:
+        name: traffic_signs
+        model_path: /perception_models/traffic_signs.pt
+      traffic_lights:
+        name: traffic_lights
         model_path: /perception_models/traffic_light.pt
-      - name: pretrained_yolov8
+      pretrained_yolov8:
+        name: pretrained_yolov8
         model_path: /perception_models/yolov8m.pt
     crop_mode: CenterCrop
     image_size: 1024
@@ -47,11 +45,14 @@ right_combined_detection_node:
     publish_vis_topic: /camera/right/combined_detection_viz
     publish_detection_topic: /camera/right/combined_detection
     models:
-      - name: traffic_signs
-        model_path: /perception_models/traffic_signs_v1.pt
-      - name: traffic_lights
+      traffic_signs:
+        name: traffic_signs
+        model_path: /perception_models/traffic_signs.pt
+      traffic_lights:
+        name: traffic_lights
         model_path: /perception_models/traffic_light.pt
-      - name: pretrained_yolov8
+      pretrained_yolov8:
+        name: pretrained_yolov8
         model_path: /perception_models/yolov8m.pt
     crop_mode: CenterCrop
     image_size: 1024
     save_detections: false
\ No newline at end of file
diff --git a/src/perception/camera_object_detection/config/traffic_signs_config.yaml b/src/perception/camera_object_detection/config/traffic_signs_config.yaml
index 8a143a77..ac312d1a 100644
--- a/src/perception/camera_object_detection/config/traffic_signs_config.yaml
+++ b/src/perception/camera_object_detection/config/traffic_signs_config.yaml
@@ -3,7 +3,7 @@ traffic_signs_node:
     camera_topic: /camera/left/image_color
     publish_vis_topic: /traffic_signs_viz
     publish_detection_topic: /traffic_signs
-    model_path: /perception_models/traffic_signs_v1.pt
+    model_path: /perception_models/traffic_signs.pt
     crop_mode: CenterCrop
     image_size: 1024
     save_detections: false
diff --git a/watod-config.sh b/watod-config.sh
index 3255dd40..e38bad9e 100755
--- a/watod-config.sh
+++ b/watod-config.sh
@@ -15,7 +15,7 @@
 ##  - simulation : starts simulation
 ##  - samples : starts sample ROS2 pubsub nodes
 
-# ACTIVE_MODULES=""
+# ACTIVE_MODULES="perception infrastructure"
 
 ################################# MODE OF OPERATION #################################
 ## Possible modes of operation when running watod.
@@ -23,7 +23,7 @@
 ##  - deploy (default) : runs production-grade containers (non-editable)
 ##  - develop          : runs developer containers (editable)
 
-# MODE_OF_OPERATION=""
+# MODE_OF_OPERATION="develop"
 
 ############################## ADVANCED CONFIGURATIONS ##############################
DEFAULT = "" From 22ef1f26de9a1deeb5aa108183bc3d6cd4ed152a Mon Sep 17 00:00:00 2001 From: Momin Chaudhry Date: Tue, 4 Jun 2024 23:42:02 +0000 Subject: [PATCH 3/6] Remove watod changes --- watod-config.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/watod-config.sh b/watod-config.sh index e38bad9e..3255dd40 100755 --- a/watod-config.sh +++ b/watod-config.sh @@ -15,7 +15,7 @@ ## - simulation : starts simulation ## - samples : starts sample ROS2 pubsub nodes -# ACTIVE_MODULES="perception infrastructure" +# ACTIVE_MODULES="" ################################# MODE OF OPERATION ################################# ## Possible modes of operation when running watod. @@ -23,7 +23,7 @@ ## - deploy (default) : runs production-grade containers (non-editable) ## - develop : runs developer containers (editable) -# MODE_OF_OPERATION="develop" +# MODE_OF_OPERATION="" ############################## ADVANCED CONFIGURATIONS ############################## ## Name to append to docker containers. DEFAULT = "" From f4753cdc6ddc3af2b801335b3472a5f41dac87cb Mon Sep 17 00:00:00 2001 From: Momin Chaudhry Date: Thu, 6 Jun 2024 21:32:36 +0000 Subject: [PATCH 4/6] Trying to pass models in --- .../yolov8_detection.py | 30 ++++++-- .../launch/eve.launch.py | 77 +++++++++---------- 2 files changed, 58 insertions(+), 49 deletions(-) diff --git a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py index 29ce05d8..123871f9 100755 --- a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py +++ b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py @@ -36,7 +36,7 @@ class CameraDetectionNode(Node): def __init__(self): torch.zeros(1).cuda() - super().__init__("camera_object_detection_node") + super().__init__("left_combined_detection_node") self.get_logger().info("Creating camera detection node...") self.declare_parameter("camera_topic", "/camera/right/image_color") @@ -101,12 +101,26 @@ def __init__(self): ) def load_models(self): - model_param = self.get_parameter("models").value - models = {} - for key, value in model_param.items(): - model_name = value["name"] - model_path = value["model_path"] + # traffic_signs: + # name: traffic_signs + # model_path: /perception_models/traffic_signs.pt + # traffic_lights: + # name: traffic_lights + # model_path: /perception_models/traffic_light.pt + # pretrained_yolov8: + # name: pretrained_yolov8 + # model_path: /perception_models/yolov8m.pt + + #model_param = self.get_parameter("models").value + models = {} + models_param = {"traffic_signs": "traffic_signs.pt", + "traffic_lights": "traffic_light.pt", + "pretrained_yolov8": "yolov8m.pt"} + + for key, value in models_param.items(): + model_name = key + model_path = value if model_name and model_path: models[key] = Model(model_name, model_path, self.device) @@ -299,8 +313,8 @@ def image_callback(self, msg): annotator = Annotator( cv_image, - line_width=self.line_thickness, - example=str(model.names), + line_width=self.line_thickness #, + #example=str(model.names), ) (detections, annotated_img) = self.postprocess_detections(detections, annotator) diff --git a/src/perception/camera_object_detection/launch/eve.launch.py b/src/perception/camera_object_detection/launch/eve.launch.py index 9ff4992e..2ee8be63 100755 --- a/src/perception/camera_object_detection/launch/eve.launch.py +++ b/src/perception/camera_object_detection/launch/eve.launch.py @@ 
@@ -3,55 +3,50 @@
 from launch.conditions import LaunchConfigurationEquals
 from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
 from launch.launch_description_sources import PythonLaunchDescriptionSource
+from launch_ros.actions import Node
 from ament_index_python.packages import get_package_share_directory
 import os
 
 
 def generate_launch_description():
-    launch_traffic_light = LaunchConfiguration("launch_traffic_light", default=True)
-    launch_traffic_light_arg = DeclareLaunchArgument(
-        "launch_traffic_light",
-        default_value=launch_traffic_light,
-        description="Launch traffic light detection",
-    )
-    launch_traffic_signs = LaunchConfiguration("launch_traffic_signs", default=True)
-    launch_traffic_signs_arg = DeclareLaunchArgument(
-        "launch_traffic_signs",
-        default_value=launch_traffic_signs,
-        description="Launch traffic signs detection",
-    )
-
-    launch_args = [launch_traffic_light_arg, launch_traffic_signs_arg]
-
-    camera_object_detection_launch_include_dir = os.path.join(
-        get_package_share_directory("camera_object_detection"), "launch", "include"
-    )
-
-    pretrained_yolov8_launch = IncludeLaunchDescription(
-        PythonLaunchDescriptionSource(
-            [camera_object_detection_launch_include_dir, "/pretrained_yolov8.launch.py"]
-        ),
-    )
-
-    traffic_light_launch = IncludeLaunchDescription(
-        PythonLaunchDescriptionSource(
-            [camera_object_detection_launch_include_dir, "/traffic_light.launch.py"]
-        ),
-        condition=LaunchConfigurationEquals("launch_traffic_light", "True"),
+    # launch_traffic_light = LaunchConfiguration("launch_traffic_light", default=True)
+    # launch_traffic_light_arg = DeclareLaunchArgument(
+    #     "launch_traffic_light",
+    #     default_value=launch_traffic_light,
+    #     description="Launch traffic light detection",
+    # )
+    # launch_traffic_signs = LaunchConfiguration("launch_traffic_signs", default=True)
+    # launch_traffic_signs_arg = DeclareLaunchArgument(
+    #     "launch_traffic_signs",
+    #     default_value=launch_traffic_signs,
+    #     description="Launch traffic signs detection",
+    # )
+
+    # launch_args = [launch_traffic_light_arg, launch_traffic_signs_arg]
+
+    # camera_object_detection_launch_include_dir = os.path.join(
+    #     get_package_share_directory("camera_object_detection"), "launch", "include"
+    # )
+
+    # combined_launch = IncludeLaunchDescription(
+    #     PythonLaunchDescriptionSource(
+    #         [camera_object_detection_launch_include_dir, "/combined_model.launch.py"]
+    #     ),
+    # )
+asdasdad
+    config = os.path.join(
+        get_package_share_directory("camera_object_detection"), 
+        "config", 
+        "combined_config.yaml"
     )
 
-    traffic_signs_launch = IncludeLaunchDescription(
-        PythonLaunchDescriptionSource(
-            [camera_object_detection_launch_include_dir, "/traffic_signs.launch.py"]
-        ),
-        condition=LaunchConfigurationEquals("launch_traffic_signs", "True"),
+    left_combined_detection_node = Node(
+        package="camera_object_detection",
+        executable="camera_object_detection_node",
+        name="left_combined_detection_node",
+        parameters=[config],
     )
 
     return LaunchDescription(
-        launch_args
-        + [
-            pretrained_yolov8_launch,
-            traffic_light_launch,
-            traffic_signs_launch,
-        ]
+        [left_combined_detection_node]
     )

From 560529066b52cf82df31ecc5e90c5a2ac0ac6d89 Mon Sep 17 00:00:00 2001
From: Mark Chiu
Date: Tue, 18 Jun 2024 01:35:21 +0000
Subject: [PATCH 5/6] Combine models for camera detections

---
 .../yolov8_detection.py                       | 40 ++++++-----
 .../config/combined_config.yaml               | 36 +++++++--------
 .../launch/eve.launch.py                      | 45 ++++++++-----------
 .../launch/include/combined_model.launch.py   | 40 -----------------
 4 files changed, 50 insertions(+), 111 deletions(-)
 delete mode 100644 src/perception/camera_object_detection/launch/include/combined_model.launch.py

diff --git a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
index 123871f9..f255443d 100755
--- a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
+++ b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
@@ -24,8 +24,7 @@
 import torch
 
 class Model():
-    def __init__(self, name, model_path, device):
-        self.name = name
+    def __init__(self, model_path, device):
         self.model_path = model_path
         self.model = AutoBackend(self.model_path, device=device, dnn=False, fp16=False)
         self.names = self.model.module.names if hasattr(self.model, "module") else self.model.names
@@ -42,7 +41,9 @@ def __init__(self):
         self.declare_parameter("camera_topic", "/camera/right/image_color")
         self.declare_parameter("publish_vis_topic", "/annotated_img")
         self.declare_parameter("publish_detection_topic", "/detections")
-        self.declare_parameter("model_path", "/perception_models/yolov8m.pt")
+        self.declare_parameter("models.traffic_signs.model_path", "/perception_models/traffic_signs.pt")
+        self.declare_parameter("models.traffic_light.model_path", "/perception_models/traffic_light.pt")
+        self.declare_parameter("models.pretrained_yolov8m.model_path", "/perception_models/yolov8m.pt")
         self.declare_parameter("image_size", 1024)
         self.declare_parameter("compressed", False)
         self.declare_parameter("crop_mode", "LetterBox")
@@ -52,6 +52,11 @@ def __init__(self):
         self.camera_topic = self.get_parameter("camera_topic").value
         self.publish_vis_topic = self.get_parameter("publish_vis_topic").value
         self.publish_detection_topic = self.get_parameter("publish_detection_topic").value
+        self.model_paths = [
+            self.get_parameter("models.traffic_signs.model_path").value,
+            self.get_parameter("models.traffic_light.model_path").value,
+            self.get_parameter("models.pretrained_yolov8m.model_path").value
+        ]
         self.image_size = self.get_parameter("image_size").value
         self.compressed = self.get_parameter("compressed").value
         self.crop_mode = self.get_parameter("crop_mode").value
@@ -101,30 +107,10 @@ def __init__(self):
         )
 
     def load_models(self):
-
-        # traffic_signs:
-        #     name: traffic_signs
-        #     model_path: /perception_models/traffic_signs.pt
-        # traffic_lights:
-        #     name: traffic_lights
-        #     model_path: /perception_models/traffic_light.pt
-        # pretrained_yolov8:
-        #     name: pretrained_yolov8
-        #     model_path: /perception_models/yolov8m.pt
-
-        #model_param = self.get_parameter("models").value
-        models = {}
-        models_param = {"traffic_signs": "traffic_signs.pt",
-                        "traffic_lights": "traffic_light.pt",
-                        "pretrained_yolov8": "yolov8m.pt"}
-
-        for key, value in models_param.items():
-            model_name = key
-            model_path = value
-
-            if model_name and model_path:
-                models[key] = Model(model_name, model_path, self.device)
-
+        models = []
+        for model_path in self.model_paths:
+            if model_path:
+                models.append(Model(model_path, self.device))
         return models
 
     def crop_image(self, cv_image, model):
diff --git a/src/perception/camera_object_detection/config/combined_config.yaml b/src/perception/camera_object_detection/config/combined_config.yaml
index d05751bb..422658cf 100644
--- a/src/perception/camera_object_detection/config/combined_config.yaml
+++ b/src/perception/camera_object_detection/config/combined_config.yaml
@@ -4,15 +4,15 @@ left_combined_detection_node:
     publish_vis_topic: /camera/left/combined_detection_viz
     publish_detection_topic: /camera/left/combined_detection
     models:
+      pretrained_yolov8m:
+        name: yolov8m
+        model_path: /perception_models/yolov8m.pt
+      traffic_light:
+        name: traffic_light
+        model_path: /perception_models/traffic_light.pt
       traffic_signs:
         name: traffic_signs
         model_path: /perception_models/traffic_signs.pt
-      traffic_lights:
-        name: traffic_lights
-        model_path: /perception_models/traffic_light.pt
-      pretrained_yolov8:
-        name: pretrained_yolov8
-        model_path: /perception_models/yolov8m.pt
     crop_mode: CenterCrop
     image_size: 1024
     save_detections: false
@@ -23,15 +23,15 @@ center_combined_detection_node:
     publish_vis_topic: /camera/center/combined_detection_viz
     publish_detection_topic: /camera/center/combined_detection
     models:
+      pretrained_yolov8m:
+        name: yolov8m
+        model_path: /perception_models/yolov8m.pt
+      traffic_light:
+        name: traffic_light
+        model_path: /perception_models/traffic_light.pt
       traffic_signs:
         name: traffic_signs
         model_path: /perception_models/traffic_signs.pt
-      traffic_lights:
-        name: traffic_lights
-        model_path: /perception_models/traffic_light.pt
-      pretrained_yolov8:
-        name: pretrained_yolov8
-        model_path: /perception_models/yolov8m.pt
     crop_mode: CenterCrop
     image_size: 1024
     save_detections: false
@@ -42,15 +42,15 @@ right_combined_detection_node:
     publish_vis_topic: /camera/right/combined_detection_viz
     publish_detection_topic: /camera/right/combined_detection
     models:
+      pretrained_yolov8m:
+        name: yolov8m
+        model_path: /perception_models/yolov8m.pt
+      traffic_light:
+        name: traffic_light
+        model_path: /perception_models/traffic_light.pt
       traffic_signs:
         name: traffic_signs
         model_path: /perception_models/traffic_signs.pt
-      traffic_lights:
-        name: traffic_lights
-        model_path: /perception_models/traffic_light.pt
-      pretrained_yolov8:
-        name: pretrained_yolov8
-        model_path: /perception_models/yolov8m.pt
     crop_mode: CenterCrop
     image_size: 1024
     save_detections: false
\ No newline at end of file
diff --git a/src/perception/camera_object_detection/launch/eve.launch.py b/src/perception/camera_object_detection/launch/eve.launch.py
index 2ee8be63..a4857dce 100755
--- a/src/perception/camera_object_detection/launch/eve.launch.py
+++ b/src/perception/camera_object_detection/launch/eve.launch.py
@@ -9,31 +9,6 @@
 
 def generate_launch_description():
-    # launch_traffic_light = LaunchConfiguration("launch_traffic_light", default=True)
-    # launch_traffic_light_arg = DeclareLaunchArgument(
-    #     "launch_traffic_light",
-    #     default_value=launch_traffic_light,
-    #     description="Launch traffic light detection",
-    # )
-    # launch_traffic_signs = LaunchConfiguration("launch_traffic_signs", default=True)
-    # launch_traffic_signs_arg = DeclareLaunchArgument(
-    #     "launch_traffic_signs",
-    #     default_value=launch_traffic_signs,
-    #     description="Launch traffic signs detection",
-    # )
-
-    # launch_args = [launch_traffic_light_arg, launch_traffic_signs_arg]
-
-    # camera_object_detection_launch_include_dir = os.path.join(
-    #     get_package_share_directory("camera_object_detection"), "launch", "include"
-    # )
-
-    # combined_launch = IncludeLaunchDescription(
-    #     PythonLaunchDescriptionSource(
-    #         [camera_object_detection_launch_include_dir, "/combined_model.launch.py"]
-    #     ),
-    # )
-asdasdad
     config = os.path.join(
         get_package_share_directory("camera_object_detection"), 
        "config", 
         "combined_config.yaml"
@@ -47,6 +22,24 @@ def generate_launch_description():
         parameters=[config],
     )
 
+    center_combined_detection_node = Node(
+        package="camera_object_detection",
+        executable="camera_object_detection_node",
+        name="center_combined_detection_node",
+        parameters=[config],
+    )
+
+    right_combined_detection_node = Node(
+        package="camera_object_detection",
+        executable="camera_object_detection_node",
+        name="right_combined_detection_node",
+        parameters=[config],
+    )
+
     return LaunchDescription(
-        [left_combined_detection_node]
+        [
+            left_combined_detection_node,
+            center_combined_detection_node,
+            right_combined_detection_node
+        ]
     )
diff --git a/src/perception/camera_object_detection/launch/include/combined_model.launch.py b/src/perception/camera_object_detection/launch/include/combined_model.launch.py
deleted file mode 100644
index 33c4274e..00000000
--- a/src/perception/camera_object_detection/launch/include/combined_model.launch.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from launch import LaunchDescription
-from launch_ros.actions import Node
-from ament_index_python.packages import get_package_share_directory
-import os
-
-def generate_launch_description():
-    config = os.path.join(
-        get_package_share_directory("camera_object_detection"),
-        "config",
-        "combined_config.yaml"
-    )
-
-    left_combined_detection_node = Node(
-        package="camera_object_detection",
-        executable="camera_object_detection_node",
-        name="left_combined_detection_node",
-        parameters=[config],
-    )
-
-    center_combined_detection_node = Node(
-        package="camera_object_detection",
-        executable="camera_object_detection_node",
-        name="center_combined_detection_node",
-        parameters=[config],
-    )
-
-    right_combined_detection_node = Node(
-        package="camera_object_detection",
-        executable="camera_object_detection_node",
-        name="right_combined_detection_node",
-        parameters=[config],
-    )
-
-    return LaunchDescription(
-        [
-            left_combined_detection_node,
-            center_combined_detection_node,
-            right_combined_detection_node,
-        ]
-    )

From dfb5383939ffc3b66df3f2aac40a3b06f5e952c5 Mon Sep 17 00:00:00 2001
From: Dan Huynh
Date: Wed, 3 Jul 2024 09:11:22 -0400
Subject: [PATCH 6/6] linted

---
 .../camera_object_detection/yolov8_detection.py | 17 +++++++++++------
 .../launch/eve.launch.py                        |  4 ++--
 src/samples/python/aggregator/setup.py          |  2 +-
 3 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
index f255443d..67ddf741 100755
--- a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
+++ b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py
@@ -23,6 +23,7 @@
 import torch
 
+
 class Model():
     def __init__(self, model_path, device):
         self.model_path = model_path
@@ -30,6 +31,7 @@ class Model():
         self.names = self.model.module.names if hasattr(self.model, "module") else self.model.names
         self.stride = int(self.model.stride)
 
+
 class CameraDetectionNode(Node):
 
     def __init__(self):
@@ -41,9 +43,12 @@ def __init__(self):
         self.declare_parameter("camera_topic", "/camera/right/image_color")
         self.declare_parameter("publish_vis_topic", "/annotated_img")
         self.declare_parameter("publish_detection_topic", "/detections")
-        self.declare_parameter("models.traffic_signs.model_path", "/perception_models/traffic_signs.pt")
-        self.declare_parameter("models.traffic_light.model_path", "/perception_models/traffic_light.pt")
-        self.declare_parameter("models.pretrained_yolov8m.model_path", "/perception_models/yolov8m.pt")
+        self.declare_parameter("models.traffic_signs.model_path",
"/perception_models/traffic_signs.pt") + self.declare_parameter("models.traffic_light.model_path", + "/perception_models/traffic_light.pt") + self.declare_parameter("models.pretrained_yolov8m.model_path", + "/perception_models/yolov8m.pt") self.declare_parameter("image_size", 1024) self.declare_parameter("compressed", False) self.declare_parameter("crop_mode", "LetterBox") @@ -264,7 +269,7 @@ def image_callback(self, msg): except CvBridgeError as e: self.get_logger().error(str(e)) return - + detections = [] for model in self.models: # preprocess image and run through prediction @@ -299,8 +304,8 @@ def image_callback(self, msg): annotator = Annotator( cv_image, - line_width=self.line_thickness #, - #example=str(model.names), + line_width=self.line_thickness, + # example=str(model.names), ) (detections, annotated_img) = self.postprocess_detections(detections, annotator) diff --git a/src/perception/camera_object_detection/launch/eve.launch.py b/src/perception/camera_object_detection/launch/eve.launch.py index a4857dce..b840698b 100755 --- a/src/perception/camera_object_detection/launch/eve.launch.py +++ b/src/perception/camera_object_detection/launch/eve.launch.py @@ -10,8 +10,8 @@ def generate_launch_description(): config = os.path.join( - get_package_share_directory("camera_object_detection"), - "config", + get_package_share_directory("camera_object_detection"), + "config", "combined_config.yaml" ) diff --git a/src/samples/python/aggregator/setup.py b/src/samples/python/aggregator/setup.py index b0afb9f6..f77c1804 100755 --- a/src/samples/python/aggregator/setup.py +++ b/src/samples/python/aggregator/setup.py @@ -14,7 +14,7 @@ # Include our package.xml file (os.path.join('share', package_name), ['package.xml']), # Include all launch files. - (os.path.join('share', package_name, 'launch'), \ + (os.path.join('share', package_name, 'launch'), glob(os.path.join('launch', '*.launch.py'))), ], install_requires=['setuptools'],