diff --git a/.gitignore b/.gitignore
index 0256a9906006..bb2729b8e3d9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,3 +20,4 @@ perf.data*
 
 # Screenshots from samples etc.
 screenshot*.png
+venv*
diff --git a/crates/re_viewer/src/depthai/depthai.rs b/crates/re_viewer/src/depthai/depthai.rs
index 2791265d61b8..a681fb35f4a2 100644
--- a/crates/re_viewer/src/depthai/depthai.rs
+++ b/crates/re_viewer/src/depthai/depthai.rs
@@ -324,7 +324,7 @@ pub struct DeviceConfig {
 impl Default for DeviceConfig {
     fn default() -> Self {
         Self {
-            auto: true,
+            auto: false,
             cameras: Vec::new(),
             depth_enabled: true,
             depth: Some(DepthConfig::default()),
@@ -771,6 +771,7 @@ impl State {
             applied_device_config.depth_enabled = config.depth.is_some();
             self.modified_device_config.depth_enabled =
                 self.modified_device_config.depth.is_some();
+            self.modified_device_config.auto = false; // Always reset auto
             self.set_subscriptions(&subs);
             self.set_update_in_progress(false);
         }
@@ -778,6 +779,7 @@ impl State {
             re_log::debug!("Setting device: {device:?}");
             self.set_device(device);
             if !self.selected_device.id.is_empty() {
+                self.modified_device_config.auto = true; // Apply default pipeline
                 self.set_pipeline(&mut self.modified_device_config.clone(), false);
             }
diff --git a/crates/re_viewer/src/ui/auto_layout.rs b/crates/re_viewer/src/ui/auto_layout.rs
index e94a085fe9a1..819900902158 100644
--- a/crates/re_viewer/src/ui/auto_layout.rs
+++ b/crates/re_viewer/src/ui/auto_layout.rs
@@ -340,7 +340,7 @@ fn create_inner_viewport_layout(
         .iter()
         .filter(|space| {
             if let Some(last) = space.path.as_ref().and_then(|path| path.as_slice().last()) {
-                last == &EntityPathPart::from("color_cam")
+                last == &EntityPathPart::from("color_cam") || last == &EntityPathPart::from("tof") // Treat TOF as color for now
             } else {
                 false
             }
@@ -551,8 +551,8 @@ pub(crate) fn default_tree_from_space_views(
     let tree_clone = tree.clone();
     let color_tabs = tree_clone.tabs().filter(|tab| {
         if let Some(space_path) = tab.space_path.clone() {
-            if let Some(first_part) = space_path.as_slice().first() {
-                first_part == &EntityPathPart::from("CAM_A")
+            if let Some(first_part) = space_path.as_slice().get(space_path.len() - 2) {
+                first_part == &EntityPathPart::from("color_cam")
             } else {
                 false
             }
diff --git a/crates/re_viewer/src/ui/selection_panel.rs b/crates/re_viewer/src/ui/selection_panel.rs
index f10cf76ca97e..fabc68b9c611 100644
--- a/crates/re_viewer/src/ui/selection_panel.rs
+++ b/crates/re_viewer/src/ui/selection_panel.rs
@@ -364,6 +364,13 @@ fn colormap_props_ui(
 ) {
     // Color mapping picker
     {
+        if entity_props.color_mapper.get() == &ColorMapper::AlbedoTexture {
+            if entity_props.albedo_texture.is_none() {
+                entity_props.color_mapper = EditableAutoValue::Auto(ColorMapper::Colormap(
+                    Colormap::Turbo, // Same default as in images.rs (scene part)
+                ));
+            }
+        }
         let current = *entity_props.color_mapper.get();
         ui.label("Color map");
         egui::ComboBox::from_id_source("depth_color_mapper")
diff --git a/crates/re_viewer/src/ui/space_view.rs b/crates/re_viewer/src/ui/space_view.rs
index 79e32423fbf1..c788fe0909e8 100644
--- a/crates/re_viewer/src/ui/space_view.rs
+++ b/crates/re_viewer/src/ui/space_view.rs
@@ -98,8 +98,9 @@ impl SpaceView {
         let mut is_2d = false;
         if !is_3d {
             let last_part = space_path.iter().last().unwrap();
-            is_2d = (last_part == &EntityPathPart::from("mono_cam")
+            is_2d = ((last_part == &EntityPathPart::from("mono_cam")
                 || last_part == &EntityPathPart::from("color_cam"))
+                || last_part == &EntityPathPart::from("tof"))
                 && last_part != &EntityPathPart::from("transform");
         }
         if let Some(board_socket) =
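The viewer-side changes above implement a one-shot `auto` handshake: `auto` now defaults to `false`, is flipped to `true` exactly once when a device is selected, and is reset as soon as the pipeline is applied, so later user edits are never silently replaced by generated defaults. A minimal sketch of that flow, restated in Python with hypothetical names (`backend`, `on_device_selected`, `on_pipeline_applied` are illustrative stand-ins, not the real viewer API):

```python
def on_device_selected(backend, config, device_id: str) -> None:
    if device_id:
        config.auto = True  # one-shot: ask the backend for a default pipeline
        backend.set_pipeline(config, runtime_only=False)

def on_pipeline_applied(config) -> None:
    config.auto = False  # always reset, so subsequent user edits are sent verbatim
```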
diff --git a/crates/re_viewer/src/ui/space_view_heuristics.rs b/crates/re_viewer/src/ui/space_view_heuristics.rs
index 612134c46639..d08e7b4d3c7f 100644
--- a/crates/re_viewer/src/ui/space_view_heuristics.rs
+++ b/crates/re_viewer/src/ui/space_view_heuristics.rs
@@ -207,7 +207,7 @@ fn default_depthai_space_views(
         .collect::<Vec<_>>();
 
     // If a depth tensor is found, we want to find the 2D space view that has the Image + Depth tensor.
-    // We then wan't to create two separate 2D space views, one for the image and one for the depth.
+    // We then want to create two separate 2D space views, one for the image and one for the depth.
     // But we only want to hide the depth (or image), not remove it from the space view.
     if let Some(depth_2d) = space_views
         .iter_mut()
diff --git a/crates/re_viewer/src/ui/view_spatial/scene/scene_part/images.rs b/crates/re_viewer/src/ui/view_spatial/scene/scene_part/images.rs
index d7f6d9731d0e..2e00c22fbb24 100644
--- a/crates/re_viewer/src/ui/view_spatial/scene/scene_part/images.rs
+++ b/crates/re_viewer/src/ui/view_spatial/scene/scene_part/images.rs
@@ -2,7 +2,7 @@ use egui::NumExt;
 use glam::Vec3;
 use itertools::Itertools;
 
-use re_data_store::{ query_latest_single, EntityPath, EntityProperties };
+use re_data_store::{ query_latest_single, EntityPath, EntityProperties, EditableAutoValue };
 use re_log_types::{
     component_types::{ ColorRGBA, InstanceKey, Tensor, TensorData, TensorDataMeaning },
     Component,
@@ -344,7 +344,9 @@ impl ImagesPart {
                         "Albedo texture couldn't be fetched ({:?})",
                         properties.albedo_texture
                     );
-                    colormap = Colormap::Grayscale;
+                    colormap = Colormap::Turbo;
+                    // Would need some way to update the space view blueprint properties here to reflect the colormap change.
+                    // For now, set the matching default in selection_panel.rs.
                 }
             }
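The two colormap changes above are two halves of one invariant: the scene part (`images.rs`) falls back to `Colormap::Turbo` when the albedo texture cannot be fetched, and the selection panel resets an `AlbedoTexture` mapper with no texture to that same default, so the combo box shows what is actually rendered. A minimal sketch of the shared-default rule, in Python for brevity (names are illustrative, not the Rust API):

```python
DEFAULT_COLORMAP = "Turbo"  # must stay identical in images.rs and selection_panel.rs

def effective_colormap(color_mapper: str, albedo_texture) -> str:
    # AlbedoTexture is only honored when a texture is actually available;
    # otherwise both UI and renderer fall back to the shared default.
    if color_mapper == "AlbedoTexture" and albedo_texture is None:
        return DEFAULT_COLORMAP
    return color_mapper
```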
diff --git a/rerun_py/depthai_viewer/_backend/device.py b/rerun_py/depthai_viewer/_backend/device.py
index 2d3d8cad8861..45010c0bab71 100644
--- a/rerun_py/depthai_viewer/_backend/device.py
+++ b/rerun_py/depthai_viewer/_backend/device.py
@@ -7,11 +7,11 @@ import numpy as np
 from depthai_sdk import OakCamera
 from depthai_sdk.components import CameraComponent, NNComponent, StereoComponent
-from depthai_sdk.components.tof_component import ToFComponent, Component
+from depthai_sdk.components.tof_component import Component
 from depthai_sdk.components.camera_helper import (
     getClosestIspScale,
 )
-from depthai_sdk.classes.packet_handlers import QueuePacketHandler, ComponentOutput
+from depthai_sdk.classes.packet_handlers import ComponentOutput
 from numpy.typing import NDArray
 
 import depthai_viewer as viewer
 
@@ -29,6 +29,8 @@
     compare_dai_camera_configs,
     get_size_from_resolution,
     size_to_resolution,
+    DepthConfiguration,
+    ALL_NEURAL_NETWORKS,
 )
 from depthai_viewer._backend.messages import (
     ErrorMessage,
@@ -36,12 +38,7 @@
     Message,
     WarningMessage,
 )
-from depthai_viewer._backend.packet_handler import (
-    AiModelCallbackArgs,
-    DepthCallbackArgs,
-    PacketHandler,
-    SyncedCallbackArgs,
-)
+from depthai_viewer._backend.packet_handler import PacketHandler
 from depthai_viewer._backend.store import Store
 
 
@@ -84,9 +81,7 @@ class Device:
     _xlink_statistics: Optional[XlinkStatistics] = None
     _sys_info_q: Optional[Queue] = None  # type: ignore[type-arg]
     _pipeline_start_t: Optional[float] = None
-    _queues: Dict[
-        Component, QueuePacketHandler
-    ] = {}
+    _queues: List[Tuple[Component, ComponentOutput]] = []
 
     # _profiler = cProfile.Profile()
 
@@ -285,13 +280,54 @@ def _create_auto_pipeline_config(self, config: PipelineConfiguration) -> Message:
         connected_cam_features = self._oak.device.getConnectedCameraFeatures()
         if not connected_cam_features:
             return ErrorMessage("No camera features found, can't create auto pipeline config!")
-        n_cams = len(connected_cam_features)
+
+        print("Connected camera features: ", connected_cam_features)
+        # Step 1: Create all the cameras. Try to find RGB cam, to align depth to it later
+        # Step 2: Create stereo depth if calibration is present. Align to RGB if present, otherwise to left cam
+        # Step 3: Create YOLO
+        rgb_cam_socket = None
+        # 1. Create all the cameras
+        config.cameras = []
+        has_tof = False
         for cam in connected_cam_features:
+            if cam.name == "rgb":  # By convention
+                rgb_cam_socket = cam.socket
+            resolution = CameraSensorResolution.THE_1080_P if cam.width >= 1920 else CameraSensorResolution.THE_720_P
+            resolution = CameraSensorResolution.THE_1200_P if cam.height == 1200 else resolution
+            preferred_type = cam.supportedTypes[0]
+            if preferred_type == dai.CameraSensorType.TOF:
+                has_tof = True
             config.cameras.append(
-                CameraConfiguration(
-                )
+                CameraConfiguration(resolution=resolution, kind=preferred_type, board_socket=cam.socket, name=cam.name)
             )
-
+        # 2. Create stereo depth
+        if not has_tof:
+            try:
+                calibration = self._oak.device.readCalibration2()
+                left_cam = calibration.getStereoLeftCameraId()
+                right_cam = calibration.getStereoRightCameraId()
+                if left_cam.value != 255 and right_cam.value != 255:
+                    config.depth = DepthConfiguration(
+                        stereo_pair=(left_cam, right_cam),
+                        align=rgb_cam_socket if rgb_cam_socket is not None else left_cam,
+                    )
+            except RuntimeError:
+                calibration = None
+        else:
+            config.depth = None
+        # 3. Create YOLO
+        nnet_cam_sock = rgb_cam_socket
+        if nnet_cam_sock is None:
+            # Try to find a color camera config
+            nnet_cam_sock = next(filter(lambda cam: cam.kind == dai.CameraSensorType.COLOR, config.cameras), None)  # type: ignore[assignment]
+            if nnet_cam_sock is not None:
+                nnet_cam_sock = nnet_cam_sock.board_socket
+        if nnet_cam_sock is not None:
+            config.ai_model = ALL_NEURAL_NETWORKS[0]
+            config.ai_model.camera = nnet_cam_sock
+        else:
+            config.ai_model = None
+        return InfoMessage("Created auto pipeline config")
 
     def update_pipeline(self, runtime_only: bool) -> Message:
         if self._oak is None:
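For a classic three-camera OAK-style device, the three steps above should roughly produce the following configuration. This is an illustrative sketch only; sockets and resolutions depend on the actual hardware, and nothing here is asserted by the code:

```python
# Hypothetical call site, assuming an open Device with a connected OAK-D-like camera:
config = PipelineConfiguration(auto=True)
msg = device._create_auto_pipeline_config(config)
# config.cameras  -> CAM_A ("rgb", THE_1080_P) + CAM_B/CAM_C (mono, THE_720_P)
# config.depth    -> DepthConfiguration(stereo_pair=(CAM_B, CAM_C), align=CAM_A)
# config.ai_model -> ALL_NEURAL_NETWORKS[0] (YOLOv8) running on CAM_A
# With a ToF sensor present, has_tof is True and config.depth is None instead.
```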
@@ -313,8 +349,8 @@ def update_pipeline(self, runtime_only: bool) -> Message:
         if isinstance(message, ErrorMessage):
             return message
 
-        # if config.auto:
-        #     self._create_auto_pipeline_config(config, self._oak.device)
+        if config.auto:
+            self._create_auto_pipeline_config(config)
 
         self._cameras = []
         self._stereo = None
@@ -322,11 +358,6 @@ def update_pipeline(self, runtime_only: bool) -> Message:
         self._sys_info_q = None
         self._pipeline_start_t = None
 
-        synced_outputs: Dict[
-            Component, ComponentOutput
-        ] = {}
-        synced_callback_args = SyncedCallbackArgs()
-
         is_poe = self._oak.device.getDeviceInfo().protocol == dai.XLinkProtocol.X_LINK_TCP_IP
         print("Usb speed: ", self._oak.device.getUsbSpeed())
         is_usb2 = self._oak.device.getUsbSpeed() == dai.UsbSpeed.HIGH
@@ -378,9 +409,7 @@ def update_pipeline(self, runtime_only: bool) -> Message:
             # Only create a camera node if it is used by stereo or AI.
             if cam.stream_enabled:
                 if dai.CameraSensorType.TOF in camera_features.supportedTypes:
-                    sdk_cam = self._oak.create_tof(
-                        cam.board_socket
-                    )
+                    sdk_cam = self._oak.create_tof(cam.board_socket)
                 else:
                     sdk_cam = self._oak.create_camera(
                         cam.board_socket,
@@ -395,7 +424,7 @@ def update_pipeline(self, runtime_only: bool) -> Message:
                     )
                 )
                 self._cameras.append(sdk_cam)
-                synced_outputs[sdk_cam] = sdk_cam.out.main
+                self._queues.append((sdk_cam, self._oak.queue(sdk_cam.out.main)))
 
         if config.depth:
             print("Creating depth")
@@ -428,10 +457,7 @@ def update_pipeline(self, runtime_only: bool) -> Message:
             aligned_camera = self._get_camera_config_by_socket(config, config.depth.align)
             if not aligned_camera:
                 return ErrorMessage(f"{config.depth.align} is not configured. Couldn't create stereo pair.")
-            synced_callback_args.depth_args = DepthCallbackArgs(
-                alignment_camera=aligned_camera, stereo_pair=config.depth.stereo_pair
-            )
-            synced_outputs[self._stereo] = self._stereo.out.main
+            self._queues.append((self._stereo, self._oak.queue(self._stereo.out.main)))
 
         if self._oak.device.getConnectedIMU() != "NONE":
             print("Creating IMU")
@@ -465,16 +491,7 @@ def update_pipeline(self, runtime_only: bool) -> Message:
             if not camera:
                 return ErrorMessage(f"{config.ai_model.camera} is not configured. Couldn't create NN.")
-            synced_callback_args.ai_args = AiModelCallbackArgs(
-                model_name=config.ai_model.path, camera=camera, labels=labels
-            )
-            synced_outputs[self._nnet] = self._nnet.out.main
-
-        # Create the sdk queues and finalize the packet handler
-        if synced_outputs:
-            for component, synced_out in synced_outputs.items():
-                self._queues[component] = self._oak.queue(synced_out)
-            self._packet_handler.set_synced_callback_args(synced_callback_args)
+            self._queues.append((self._nnet, self._oak.queue(self._nnet.out.main)))
 
         sys_logger_xlink = self._oak.pipeline.createXLinkOut()
         logger = self._oak.pipeline.createSystemLogger()
@@ -508,7 +525,7 @@ def update(self) -> None:
             return
         self._oak.poll()
 
-        for component, queue in self._queues.items():
+        for component, queue in self._queues:
             try:
                 packet = queue.get_queue().get_nowait()
                 self._packet_handler.log_packet(component, packet)
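With the synced-callback plumbing removed, every SDK output is now stored alongside the component that produces it, and `update()` drains each queue non-blocking once per poll. A condensed sketch of that loop (the `except` arm is cut off in the hunk above, so the empty-queue handling here is an assumption):

```python
import queue

def drain_queues(pairs, packet_handler) -> None:
    # pairs: the (component, queue) tuples held in Device._queues.
    for component, q in pairs:
        try:
            packet = q.get_queue().get_nowait()  # non-blocking: skip if nothing arrived
        except queue.Empty:
            continue
        packet_handler.log_packet(component, packet)
```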
Couldn't create NN.") - synced_callback_args.ai_args = AiModelCallbackArgs( - model_name=config.ai_model.path, camera=camera, labels=labels - ) - synced_outputs[self._nnet] = self._nnet.out.main - - # Create the sdk queues and finalize the packet handler - if synced_outputs: - for component, synced_out in synced_outputs.items(): - self._queues[component] = self._oak.queue(synced_out) - self._packet_handler.set_synced_callback_args(synced_callback_args) + self._queues.append((self._nnet, self._oak.queue(self._nnet.out.main))) sys_logger_xlink = self._oak.pipeline.createXLinkOut() logger = self._oak.pipeline.createSystemLogger() @@ -508,7 +525,7 @@ def update(self) -> None: return self._oak.poll() - for component, queue in self._queues.items(): + for component, queue in self._queues: try: packet = queue.get_queue().get_nowait() self._packet_handler.log_packet(component, packet) diff --git a/rerun_py/depthai_viewer/_backend/device_configuration.py b/rerun_py/depthai_viewer/_backend/device_configuration.py index 529204d45ea7..19f1316736fd 100644 --- a/rerun_py/depthai_viewer/_backend/device_configuration.py +++ b/rerun_py/depthai_viewer/_backend/device_configuration.py @@ -26,9 +26,9 @@ class Config: arbitrary_types_allowed = True def __init__(self, **v) -> None: # type: ignore[no-untyped-def] - if v.get("median", None): + if v.get("median", None) and isinstance(v["median"], str): v["median"] = getattr(dai.MedianFilter, v["median"]) - if v.get("align", None): + if v.get("align", None) and isinstance(v["align"], str): v["align"] = getattr(dai.CameraBoardSocket, v["align"]) if v.get("stereo_pair", None) and all(isinstance(pair, str) for pair in v["stereo_pair"]): v["stereo_pair"] = ( @@ -105,6 +105,30 @@ def dict(self, *args, **kwargs): # type: ignore[no-untyped-def] } +ALL_NEURAL_NETWORKS = [ + AiModelConfiguration( + path="yolov8n_coco_640x352", + display_name="Yolo V8", + camera=dai.CameraBoardSocket.CAM_A, + ), + AiModelConfiguration( + path="mobilenet-ssd", + display_name="MobileNet SSD", + camera=dai.CameraBoardSocket.CAM_A, + ), + AiModelConfiguration( + path="face-detection-retail-0004", + display_name="Face Detection", + camera=dai.CameraBoardSocket.CAM_A, + ), + AiModelConfiguration( + path="age-gender-recognition-retail-0013", + display_name="Age gender recognition", + camera=dai.CameraBoardSocket.CAM_A, + ), +] + + class ImuConfiguration(BaseModel): # type: ignore[misc] report_rate: int = 100 batch_report_threshold: int = 5 @@ -217,7 +241,7 @@ def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-de class PipelineConfiguration(BaseModel): # type: ignore[misc] - auto: bool = True # Should the backend automatically create a pipeline based on the device. + auto: bool = False # Should the backend automatically create a pipeline? 
diff --git a/rerun_py/depthai_viewer/_backend/packet_handler.py b/rerun_py/depthai_viewer/_backend/packet_handler.py
index 0a4880932005..eba4fbdaa14c 100644
--- a/rerun_py/depthai_viewer/_backend/packet_handler.py
+++ b/rerun_py/depthai_viewer/_backend/packet_handler.py
@@ -14,7 +14,7 @@
     BasePacket,
     DisparityDepthPacket,
 )
-from depthai_sdk.components import Component, CameraComponent, StereoComponent
+from depthai_sdk.components import Component, CameraComponent, StereoComponent, NNComponent
 from depthai_sdk.components.tof_component import ToFComponent
 from numpy.typing import NDArray
 from pydantic import BaseModel
@@ -26,38 +26,10 @@
 from depthai_viewer.components.rect2d import RectFormat
 
 
-class CallbackArgs(BaseModel):  # type: ignore[misc]
-    pass
-
-
-class DepthCallbackArgs(CallbackArgs):  # type: ignore[misc]
-    alignment_camera: CameraConfiguration
-    stereo_pair: Tuple[dai.CameraBoardSocket, dai.CameraBoardSocket]
-
-    class Config:
-        arbitrary_types_allowed = True
-
-
-class AiModelCallbackArgs(CallbackArgs):  # type: ignore[misc]
-    model_name: str
-    camera: CameraConfiguration
-    labels: Optional[List[str]] = None
-
-    class Config:
-        arbitrary_types_allowed = True
-        protected_namespaces = ()
-
-
-class SyncedCallbackArgs(BaseModel):  # type: ignore[misc]
-    depth_args: Optional[DepthCallbackArgs] = None
-    ai_args: Optional[AiModelCallbackArgs] = None
-
-
 class PacketHandler:
     store: Store
     _ahrs: Mahony
     _get_camera_intrinsics: Callable[[dai.CameraBoardSocket, int, int], NDArray[np.float32]]
-    args: SyncedCallbackArgs
 
     def __init__(
         self, store: Store, intrinsics_getter: Callable[[dai.CameraBoardSocket, int, int], NDArray[np.float32]]
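With the `*CallbackArgs` classes gone, `log_packet` (next hunk) derives all context from the component that produced the packet. A compact sketch of that dispatch rule; note that `_socket` and `_get_camera_comp()` are private depthai_sdk members the new code reaches into, so this is a pragmatic coupling rather than a stable API:

```python
from depthai_sdk.components import CameraComponent, StereoComponent, NNComponent
from depthai_sdk.components.tof_component import ToFComponent

def describe_source(component) -> str:
    # Mirrors how the new log_packet pulls context straight off the component.
    if isinstance(component, CameraComponent):
        return f"camera frame from {component._socket}"
    if isinstance(component, ToFComponent):
        return f"tof depth from {component.camera_socket}"
    if isinstance(component, StereoComponent):
        return "stereo depth"
    if isinstance(component, NNComponent):
        return f"detections labeled via {component.get_labels()}"
    return f"unknown component {type(component).__name__}"
```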
@@ -73,42 +45,38 @@ def reset(self) -> None:
         self._ahrs = Mahony(frequency=100)
         self._ahrs.Q = np.array([1, 0, 0, 0], dtype=np.float64)
 
-    def set_synced_callback_args(self, args: SyncedCallbackArgs) -> None:
-        self.args = args
-
     def set_camera_intrinsics_getter(
         self, camera_intrinsics_getter: Callable[[dai.CameraBoardSocket, int, int], NDArray[np.float32]]
     ) -> None:
         self._get_camera_intrinsics = camera_intrinsics_getter  # type: ignore[assignment, misc]
 
-    def build_sync_callback(self, args: SyncedCallbackArgs) -> Callable[[Any], None]:
-        return lambda packets: self._on_synced_packets(args, packets)
-
     def log_packet(
         self,
         component: Component,
         packet: BasePacket,
     ) -> None:
-        if self.args is None:
-            raise RuntimeError("Synced callback args not set.")
         if type(packet) is FramePacket:
             if isinstance(component, CameraComponent):
-                self._on_camera_frame(packet, component.board_socket)
+                self._on_camera_frame(packet, component._socket)
             else:
                 print("Unknown component type:", type(component), "for packet:", type(packet))
             # Create dai.CameraBoardSocket from descriptor
         elif type(packet) is DepthPacket:
             if isinstance(component, StereoComponent):
-                self._on_stereo_frame(packet, self.args.depth_args)
+                self._on_stereo_frame(packet, component)
         elif type(packet) is DisparityDepthPacket:
-            self._on_tof_packet(packet, self.args.depth_args)
+            if isinstance(component, ToFComponent):
+                self._on_tof_packet(packet, component)
+            elif isinstance(component, StereoComponent):
+                self._on_stereo_frame(packet, component)
+            else:
+                print("Unknown component type:", type(component), "for packet:", type(packet))
         elif type(packet) is DetectionPacket:
-            if self.args.ai_args is None:
-                self._on_detections(packet, self.args.ai_args)
+            self._on_detections(packet, component)
         elif type(packet) is TwoStagePacket:
-            if self.args.ai_args is None:
-                self._on_age_gender_packet(packet, self.args.ai_args)
+            self._on_age_gender_packet(packet, component)
         else:
             print("Unknown packet type:", type(packet))
@@ -152,14 +120,13 @@ def _on_camera_frame(self, packet: FramePacket, board_socket: dai.CameraBoardSocket) -> None:
         viewer.log_image(entity_path, img_frame)
 
     def on_imu(self, packet: IMUPacket) -> None:
-        for data in packet.data:
-            gyro: dai.IMUReportGyroscope = data.gyroscope
-            accel: dai.IMUReportAccelerometer = data.acceleroMeter
-            mag: dai.IMUReportMagneticField = data.magneticField
-            # TODO(filip): Move coordinate mapping to sdk
-            self._ahrs.Q = self._ahrs.updateIMU(
-                self._ahrs.Q, np.array([gyro.z, gyro.x, gyro.y]), np.array([accel.z, accel.x, accel.y])
-            )
+        gyro: dai.IMUReportGyroscope = packet.gyroscope
+        accel: dai.IMUReportAccelerometer = packet.acceleroMeter
+        mag: dai.IMUReportMagneticField = packet.magneticField
+        # TODO(filip): Move coordinate mapping to sdk
+        self._ahrs.Q = self._ahrs.updateIMU(
+            self._ahrs.Q, np.array([gyro.z, gyro.x, gyro.y]), np.array([accel.z, accel.x, accel.y])
+        )
         if Topic.ImuData not in self.store.subscriptions:
             return
         viewer.log_imu([accel.z, accel.x, accel.y], [gyro.z, gyro.x, gyro.y], self._ahrs.Q, [mag.x, mag.y, mag.z])
@@ -179,14 +146,25 @@ def _on_tof_packet(
         component: ToFComponent,
     ) -> None:
         depth_frame = packet.frame
-        path = f"{component.camera_socket.name}/transform/tof" + "/Depth"
+
+        viewer.log_rigid3(
+            f"{component.camera_socket.name}/transform", child_from_parent=([0, 0, 0], [1, 0, 0, 0]), xyz="RDF"
+        )
+        intrinsics = np.array([[471.451, 0.0, 317.897], [0.0, 471.539, 245.027], [0.0, 0.0, 1.0]])
+        viewer.log_pinhole(
+            f"{component.camera_socket.name}/transform/tof",
+            child_from_parent=intrinsics,
+            width=component.camera_node.getVideoWidth(),
+            height=component.camera_node.getVideoHeight(),
+        )
+
+        path = f"{component.camera_socket.name}/transform/tof/Depth"
         viewer.log_depth_image(path, depth_frame, meter=1e3)
 
-    def _on_detections(self, packet: DetectionPacket, args: AiModelCallbackArgs) -> None:
-        rects, colors, labels = self._detections_to_rects_colors_labels(packet, args.labels)
-        cam = cam_kind_from_sensor_kind(args.camera.kind)
+    def _on_detections(self, packet: DetectionPacket, component: NNComponent) -> None:
+        rects, colors, labels = self._detections_to_rects_colors_labels(packet, component.get_labels())
+        cam = "color_cam" if component._get_camera_comp().is_color() else "mono_cam"
         viewer.log_rects(
-            f"{args.camera.board_socket.name}/transform/{cam}/Detections",
+            f"{component._get_camera_comp()._socket.name}/transform/{cam}/Detections",
             rects,
             rect_format=RectFormat.XYXY,
             colors=colors,
@@ -202,7 +180,7 @@ def _detections_to_rects_colors_labels(
         for detection in packet.detections:
             rects.append(self._rect_from_detection(detection, packet.frame.shape[0], packet.frame.shape[1]))
             colors.append([0, 255, 0])
-            label: str = detection.label
+            label: str = detection.label_str
             # Open model zoo models output label index
             if omz_labels is not None and isinstance(label, int):
                 label += omz_labels[label]
@@ -210,7 +188,7 @@ def _detections_to_rects_colors_labels(
             labels.append(label)
         return rects, colors, labels
 
-    def _on_age_gender_packet(self, packet: TwoStagePacket, args: AiModelCallbackArgs) -> None:
+    def _on_age_gender_packet(self, packet: TwoStagePacket, component: NNComponent) -> None:
         for det, rec in zip(packet.detections, packet.nnData):
             age = int(float(np.squeeze(np.array(rec.getLayerFp16("age_conv3")))) * 100)
             gender = np.squeeze(np.array(rec.getLayerFp16("prob")))
@@ -219,9 +197,9 @@ def _on_age_gender_packet(self, packet: TwoStagePacket, component: NNComponent) -> None:
             color = [255, 0, 0] if gender[0] > gender[1] else [0, 0, 255]
             # TODO(filip): maybe use viewer.log_annotation_context to log class colors for detections
 
-            cam = cam_kind_from_sensor_kind(args.camera.kind)
+            cam = "color_cam" if component._get_camera_comp().is_color() else "mono_cam"
             viewer.log_rect(
-                f"{args.camera.board_socket.name}/transform/{cam}/Detection",
+                f"{component._get_camera_comp()._socket.name}/transform/{cam}/Detection",
                 self._rect_from_detection(det, packet.frame.shape[0], packet.frame.shape[1]),
                 rect_format=RectFormat.XYXY,
                 color=color,
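One caveat in the `_on_tof_packet` hunk above: the pinhole intrinsics are hard-coded (fx ≈ 471.45, cx ≈ 317.9, cy ≈ 245.0), which is only correct for one specific ToF module at one resolution. A possible follow-up, sketched here and untested, would be to read per-device values from the factory calibration instead:

```python
import depthai as dai
import numpy as np

def tof_intrinsics(device: dai.Device, socket: dai.CameraBoardSocket,
                   width: int, height: int) -> np.ndarray:
    # Fetch the calibrated intrinsic matrix, scaled to the requested output size,
    # instead of hard-coding one module's values.
    calib = device.readCalibration()
    return np.array(calib.getCameraIntrinsics(socket, width, height))
```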
diff --git a/rerun_py/depthai_viewer/requirements.txt b/rerun_py/depthai_viewer/requirements.txt
index 600f12662da8..433c05a4c9f2 100644
--- a/rerun_py/depthai_viewer/requirements.txt
+++ b/rerun_py/depthai_viewer/requirements.txt
@@ -3,7 +3,7 @@ pyarrow==10.0.1
 setuptools
 ahrs
 # depthai_sdk conflicts with depthai, so it's installed seperatelly in __main__.py
-depthai==2.22.0.0
+depthai==2.23.0.0
 websockets
 pydantic==1.9
 deprecated