From 45f7f5f0c65f441174f4b8a3d755098257e3c322 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Tue, 31 Dec 2024 14:49:31 -0500 Subject: [PATCH 01/31] Introduce EventsExecutor implementation (#1389) Signed-off-by: Brad Martin --- rclpy/CMakeLists.txt | 6 + rclpy/package.xml | 1 + rclpy/rclpy/experimental/__init__.py | 15 + rclpy/rclpy/experimental/events_executor.py | 44 + rclpy/src/rclpy/_rclpy_pybind11.cpp | 3 + .../rclpy/events_executor/events_executor.cpp | 878 ++++++++++++++++++ .../rclpy/events_executor/events_executor.hpp | 186 ++++ .../rclpy/events_executor/python_hasher.hpp | 30 + .../src/rclpy/events_executor/rcl_support.cpp | 112 +++ .../src/rclpy/events_executor/rcl_support.hpp | 108 +++ .../src/rclpy/events_executor/scoped_with.hpp | 39 + .../rclpy/events_executor/timers_manager.cpp | 329 +++++++ .../rclpy/events_executor/timers_manager.hpp | 84 ++ rclpy/test/test_events_executor.py | 659 +++++++++++++ 14 files changed, 2494 insertions(+) create mode 100644 rclpy/rclpy/experimental/__init__.py create mode 100644 rclpy/rclpy/experimental/events_executor.py create mode 100644 rclpy/src/rclpy/events_executor/events_executor.cpp create mode 100644 rclpy/src/rclpy/events_executor/events_executor.hpp create mode 100644 rclpy/src/rclpy/events_executor/python_hasher.hpp create mode 100644 rclpy/src/rclpy/events_executor/rcl_support.cpp create mode 100644 rclpy/src/rclpy/events_executor/rcl_support.hpp create mode 100644 rclpy/src/rclpy/events_executor/scoped_with.hpp create mode 100644 rclpy/src/rclpy/events_executor/timers_manager.cpp create mode 100644 rclpy/src/rclpy/events_executor/timers_manager.hpp create mode 100644 rclpy/test/test_events_executor.py diff --git a/rclpy/CMakeLists.txt b/rclpy/CMakeLists.txt index 15ce01ab1..948fdf541 100644 --- a/rclpy/CMakeLists.txt +++ b/rclpy/CMakeLists.txt @@ -17,6 +17,7 @@ endif() find_package(ament_cmake REQUIRED) find_package(ament_cmake_python REQUIRED) +find_package(Boost REQUIRED) find_package(lifecycle_msgs REQUIRED) find_package(rcl REQUIRED) find_package(rcl_action REQUIRED) @@ -90,6 +91,9 @@ pybind11_add_module(_rclpy_pybind11 SHARED src/rclpy/destroyable.cpp src/rclpy/duration.cpp src/rclpy/clock_event.cpp + src/rclpy/events_executor/events_executor.cpp + src/rclpy/events_executor/rcl_support.cpp + src/rclpy/events_executor/timers_manager.cpp src/rclpy/exceptions.cpp src/rclpy/graph.cpp src/rclpy/guard_condition.cpp @@ -121,6 +125,7 @@ target_include_directories(_rclpy_pybind11 PRIVATE src/rclpy/ ) target_link_libraries(_rclpy_pybind11 PRIVATE + Boost::boost lifecycle_msgs::lifecycle_msgs__rosidl_generator_c lifecycle_msgs::lifecycle_msgs__rosidl_typesupport_c rcl::rcl @@ -182,6 +187,7 @@ if(BUILD_TESTING) test/test_create_node.py test/test_create_while_spinning.py test/test_destruction.py + test/test_events_executor.py test/test_executor.py test/test_expand_topic_name.py test/test_guard_condition.py diff --git a/rclpy/package.xml b/rclpy/package.xml index a08514c6f..4a2fb0f87 100644 --- a/rclpy/package.xml +++ b/rclpy/package.xml @@ -17,6 +17,7 @@ ament_cmake + boost pybind11_vendor python3-dev rcpputils diff --git a/rclpy/rclpy/experimental/__init__.py b/rclpy/rclpy/experimental/__init__.py new file mode 100644 index 000000000..a8997525f --- /dev/null +++ b/rclpy/rclpy/experimental/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2024 Brad Martin +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .events_executor import EventsExecutor diff --git a/rclpy/rclpy/experimental/events_executor.py b/rclpy/rclpy/experimental/events_executor.py new file mode 100644 index 000000000..98fd7d082 --- /dev/null +++ b/rclpy/rclpy/experimental/events_executor.py @@ -0,0 +1,44 @@ +# Copyright 2024 Brad Martin +# Copyright 2024 Merlin Labs, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import faulthandler +import typing + +import rclpy.executors +import rclpy.node +from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy + + +# Try to look like we inherit from the rclpy Executor for type checking purposes without +# getting any of the code from the base class. +def EventsExecutor(*, context: rclpy.Context | None = None) -> rclpy.executors.Executor: + if context is None: + context = rclpy.get_default_context() + + # For debugging purposes, if anything goes wrong in C++ make sure we also get a + # Python backtrace dumped with the crash. + faulthandler.enable() + + ex = typing.cast(rclpy.executors.Executor, _rclpy.EventsExecutor(context)) + + # rclpy.Executor does this too. Note, the context itself is smart enough to check + # for bound methods, and check whether the instances they're bound to still exist at + # callback time, so we don't have to worry about tearing down this stale callback at + # destruction time. + # TODO(bmartin427) This should really be done inside of the EventsExecutor + # implementation itself, but I'm unable to figure out a pybind11 incantation that + # allows me to pass this bound method call from C++. + context.on_shutdown(ex.wake) + + return ex diff --git a/rclpy/src/rclpy/_rclpy_pybind11.cpp b/rclpy/src/rclpy/_rclpy_pybind11.cpp index 4da96a5d5..f33fdaa2f 100644 --- a/rclpy/src/rclpy/_rclpy_pybind11.cpp +++ b/rclpy/src/rclpy/_rclpy_pybind11.cpp @@ -32,6 +32,7 @@ #include "duration.hpp" #include "clock_event.hpp" #include "event_handle.hpp" +#include "events_executor/events_executor.hpp" #include "exceptions.hpp" #include "graph.hpp" #include "guard_condition.hpp" @@ -247,4 +248,6 @@ PYBIND11_MODULE(_rclpy_pybind11, m) { rclpy::define_signal_handler_api(m); rclpy::define_clock_event(m); rclpy::define_lifecycle_api(m); + + rclpy::events_executor::define_events_executor(m); } diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp new file mode 100644 index 000000000..8cc964c66 --- /dev/null +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -0,0 +1,878 @@ +// Copyright 2024 Brad Martin +// Copyright 2024 Merlin Labs, Inc. 
+// +// Based on a similar approach as the iRobot rclcpp EventsExecutor implementation: +// https://github.com/ros2/rclcpp/blob/7907b2fee0b1381dc21900efd1745e11f5caa670/rclcpp/src/rclcpp/experimental/executors/events_executor/events_executor.cpp +// Original copyright for that file is: +// Copyright 2023 iRobot Corporation. +// +// Also borrows some code from the original rclpy Executor implementations: +// https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/executors.py +// Original copyright for that file is: +// Copyright 2017 Open Source Robotics Foundation, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include "events_executor/events_executor.hpp" + +#include +#include + +#include + +#include +#include + +#include +#include +#include + +namespace pl = std::placeholders; +namespace py = pybind11; + +namespace rclpy { +namespace events_executor { + +EventsExecutor::EventsExecutor(py::object context) + : rclpy_context_(context), + asyncio_run_(py::module_::import("asyncio").attr("run")), + rclpy_task_(py::module_::import("rclpy.task").attr("Task")), + signals_(io_context_, SIGINT, SIGTERM), + rcl_callback_manager_(io_context_.get_executor()), + timers_manager_(io_context_.get_executor(), + std::bind(&EventsExecutor::HandleTimerReady, this, pl::_1)) { + // rclpy.Executor creates a sigint handling guard condition here. This is necessary + // because a sleeping event loop won't notice Ctrl-C unless some other event wakes + // it up otherwise. + // + // Unfortunately it doesn't look like we can either support generic guard conditions + // or hook into the existing rclpy signal handling in any other useful way. We'll + // just establish our own signal handling directly instead. This unfortunately + // bypasses the rclpy.init() options that allow a user to disable certain signal + // handlers, but it doesn't look like we can do any better. + signals_.async_wait([this](const boost::system::error_code& ec, int) { + if (!ec) { + py::gil_scoped_acquire gil_acquire; + // Don't call context.try_shutdown() here, because that can call back to us to + // request a blocking shutdown(), which doesn't make any sense because we have to + // be spinning to process the callback that's asked to wait for spinning to stop. + // We'll have to call that later outside of any spin loop. + // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/__init__.py#L105-L109 + sigint_pending_.store(true); + io_context_.stop(); + } + }); +} + +EventsExecutor::~EventsExecutor() { shutdown(); } + +pybind11::object EventsExecutor::create_task(py::object callback, py::args args, + const py::kwargs& kwargs) { + // Create and return a rclpy.task.Task() object, and schedule it to be called later. 
+  using py::literals::operator""_a;
+  py::object task = rclpy_task_(callback, args, kwargs, "executor"_a = py::cast(this));
+  // The Task needs to be owned at least until we invoke it from the callback we post,
+  // however we can't pass a bare py::object because that's going to try to do Python
+  // refcounting while preparing to go into or coming back from the callback, while the
+  // GIL is not held. We'll do manual refcounting on it instead.
+  py::handle cb_task_handle = task;
+  cb_task_handle.inc_ref();
+  boost::asio::post(io_context_,
+                    std::bind(&EventsExecutor::IterateTask, this, cb_task_handle));
+  return task;
+}
+
+bool EventsExecutor::shutdown(std::optional<double> timeout) {
+  // NOTE: The rclpy context can invoke this with a lock on the context held. Therefore
+  // we must not try to go access that context during this method or we can deadlock.
+  // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/context.py#L101-L103
+
+  io_context_.stop();
+
+  // Block until spinning is done, or timeout.
+  std::unique_lock spin_lock(spinning_mutex_, std::defer_lock);
+  if (timeout) {
+    if (!spin_lock.try_lock_for(std::chrono::duration<double>(*timeout))) {
+      return false;
+    }
+  } else {
+    spin_lock.lock();
+  }
+  // Tear down any callbacks we still have registered.
+  for (py::handle node : py::list(nodes_)) {
+    remove_node(node);
+  }
+  UpdateEntitiesFromNodes(true);
+  return true;
+}
+
+bool EventsExecutor::add_node(py::object node) {
+  std::lock_guard lock(nodes_mutex_);
+  if (nodes_.contains(node)) {
+    return false;
+  }
+  nodes_.add(node);
+  // Caution, the Node executor setter method calls executor.add_node() again making
+  // this reentrant.
+  node.attr("executor") = py::cast(this);
+  wake();
+  return true;
+}
+
+void EventsExecutor::remove_node(py::handle node) {
+  std::lock_guard lock(nodes_mutex_);
+  if (!nodes_.contains(node)) {
+    return;
+  }
+  // Why does pybind11 provide a C++ method for add() but not discard() or remove()?
+  nodes_.attr("remove")(node);
+  // Not sure why rclpy doesn't change the node.executor at this point
+  wake();
+}
+
+void EventsExecutor::wake() {
+  if (!wake_pending_.exchange(true)) {
+    // Update tracked entities.
+    boost::asio::post(io_context_, [this]() {
+      py::gil_scoped_acquire gil_acquire;
+      UpdateEntitiesFromNodes(!py::cast<bool>(rclpy_context_.attr("ok")()));
+    });
+  }
+}
+
+// NOTE: The timeouts on the below two methods are always realtime even if we're
+// running in debug time. This is true of other executors too, because debug time is
+// always associated with a specific node and more than one node may be connected to an
+// executor instance.
+// https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/executors.py#L184-L185
+
+void EventsExecutor::spin(std::optional<double> timeout_sec) {
+  {
+    std::unique_lock spin_lock(spinning_mutex_, std::try_to_lock);
+    if (!spin_lock) {
+      throw std::runtime_error("Attempt to spin an already-spinning Executor");
+    }
+    // Release the GIL while we block. Any callbacks on the io_context that want to
+    // touch Python will need to reacquire it though.
+    py::gil_scoped_release gil_release;
+    // Don't let asio auto stop if there's nothing to do
+    const auto work = boost::asio::make_work_guard(io_context_);
+    if (timeout_sec) {
+      io_context_.run_for(std::chrono::duration_cast<std::chrono::nanoseconds>(
+          std::chrono::duration<double>(*timeout_sec)));
+    } else {
+      io_context_.run();
+    }
+    io_context_.restart();
+  }
+
+  if (sigint_pending_.exchange(false)) {
+    rclpy_context_.attr("try_shutdown")();
+  }
+}
+
+void EventsExecutor::spin_once(std::optional<double> timeout_sec) {
+  {
+    std::unique_lock spin_lock(spinning_mutex_, std::try_to_lock);
+    if (!spin_lock) {
+      throw std::runtime_error("Attempt to spin an already-spinning Executor");
+    }
+    // Release the GIL while we block. Any callbacks on the io_context that want to
+    // touch Python will need to reacquire it though.
+    py::gil_scoped_release gil_release;
+    // Don't let asio auto stop if there's nothing to do
+    const auto work = boost::asio::make_work_guard(io_context_);
+    if (timeout_sec) {
+      io_context_.run_one_for(std::chrono::duration_cast<std::chrono::nanoseconds>(
+          std::chrono::duration<double>(*timeout_sec)));
+    } else {
+      io_context_.run_one();
+    }
+    io_context_.restart();
+  }
+
+  if (sigint_pending_.exchange(false)) {
+    rclpy_context_.attr("try_shutdown")();
+  }
+}
+
+void EventsExecutor::spin_until_future_complete(py::handle future,
+                                                std::optional<double> timeout_sec) {
+  future.attr("add_done_callback")(
+      py::cpp_function([this](py::handle) { io_context_.stop(); }));
+  spin(timeout_sec);
+}
+
+void EventsExecutor::spin_once_until_future_complete(
+    py::handle future, std::optional<double> timeout_sec) {
+  future.attr("add_done_callback")(
+      py::cpp_function([this](py::handle) { io_context_.stop(); }));
+  spin_once(timeout_sec);
+}
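+
+// Illustrative usage sketch (comment only, not part of this patch): from Python, the
+// spin variants above are reached through the bindings at the bottom of this file.
+// The node name is hypothetical:
+//
+//   import rclpy
+//   from rclpy.experimental import EventsExecutor
+//
+//   rclpy.init()
+//   node = rclpy.create_node('demo')
+//   executor = EventsExecutor()
+//   executor.add_node(node)
+//   executor.spin_once(timeout_sec=0.1)  # wraps io_context_.run_one_for()
+//   executor.spin()  # wraps io_context_.run(); returns on shutdown or SIGINT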
+
+void EventsExecutor::UpdateEntitiesFromNodes(bool shutdown) {
+  // Clear pending flag as early as possible, so we error on the side of retriggering
+  // a few harmless updates rather than potentially missing important additions.
+  wake_pending_.store(false);
+
+  // Collect all entities currently associated with our nodes
+  py::set subscriptions;
+  py::set timers;
+  py::set clients;
+  py::set services;
+  py::set waitables;
+  {
+    std::lock_guard lock(nodes_mutex_);
+    if (!shutdown) {
+      for (py::handle node : nodes_) {
+        subscriptions.attr("update")(py::set(node.attr("subscriptions")));
+        timers.attr("update")(py::set(node.attr("timers")));
+        clients.attr("update")(py::set(node.attr("clients")));
+        services.attr("update")(py::set(node.attr("services")));
+        waitables.attr("update")(py::set(node.attr("waitables")));
+
+        // It doesn't seem to be possible to support guard conditions with a
+        // callback-based (as opposed to waitset-based) API. Fortunately we don't
+        // seem to need to.
+        if (!py::set(node.attr("guards")).empty()) {
+          throw std::runtime_error("Guard conditions not supported");
+        }
+      }
+    } else {
+      // Remove all tracked entities and nodes.
+      nodes_.clear();
+    }
+  }
+
+  // Perform updates for added and removed entities
+  UpdateEntitySet(subscriptions_, subscriptions,
+                  std::bind(&EventsExecutor::HandleAddedSubscription, this, pl::_1),
+                  std::bind(&EventsExecutor::HandleRemovedSubscription, this, pl::_1));
+  UpdateEntitySet(timers_, timers,
+                  std::bind(&EventsExecutor::HandleAddedTimer, this, pl::_1),
+                  std::bind(&EventsExecutor::HandleRemovedTimer, this, pl::_1));
+  UpdateEntitySet(clients_, clients,
+                  std::bind(&EventsExecutor::HandleAddedClient, this, pl::_1),
+                  std::bind(&EventsExecutor::HandleRemovedClient, this, pl::_1));
+  UpdateEntitySet(services_, services,
+                  std::bind(&EventsExecutor::HandleAddedService, this, pl::_1),
+                  std::bind(&EventsExecutor::HandleRemovedService, this, pl::_1));
+  UpdateEntitySet(waitables_, waitables,
+                  std::bind(&EventsExecutor::HandleAddedWaitable, this, pl::_1),
+                  std::bind(&EventsExecutor::HandleRemovedWaitable, this, pl::_1));
+
+  if (shutdown) {
+    // Stop spinning after everything is torn down.
+    io_context_.stop();
+  }
+}
+
+void EventsExecutor::UpdateEntitySet(
+    py::set& entity_set, const py::set& new_entity_set,
+    std::function<void(py::handle)> added_entity_callback,
+    std::function<void(py::handle)> removed_entity_callback) {
+  py::set added_entities = new_entity_set - entity_set;
+  for (py::handle added_entity : added_entities) {
+    added_entity_callback(added_entity);
+  }
+
+  py::set removed_entities = entity_set - new_entity_set;
+  for (py::handle removed_entity : removed_entities) {
+    removed_entity_callback(removed_entity);
+  }
+
+  entity_set = new_entity_set;
+}
+
+void EventsExecutor::HandleAddedSubscription(py::handle subscription) {
+  py::handle handle = subscription.attr("handle");
+  auto with = std::make_shared<ScopedWith>(handle);
+  const rcl_subscription_t* rcl_ptr = GetRclSubscription(handle);
+  const auto cb =
+      std::bind(&EventsExecutor::HandleSubscriptionReady, this, subscription, pl::_1);
+  if (RCL_RET_OK != rcl_subscription_set_on_new_message_callback(
+                        rcl_ptr, RclEventCallbackTrampoline,
+                        rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) {
+    throw std::runtime_error(
+        std::string("Failed to set the on new message callback for subscription: ") +
+        rcl_get_error_string().str);
+  }
+}
+
+void EventsExecutor::HandleRemovedSubscription(py::handle subscription) {
+  py::handle handle = subscription.attr("handle");
+  const rcl_subscription_t* rcl_ptr = GetRclSubscription(handle);
+  if (RCL_RET_OK !=
+      rcl_subscription_set_on_new_message_callback(rcl_ptr, nullptr, nullptr)) {
+    throw std::runtime_error(
+        std::string("Failed to clear the on new message callback for subscription: ") +
+        rcl_get_error_string().str);
+  }
+  rcl_callback_manager_.RemoveCallback(rcl_ptr);
+}
+
+void EventsExecutor::HandleSubscriptionReady(py::handle subscription,
+                                             size_t number_of_events) {
+  py::gil_scoped_acquire gil_acquire;
+
+  // Largely based on rclpy.Executor._take_subscription() and _execute_subscription().
+  // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/executors.py#L355-L367
+  //
+  // NOTE: Simple object attributes we can count on to be owned by the parent object,
+  // but bound method calls and function return values need to be owned by us.
+  const py::handle handle = subscription.attr("handle");
+  const py::object take_message = handle.attr("take_message");
+  const py::handle msg_type = subscription.attr("msg_type");
+  const py::handle raw = subscription.attr("raw");
+  const int callback_type =
+      py::cast<int>(subscription.attr("_callback_type").attr("value"));
+  const int message_only = py::cast<int>(
+      subscription.attr("CallbackType").attr("MessageOnly").attr("value"));
+  const py::handle callback = subscription.attr("callback");
+
+  // rmw_cyclonedds has a bug which causes number_of_events to be zero in the case
+  // where messages were waiting for us when we registered the callback, and the topic
+  // is using KEEP_ALL history policy. We'll work around that by checking for zero and
+  // just taking messages until we start getting None in that case.
+  // https://github.com/ros2/rmw_cyclonedds/issues/509
+  bool got_none = false;
+  for (size_t i = 0; number_of_events ? i < number_of_events : !got_none; ++i) {
+    py::object msg_info = take_message(msg_type, raw);
+    if (!msg_info.is_none()) {
+      try {
+        if (callback_type == message_only) {
+          callback(py::cast<py::tuple>(msg_info)[0]);
+        } else {
+          callback(msg_info);
+        }
+      } catch (const py::error_already_set& e) {
+        HandleCallbackExceptionInNodeEntity(e, subscription, "subscriptions");
+        throw;
+      }
+    } else {
+      got_none = true;
+    }
+  }
+}
+
+void EventsExecutor::HandleAddedTimer(py::handle timer) {
+  timers_manager_.AddTimer(timer);
+}
+
+void EventsExecutor::HandleRemovedTimer(py::handle timer) {
+  timers_manager_.RemoveTimer(timer);
+}
+
+void EventsExecutor::HandleTimerReady(py::handle timer) {
+  py::gil_scoped_acquire gil_acquire;
+
+  try {
+    // Unlike most rclpy objects this doesn't document whether it's a Callable or might
+    // be a Coroutine. Let's hope it's the former. 🤞
+    timer.attr("callback")();
+  } catch (const py::error_already_set& e) {
+    HandleCallbackExceptionInNodeEntity(e, timer, "timers");
+    throw;
+  }
+}
+
+void EventsExecutor::HandleAddedClient(py::handle client) {
+  py::handle handle = client.attr("handle");
+  auto with = std::make_shared<ScopedWith>(handle);
+  const rcl_client_t* rcl_ptr = GetRclClient(handle);
+  const auto cb = std::bind(&EventsExecutor::HandleClientReady, this, client, pl::_1);
+  if (RCL_RET_OK != rcl_client_set_on_new_response_callback(
+                        rcl_ptr, RclEventCallbackTrampoline,
+                        rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) {
+    throw std::runtime_error(
+        std::string("Failed to set the on new response callback for client: ") +
+        rcl_get_error_string().str);
+  }
+}
+
+void EventsExecutor::HandleRemovedClient(py::handle client) {
+  py::handle handle = client.attr("handle");
+  const rcl_client_t* rcl_ptr = GetRclClient(handle);
+  if (RCL_RET_OK !=
+      rcl_client_set_on_new_response_callback(rcl_ptr, nullptr, nullptr)) {
+    throw std::runtime_error(
+        std::string("Failed to clear the on new response callback for client: ") +
+        rcl_get_error_string().str);
+  }
+  rcl_callback_manager_.RemoveCallback(rcl_ptr);
+}
+
+void EventsExecutor::HandleClientReady(py::handle client, size_t number_of_events) {
+  py::gil_scoped_acquire gil_acquire;
+
+  // Largely based on rclpy.Executor._take_client() and _execute_client().
+  // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/executors.py#L369-L384
+  const py::handle handle = client.attr("handle");
+  const py::object take_response = handle.attr("take_response");
+  const py::handle srv_type = client.attr("srv_type");
+  const py::handle res_type = srv_type.attr("Response");
+  const py::object get_pending_request = client.attr("get_pending_request");
+
+  for (size_t i = 0; i < number_of_events; ++i) {
+    py::tuple seq_and_response = take_response(res_type);
+    py::handle header = seq_and_response[0];
+    py::handle response = seq_and_response[1];
+    if (!header.is_none()) {
+      py::object sequence = header.attr("request_id").attr("sequence_number");
+      py::object future;
+      try {
+        future = get_pending_request(sequence);
+      } catch (const py::error_already_set& e) {
+        if (e.matches(PyExc_KeyError)) {
+          // The request was cancelled
+          continue;
+        }
+        throw;
+      }
+      future.attr("_set_executor")(py::cast(this));
+      try {
+        future.attr("set_result")(response);
+      } catch (const py::error_already_set& e) {
+        HandleCallbackExceptionInNodeEntity(e, client, "clients");
+        throw;
+      }
+    }
+  }
+}
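+
+// For orientation (illustrative comment only; AddTwoInts stands in for any service
+// type), the Python-side flow that terminates in set_result() above looks like:
+//
+//   future = client.call_async(AddTwoInts.Request(a=1, b=2))
+//   executor.spin_until_future_complete(future)
+//   result = future.result()  # the response object delivered by HandleClientReady()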
+
+void EventsExecutor::HandleAddedService(py::handle service) {
+  py::handle handle = service.attr("handle");
+  auto with = std::make_shared<ScopedWith>(handle);
+  const rcl_service_t* rcl_ptr = GetRclService(handle);
+  const auto cb = std::bind(&EventsExecutor::HandleServiceReady, this, service, pl::_1);
+  if (RCL_RET_OK != rcl_service_set_on_new_request_callback(
+                        rcl_ptr, RclEventCallbackTrampoline,
+                        rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) {
+    throw std::runtime_error(
+        std::string("Failed to set the on new request callback for service: ") +
+        rcl_get_error_string().str);
+  }
+}
+
+void EventsExecutor::HandleRemovedService(py::handle service) {
+  py::handle handle = service.attr("handle");
+  const rcl_service_t* rcl_ptr = GetRclService(handle);
+  if (RCL_RET_OK !=
+      rcl_service_set_on_new_request_callback(rcl_ptr, nullptr, nullptr)) {
+    throw std::runtime_error(
+        std::string("Failed to clear the on new request callback for service: ") +
+        rcl_get_error_string().str);
+  }
+  rcl_callback_manager_.RemoveCallback(rcl_ptr);
+}
+
+void EventsExecutor::HandleServiceReady(py::handle service, size_t number_of_events) {
+  py::gil_scoped_acquire gil_acquire;
+
+  // Largely based on rclpy.Executor._take_service() and _execute_service().
+  // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/executors.py#L386-L397
+  const py::handle handle = service.attr("handle");
+  const py::object service_take_request = handle.attr("service_take_request");
+  const py::handle srv_type = service.attr("srv_type");
+  const py::handle req_type = srv_type.attr("Request");
+  const py::handle res_type = srv_type.attr("Response");
+  const py::handle callback = service.attr("callback");
+  const py::object send_response = service.attr("send_response");
+
+  for (size_t i = 0; i < number_of_events; ++i) {
+    py::object maybe_request_and_header = service_take_request(req_type);
+    if (!maybe_request_and_header.is_none()) {
+      py::tuple request_and_header = py::cast<py::tuple>(maybe_request_and_header);
+      py::handle request = request_and_header[0];
+      py::handle header = request_and_header[1];
+      if (!request.is_none()) {
+        py::object response;
+        try {
+          response = callback(request, res_type());
+        } catch (const py::error_already_set& e) {
+          HandleCallbackExceptionInNodeEntity(e, service, "services");
+          throw;
+        }
+        send_response(response, header);
+      }
+    }
+  }
+}
+
+void EventsExecutor::HandleAddedWaitable(py::handle waitable) {
+  // The Waitable API is too abstract for us to work with directly; it only exposes
+  // APIs for dealing with wait sets, and all of the rcl callback API requires knowing
+  // exactly what kinds of rcl objects you're working with. We'll try to figure out
+  // what kind of stuff is hiding behind the abstraction by having the Waitable add
+  // itself to a wait set, then take stock of what all ended up there. We'll also have
+  // to hope that no Waitable implementations ever change their component entities over
+  // their lifetimes. 😬
+  auto with_waitable = std::make_shared<ScopedWith>(waitable);
+  const py::object num_entities = waitable.attr("get_num_entities")();
+  if (py::cast<size_t>(num_entities.attr("num_guard_conditions")) != 0) {
+    throw std::runtime_error("Guard conditions not supported");
+  }
+  const py::object _rclpy = py::module_::import("rclpy.impl.implementation_singleton")
+                                .attr("rclpy_implementation");
+  py::object wait_set = _rclpy.attr("WaitSet")(
+      num_entities.attr("num_subscriptions"), 0, num_entities.attr("num_timers"),
+      num_entities.attr("num_clients"), num_entities.attr("num_services"),
+      num_entities.attr("num_events"), rclpy_context_.attr("handle"));
+  auto with_waitset = std::make_shared<ScopedWith>(wait_set);
+  waitable.attr("add_to_wait_set")(wait_set);
+  rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set);
+  // We null out each entry in the waitset as we set it up, so that the waitset itself
+  // can be reused when something becomes ready to signal to the Waitable what's ready
+  // and what's not. We also bind with_waitset into each callback we set up, to ensure
+  // that object doesn't get destroyed while any of these callbacks are still
+  // registered.
+  WaitableSubEntities sub_entities;
+  for (size_t i = 0; i < rcl_waitset->size_of_subscriptions; ++i) {
+    const rcl_subscription_t* const rcl_sub = rcl_waitset->subscriptions[i];
+    rcl_waitset->subscriptions[i] = nullptr;
+    sub_entities.subscriptions.push_back(rcl_sub);
+    const auto cb = std::bind(&EventsExecutor::HandleWaitableSubReady, this, waitable,
+                              rcl_sub, wait_set, i, with_waitset, pl::_1);
+    if (RCL_RET_OK !=
+        rcl_subscription_set_on_new_message_callback(
+            rcl_sub, RclEventCallbackTrampoline,
+            rcl_callback_manager_.MakeCallback(rcl_sub, cb, with_waitable))) {
+      throw std::runtime_error(
+          std::string(
+              "Failed to set the on new message callback for Waitable subscription: ") +
+          rcl_get_error_string().str);
+    }
+  }
+  for (size_t i = 0; i < rcl_waitset->size_of_timers; ++i) {
+    // Unfortunately we do require a non-const pointer here, while the waitset
+    // structure contains a const pointer.
+    rcl_timer_t* const rcl_timer = const_cast<rcl_timer_t*>(rcl_waitset->timers[i]);
+    rcl_waitset->timers[i] = nullptr;
+    sub_entities.timers.push_back(rcl_timer);
+    // Since this callback doesn't go through RclCallbackManager which would otherwise
+    // own an instance of `with_waitable` associated with this callback, we'll bind it
+    // directly into the callback instead.
+    const auto cb = std::bind(&EventsExecutor::HandleWaitableTimerReady, this, waitable,
+                              rcl_timer, wait_set, i, with_waitable, with_waitset);
+    timers_manager_.rcl_manager().AddTimer(rcl_timer, cb);
+  }
+  for (size_t i = 0; i < rcl_waitset->size_of_clients; ++i) {
+    const rcl_client_t* const rcl_client = rcl_waitset->clients[i];
+    rcl_waitset->clients[i] = nullptr;
+    sub_entities.clients.push_back(rcl_client);
+    const auto cb = std::bind(&EventsExecutor::HandleWaitableClientReady, this,
+                              waitable, rcl_client, wait_set, i, with_waitset, pl::_1);
+    if (RCL_RET_OK !=
+        rcl_client_set_on_new_response_callback(
+            rcl_client, RclEventCallbackTrampoline,
+            rcl_callback_manager_.MakeCallback(rcl_client, cb, with_waitable))) {
+      throw std::runtime_error(
+          std::string(
+              "Failed to set the on new response callback for Waitable client: ") +
+          rcl_get_error_string().str);
+    }
+  }
+  for (size_t i = 0; i < rcl_waitset->size_of_services; ++i) {
+    const rcl_service_t* const rcl_service = rcl_waitset->services[i];
+    rcl_waitset->services[i] = nullptr;
+    sub_entities.services.push_back(rcl_service);
+    const auto cb = std::bind(&EventsExecutor::HandleWaitableServiceReady, this,
+                              waitable, rcl_service, wait_set, i, with_waitset, pl::_1);
+    if (RCL_RET_OK !=
+        rcl_service_set_on_new_request_callback(
+            rcl_service, RclEventCallbackTrampoline,
+            rcl_callback_manager_.MakeCallback(rcl_service, cb, with_waitable))) {
+      throw std::runtime_error(
+          std::string(
+              "Failed to set the on new request callback for Waitable service: ") +
+          rcl_get_error_string().str);
+    }
+  }
+  for (size_t i = 0; i < rcl_waitset->size_of_events; ++i) {
+    const rcl_event_t* const rcl_event = rcl_waitset->events[i];
+    rcl_waitset->events[i] = nullptr;
+    sub_entities.events.push_back(rcl_event);
+    const auto cb = std::bind(&EventsExecutor::HandleWaitableEventReady, this, waitable,
+                              rcl_event, wait_set, i, with_waitset, pl::_1);
+    if (RCL_RET_OK != rcl_event_set_callback(rcl_event, RclEventCallbackTrampoline,
+                                             rcl_callback_manager_.MakeCallback(
+                                                 rcl_event, cb, with_waitable))) {
+      throw std::runtime_error(
+          std::string("Failed to set the callback for Waitable event: ") +
+          rcl_get_error_string().str);
+    }
+  }
+
+  // Save the set of discovered sub-entities for later use during tear-down since we
+  // can't repeat the wait set trick then, as the RCL context may already be destroyed
+  // at that point.
+  waitable_entities_[waitable] = std::move(sub_entities);
+}
+
+void EventsExecutor::HandleRemovedWaitable(py::handle waitable) {
+  const auto nh = waitable_entities_.extract(waitable);
+  if (!nh) {
+    throw std::runtime_error("Couldn't find sub-entities entry for removed Waitable");
+  }
+  const WaitableSubEntities& sub_entities = nh.mapped();
+  for (const rcl_subscription_t* const rcl_sub : sub_entities.subscriptions) {
+    if (RCL_RET_OK !=
+        rcl_subscription_set_on_new_message_callback(rcl_sub, nullptr, nullptr)) {
+      throw std::runtime_error(std::string("Failed to clear the on new message "
+                                           "callback for Waitable subscription: ") +
+                               rcl_get_error_string().str);
+    }
+    rcl_callback_manager_.RemoveCallback(rcl_sub);
+  }
+  for (rcl_timer_t* const rcl_timer : sub_entities.timers) {
+    timers_manager_.rcl_manager().RemoveTimer(rcl_timer);
+  }
+  for (const rcl_client_t* const rcl_client : sub_entities.clients) {
+    if (RCL_RET_OK !=
+        rcl_client_set_on_new_response_callback(rcl_client, nullptr, nullptr)) {
+      throw std::runtime_error(std::string("Failed to clear the on new response "
+                                           "callback for Waitable client: ") +
+                               rcl_get_error_string().str);
+    }
+    rcl_callback_manager_.RemoveCallback(rcl_client);
+  }
+  for (const rcl_service_t* const rcl_service : sub_entities.services) {
+    if (RCL_RET_OK !=
+        rcl_service_set_on_new_request_callback(rcl_service, nullptr, nullptr)) {
+      throw std::runtime_error(std::string("Failed to clear the on new request "
+                                           "callback for Waitable service: ") +
+                               rcl_get_error_string().str);
+    }
+    rcl_callback_manager_.RemoveCallback(rcl_service);
+  }
+  for (const rcl_event_t* const rcl_event : sub_entities.events) {
+    if (RCL_RET_OK != rcl_event_set_callback(rcl_event, nullptr, nullptr)) {
+      throw std::runtime_error(
+          std::string("Failed to clear the callback for Waitable event: ") +
+          rcl_get_error_string().str);
+    }
+    rcl_callback_manager_.RemoveCallback(rcl_event);
+  }
+}
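+
+// ScopedWith used throughout above is just the C++ spelling of a Python context
+// manager entry; in pure Python the equivalent protection reads (illustrative):
+//
+//   with subscription.handle:
+//       ...  # the underlying rcl object cannot be destroyed inside this block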
+
+void EventsExecutor::HandleWaitableSubReady(
+    py::handle waitable, const rcl_subscription_t* rcl_sub, py::handle wait_set,
+    size_t wait_set_sub_index, std::shared_ptr<ScopedWith>, size_t number_of_events) {
+  py::gil_scoped_acquire gil_acquire;
+
+  // We need to set up the wait set to make it look like our subscription object is
+  // ready, and then poke the Waitable to do what it needs to do from there.
+  rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set);
+  rcl_waitset->subscriptions[wait_set_sub_index] = rcl_sub;
+  HandleWaitableReady(waitable, wait_set, number_of_events);
+  // Null out the wait set again so that other callbacks can use it on other objects.
+  rcl_waitset->subscriptions[wait_set_sub_index] = nullptr;
+}
+
+void EventsExecutor::HandleWaitableTimerReady(py::handle waitable,
+                                              const rcl_timer_t* rcl_timer,
+                                              py::handle wait_set,
+                                              size_t wait_set_timer_index,
+                                              std::shared_ptr<ScopedWith>,
+                                              std::shared_ptr<ScopedWith>) {
+  py::gil_scoped_acquire gil_acquire;
+
+  // We need to set up the wait set to make it look like our timer object is ready, and
+  // then poke the Waitable to do what it needs to do from there.
+  rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set);
+  rcl_waitset->timers[wait_set_timer_index] = rcl_timer;
+  HandleWaitableReady(waitable, wait_set, 1);
+  // Null out the wait set again so that other callbacks can use it on other objects.
+  rcl_waitset->timers[wait_set_timer_index] = nullptr;
+}
+
+void EventsExecutor::HandleWaitableClientReady(py::handle waitable,
+                                               const rcl_client_t* rcl_client,
+                                               py::handle wait_set,
+                                               size_t wait_set_client_index,
+                                               std::shared_ptr<ScopedWith>,
+                                               size_t number_of_events) {
+  py::gil_scoped_acquire gil_acquire;
+
+  // We need to set up the wait set to make it look like our client object is ready,
+  // and then poke the Waitable to do what it needs to do from there.
+  rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set);
+  rcl_waitset->clients[wait_set_client_index] = rcl_client;
+  HandleWaitableReady(waitable, wait_set, number_of_events);
+  // Null out the wait set again so that other callbacks can use it on other objects.
+  rcl_waitset->clients[wait_set_client_index] = nullptr;
+}
+
+void EventsExecutor::HandleWaitableServiceReady(py::handle waitable,
+                                                const rcl_service_t* rcl_service,
+                                                py::handle wait_set,
+                                                size_t wait_set_service_index,
+                                                std::shared_ptr<ScopedWith>,
+                                                size_t number_of_events) {
+  py::gil_scoped_acquire gil_acquire;
+
+  // We need to set up the wait set to make it look like our service object is ready,
+  // and then poke the Waitable to do what it needs to do from there.
+  rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set);
+  rcl_waitset->services[wait_set_service_index] = rcl_service;
+  HandleWaitableReady(waitable, wait_set, number_of_events);
+  // Null out the wait set again so that other callbacks can use it on other objects.
+  rcl_waitset->services[wait_set_service_index] = nullptr;
+}
+
+void EventsExecutor::HandleWaitableEventReady(
+    py::handle waitable, const rcl_event_t* rcl_event, py::handle wait_set,
+    size_t wait_set_event_index, std::shared_ptr<ScopedWith>, size_t number_of_events) {
+  py::gil_scoped_acquire gil_acquire;
+
+  // We need to set up the wait set to make it look like our event object is ready, and
+  // then poke the Waitable to do what it needs to do from there.
+  rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set);
+  rcl_waitset->events[wait_set_event_index] = rcl_event;
+  HandleWaitableReady(waitable, wait_set, number_of_events);
+  // Null out the wait set again so that other callbacks can use it on other objects.
+  rcl_waitset->events[wait_set_event_index] = nullptr;
+}
+
+void EventsExecutor::HandleWaitableReady(py::handle waitable, py::handle wait_set,
+                                         size_t number_of_events) {
+  // Largely based on rclpy.Executor._take_waitable()
+  // https://github.com/ros2/rclpy/blob/a19180c238d4d97ed2b58868d8fb7fa3e3b621f2/rclpy/rclpy/executors.py#L447-L454
+  py::object is_ready = waitable.attr("is_ready");
+  py::object take_data = waitable.attr("take_data");
+  py::object execute = waitable.attr("execute");
+  py::object futures = waitable.attr("_futures");
+  for (auto& future : futures) {
+    future.attr("_set_executor")(py::cast(this));
+  }
+  for (size_t i = 0; i < number_of_events; ++i) {
+    // This method can have side effects, so it needs to be called even though it looks
+    // like just an accessor.
+    if (!py::cast<bool>(is_ready(wait_set))) {
+      throw std::runtime_error("Failed to make Waitable ready");
+    }
+    py::object data = take_data();
+    try {
+      // execute() is an async method, we need to use asyncio to run it
+      // TODO(bmartin427) Don't run all of this immediately, blocking everything else
+      asyncio_run_(execute(data));
+    } catch (const py::error_already_set& e) {
+      HandleCallbackExceptionInNodeEntity(e, waitable, "waitables");
+      throw;
+    }
+  }
+}
+
+void EventsExecutor::IterateTask(py::handle task) {
+  py::gil_scoped_acquire gil_acquire;
+  // Calling this won't throw, but it may set the exception property on the task
+  // object.
+  task();
+  if (py::cast<bool>(task.attr("done")())) {
+    py::object ex = task.attr("exception")();
+    // Drop reference with GIL held. This doesn't necessarily destroy the underlying
+    // Task, since the `create_task()` caller may have retained a reference to the
+    // returned value.
+    task.dec_ref();
+
+    if (!ex.is_none()) {
+      // It's not clear how to easily turn a Python exception into a C++ one, so let's
+      // just throw it again and let pybind translate it normally.
+      py::dict scope;
+      scope["ex"] = ex;
+      try {
+        py::exec("raise ex", scope);
+      } catch (py::error_already_set& cpp_ex) {
+        // There's no good way to know what node this task came from. If we only have
+        // one node, we can use the logger from that, otherwise we'll have to leave it
+        // undefined.
+        py::object logger = py::none();
+        if (nodes_.size() == 1) {
+          logger = nodes_[0].attr("get_logger")();
+        }
+        HandleCallbackExceptionWithLogger(cpp_ex, logger, "task");
+        throw;
+      }
+    }
+  } else {
+    // Task needs more iteration. Post back to the asio loop again.
+    // TODO(bmartin427) Not sure this is correct; in particular, it's unclear how a
+    // task that needs to wait a while can avoid either blocking or spinning. Revisit
+    // when asyncio support is intentionally added.
+    boost::asio::post(io_context_, std::bind(&EventsExecutor::IterateTask, this, task));
+  }
+}
+
+void EventsExecutor::HandleCallbackExceptionInNodeEntity(
+    const py::error_already_set& exc, py::handle entity,
+    const std::string& node_entity_attr) {
+  // Try to identify the node associated with the entity that threw the exception, so
+  // we can log to it.
+  for (py::handle node : nodes_) {
+    if (py::set(node.attr(node_entity_attr.c_str())).contains(entity)) {
+      HandleCallbackExceptionWithLogger(exc, node.attr("get_logger")(),
+                                        node_entity_attr);
+      return;
+    }
+  }
+
+  // Failed to find a node
+  HandleCallbackExceptionWithLogger(exc, py::none(), node_entity_attr);
+}
+
+void EventsExecutor::HandleCallbackExceptionWithLogger(const py::error_already_set& exc,
+                                                       py::object logger,
+                                                       const std::string& entity_type) {
+  if (logger.is_none()) {
+    py::object logging = py::module_::import("rclpy.logging");
+    logger = logging.attr("get_logger")("UNKNOWN");
+  }
+
+  // The logger API won't let you call it with two different severities, from what it
+  // considers the same code location. Since it has no visibility into C++, all calls
+  // made from here will be attributed to the python that last called into here.
+  // Instead we will call out to python for logging.
+  py::dict scope;
+  scope["logger"] = logger;
+  scope["node_entity_attr"] = entity_type;
+  scope["exc_value"] = exc.value();
+  scope["exc_trace"] = exc.trace();
+  py::exec(
+      R"(
+import traceback
+logger.fatal(f"Exception in '{node_entity_attr}' callback: {exc_value}")
+logger.warn("Error occurred at:\n" + "".join(traceback.format_tb(exc_trace)))
+)",
+      scope);
+}
+
+// pybind11 module bindings
+
+void define_events_executor(py::object module) {
+  py::class_<EventsExecutor>(module, "EventsExecutor")
+      .def(py::init<py::object>(), py::arg("context"))
+      .def_property_readonly("context", &EventsExecutor::get_context)
+      .def("create_task", &EventsExecutor::create_task, py::arg("callback"))
+      .def("shutdown", &EventsExecutor::shutdown, py::arg("timeout_sec") = py::none())
+      .def("add_node", &EventsExecutor::add_node, py::arg("node"))
+      .def("remove_node", &EventsExecutor::remove_node, py::arg("node"))
+      .def("wake", &EventsExecutor::wake)
+      .def("spin", [](EventsExecutor& exec) { exec.spin(); })
+      .def("spin_once", &EventsExecutor::spin_once, py::arg("timeout_sec") = py::none())
+      .def("spin_until_future_complete", &EventsExecutor::spin_until_future_complete,
+           py::arg("future"), py::arg("timeout_sec") = py::none())
+      .def("spin_once_until_future_complete",
+           &EventsExecutor::spin_once_until_future_complete, py::arg("future"),
+           py::arg("timeout_sec") = py::none());
+}
+
+}  // namespace events_executor
+}  // namespace rclpy
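
As a usage sketch of the create_task() binding above (the coroutine is illustrative;
Task and the spin call are real rclpy API exercised by this patch):

    async def do_work():
        return 42

    task = executor.create_task(do_work)       # an rclpy.task.Task, owned per above
    executor.spin_until_future_complete(task)  # IterateTask() re-posts until done
    assert task.result() == 42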
diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp
new file mode 100644
index 000000000..513250f1b
--- /dev/null
+++ b/rclpy/src/rclpy/events_executor/events_executor.hpp
@@ -0,0 +1,186 @@
+// Copyright 2024 Brad Martin
+// Copyright 2024 Merlin Labs, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <boost/asio/io_context.hpp>
+#include <boost/asio/signal_set.hpp>
+
+#include <rcl/client.h>
+#include <rcl/event.h>
+#include <rcl/service.h>
+#include <rcl/subscription.h>
+#include <rcl/timer.h>
+
+#include <pybind11/pybind11.h>
+
+#include "rcl_support.hpp"
+#include "scoped_with.hpp"
+#include "timers_manager.hpp"
+
+namespace rclpy {
+namespace events_executor {
+
+/// Events executor implementation for rclpy
+///
+/// This executor implementation attempts to replicate the function of the rclcpp
+/// EventsExecutor for the benefit of rclpy applications. It is implemented in C++ to
+/// minimize the overhead of processing the event loop.
+///
+/// We assume all public methods could be invoked from any thread. Callbacks on the
+/// executor loop will be issued on the thread that called one of the spin*() variants
+/// (ignoring any parallelism that might be allowed by the callback group
+/// configuration).
+class EventsExecutor {
+ public:
+  /// @param context the rclpy Context object to operate on
+  explicit EventsExecutor(pybind11::object context);
+
+  ~EventsExecutor();
+
+  // rclpy Executor API methods:
+  pybind11::object get_context() const { return rclpy_context_; }
+  pybind11::object create_task(pybind11::object callback, pybind11::args args,
+                               const pybind11::kwargs& kwargs);
+  bool shutdown(std::optional<double> timeout_sec = {});
+  bool add_node(pybind11::object node);
+  void remove_node(pybind11::handle node);
+  void wake();
+  void spin(std::optional<double> timeout_sec = {});
+  void spin_once(std::optional<double> timeout_sec = {});
+  void spin_until_future_complete(pybind11::handle future,
+                                  std::optional<double> timeout_sec = {});
+  void spin_once_until_future_complete(pybind11::handle future,
+                                       std::optional<double> timeout_sec = {});
+
+ private:
+  // Structure to hold entities discovered underlying a Waitable object.
+  struct WaitableSubEntities {
+    std::vector<const rcl_subscription_t*> subscriptions;
+    std::vector<rcl_timer_t*> timers;  // Can't be const
+    std::vector<const rcl_client_t*> clients;
+    std::vector<const rcl_service_t*> services;
+    std::vector<const rcl_event_t*> events;
+  };
+
+  /// Updates the sets of known entities based on the currently tracked nodes. This is
+  /// not thread safe, so it must be posted to the io_context if the executor is
+  /// currently spinning. Expects the GIL to be held before calling. If @p shutdown is
+  /// true, a purge of all known nodes and entities is forced.
+  void UpdateEntitiesFromNodes(bool shutdown);
+
+  /// Given an existing set of entities and a set with the desired new state, updates
+  /// the existing set and invokes callbacks on each added or removed entity.
+  void UpdateEntitySet(pybind11::set& entity_set, const pybind11::set& new_entity_set,
+                       std::function<void(pybind11::handle)> added_entity_callback,
+                       std::function<void(pybind11::handle)> removed_entity_callback);
+
+  void HandleAddedSubscription(pybind11::handle);
+  void HandleRemovedSubscription(pybind11::handle);
+  void HandleSubscriptionReady(pybind11::handle, size_t number_of_events);
+
+  void HandleAddedTimer(pybind11::handle);
+  void HandleRemovedTimer(pybind11::handle);
+  void HandleTimerReady(pybind11::handle);
+
+  void HandleAddedClient(pybind11::handle);
+  void HandleRemovedClient(pybind11::handle);
+  void HandleClientReady(pybind11::handle, size_t number_of_events);
+
+  void HandleAddedService(pybind11::handle);
+  void HandleRemovedService(pybind11::handle);
+  void HandleServiceReady(pybind11::handle, size_t number_of_events);
+
+  void HandleAddedWaitable(pybind11::handle);
+  void HandleRemovedWaitable(pybind11::handle);
+  void HandleWaitableSubReady(pybind11::handle waitable, const rcl_subscription_t*,
+                              pybind11::handle wait_set, size_t wait_set_sub_index,
+                              std::shared_ptr<ScopedWith> with_waitset,
+                              size_t number_of_events);
+  void HandleWaitableTimerReady(pybind11::handle waitable, const rcl_timer_t*,
+                                pybind11::handle wait_set, size_t wait_set_timer_index,
+                                std::shared_ptr<ScopedWith> with_waitable,
+                                std::shared_ptr<ScopedWith> with_waitset);
+  void HandleWaitableClientReady(pybind11::handle waitable, const rcl_client_t*,
+                                 pybind11::handle wait_set,
+                                 size_t wait_set_client_index,
+                                 std::shared_ptr<ScopedWith> with_waitset,
+                                 size_t number_of_events);
+  void HandleWaitableServiceReady(pybind11::handle waitable, const rcl_service_t*,
+                                  pybind11::handle wait_set,
+                                  size_t wait_set_service_index,
+                                  std::shared_ptr<ScopedWith> with_waitset,
+                                  size_t number_of_events);
+  void HandleWaitableEventReady(pybind11::handle waitable, const rcl_event_t*,
+                                pybind11::handle wait_set, size_t wait_set_event_index,
+                                std::shared_ptr<ScopedWith> with_waitset,
+                                size_t number_of_events);
+  void HandleWaitableReady(pybind11::handle waitable, pybind11::handle wait_set,
+                           size_t number_of_events);
+
+  /// Helper for create_task(). @p task needs to have had one reference manually added
+  /// to it. See create_task() implementation for details.
+  void IterateTask(pybind11::handle task);
+
+  void HandleCallbackExceptionInNodeEntity(const pybind11::error_already_set&,
+                                           pybind11::handle entity,
+                                           const std::string& node_entity_attr);
+  void HandleCallbackExceptionWithLogger(const pybind11::error_already_set&,
+                                         pybind11::object logger,
+                                         const std::string& entity_type);
+
+  const pybind11::object rclpy_context_;
+
+  // Imported python objects we depend on
+  const pybind11::object asyncio_run_;
+  const pybind11::object rclpy_task_;
+
+  boost::asio::io_context io_context_;
+  boost::asio::signal_set signals_;
+
+  std::recursive_mutex nodes_mutex_;    ///< Protects the node set
+  pybind11::set nodes_;                 ///< The set of all nodes we're executing
+  std::atomic<bool> wake_pending_{};    ///< An unhandled call to wake() has been made
+  std::timed_mutex spinning_mutex_;     ///< Held while a thread is spinning
+  std::atomic<bool> sigint_pending_{};
+
+  // Collection of awaitable entities we're servicing
+  pybind11::set subscriptions_;
+  pybind11::set timers_;
+  pybind11::set clients_;
+  pybind11::set services_;
+  pybind11::set waitables_;
+
+  /// Cache for rcl pointers underlying each waitables_ entry, because those are harder
+  /// to retrieve than the other entity types.
+  std::unordered_map<pybind11::handle, WaitableSubEntities, PythonHasher>
+      waitable_entities_;
+
+  RclCallbackManager rcl_callback_manager_;
+  TimersManager timers_manager_;
+};
+
+void define_events_executor(pybind11::object module);
+
+}  // namespace events_executor
+}  // namespace rclpy
diff --git a/rclpy/src/rclpy/events_executor/python_hasher.hpp b/rclpy/src/rclpy/events_executor/python_hasher.hpp
new file mode 100644
index 000000000..1ff9f8c2b
--- /dev/null
+++ b/rclpy/src/rclpy/events_executor/python_hasher.hpp
@@ -0,0 +1,30 @@
+// Copyright 2024 Brad Martin
+// Copyright 2024 Merlin Labs, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <pybind11/pybind11.h>
+
+namespace rclpy {
+namespace events_executor {
+
+/// This is intended to be used as the Hash template arg to STL containers using a
+/// pybind11::handle as a Key. This is the same hash that a native Python dict or set
+/// would use given the same key.
+struct PythonHasher {
+  inline ssize_t operator()(const pybind11::handle& handle) const {
+    return pybind11::hash(handle);
+  }
+};
+
+}  // namespace events_executor
+}  // namespace rclpy
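
The point of PythonHasher above is that C++ containers keyed on handles agree with
the interpreter's own hashing; in Python terms (illustrative):

    >>> d = {}
    >>> d[node] = 'entry'  # CPython hashes the key via PyObject_Hash
    >>> node in d          # pybind11::hash() invokes the same protocol
    True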
diff --git a/rclpy/src/rclpy/events_executor/rcl_support.cpp b/rclpy/src/rclpy/events_executor/rcl_support.cpp
new file mode 100644
index 000000000..1a53a9e7e
--- /dev/null
+++ b/rclpy/src/rclpy/events_executor/rcl_support.cpp
@@ -0,0 +1,112 @@
+// Copyright 2024 Brad Martin
+// Copyright 2024 Merlin Labs, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "events_executor/rcl_support.hpp"
+
+#include <cstdlib>
+#include <memory>
+#include <utility>
+
+#include <boost/asio/post.hpp>
+
+namespace py = pybind11;
+
+namespace rclpy {
+namespace events_executor {
+
+extern "C" void RclEventCallbackTrampoline(const void* user_data,
+                                           size_t number_of_events) {
+  const auto cb = reinterpret_cast<const std::function<void(size_t)>*>(user_data);
+  (*cb)(number_of_events);
+}
+
+RclCallbackManager::RclCallbackManager(const boost::asio::any_io_executor& executor)
+    : executor_(executor) {}
+
+RclCallbackManager::~RclCallbackManager() {
+  // Should not still have any callbacks registered when we exit, because otherwise RCL
+  // can call pointers that will no longer be valid. We can't throw an exception here,
+  // but we can explode.
+  if (!owned_cbs_.empty()) {
+    py::gil_scoped_acquire gil_acquire;
+    py::print("Destroying callback manager with callbacks remaining");
+    ::abort();
+  }
+}
+
+const void* RclCallbackManager::MakeCallback(const void* key,
+                                             std::function<void(size_t)> callback,
+                                             std::shared_ptr<ScopedWith> with) {
+  // We don't support replacing an existing callback with a new one, because it gets
+  // tricky making sure we don't delete an old callback while the middleware still
+  // holds a pointer to it.
+  if (owned_cbs_.count(key) != 0) {
+    throw py::key_error("Attempt to replace existing callback");
+  }
+  CbEntry new_entry;
+  new_entry.cb = std::make_unique<std::function<void(size_t)>>(
+      [this, callback, key](size_t number_of_events) {
+        boost::asio::post(executor_, [this, callback, key, number_of_events]() {
+          if (!owned_cbs_.count(key)) {
+            // This callback has been removed, just drop it as the objects it may want
+            // to touch may no longer exist.
+            return;
+          }
+          callback(number_of_events);
+        });
+      });
+  new_entry.with = with;
+  const void* ret = new_entry.cb.get();
+  owned_cbs_[key] = std::move(new_entry);
+  return ret;
+}
+
+void RclCallbackManager::RemoveCallback(const void* key) {
+  if (!owned_cbs_.erase(key)) {
+    throw py::key_error("Attempt to remove nonexistent callback");
+  }
+}
+
+namespace {
+// This helper function is used for retrieving rcl pointers from _rclpy C++ objects.
+// Because _rclpy doesn't install its C++ headers for public use, it's difficult to use
+// the C++ classes directly. But, we can treat them as if they are Python objects using
+// their defined Python API. Unfortunately the python interfaces convert the returned
+// pointer to integers, so recovering that looks a little weird.
+template <typename RclT>
+RclT* GetRclPtr(py::handle py_ent_handle) {
+  return reinterpret_cast<RclT*>(py::cast<size_t>(py_ent_handle.attr("pointer")));
+}
+}  // namespace
+
+rcl_subscription_t* GetRclSubscription(py::handle sub_handle) {
+  return GetRclPtr<rcl_subscription_t>(sub_handle);
+}
+
+rcl_timer_t* GetRclTimer(py::handle timer_handle) {
+  return GetRclPtr<rcl_timer_t>(timer_handle);
+}
+
+rcl_client_t* GetRclClient(py::handle cl_handle) {
+  return GetRclPtr<rcl_client_t>(cl_handle);
+}
+
+rcl_service_t* GetRclService(py::handle srv_handle) {
+  return GetRclPtr<rcl_service_t>(srv_handle);
+}
+
+rcl_wait_set_t* GetRclWaitSet(py::handle ws) { return GetRclPtr<rcl_wait_set_t>(ws); }
+
+}  // namespace events_executor
+}  // namespace rclpy
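
The "pointer" attribute that GetRclPtr() reads is the Python-visible face of the
_rclpy handle objects; interactively (illustrative value):

    >>> subscription.handle.pointer   # a plain int carrying the rcl pointer value
    140012345678912
    >>> # C++ side recovers it: reinterpret_cast<rcl_subscription_t*>(<that value>)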
+  struct CbEntry {
+    std::unique_ptr<std::function<void(size_t)>> cb;
+    std::shared_ptr<ScopedWith> with;
+  };
+
+  boost::asio::any_io_executor executor_;
+
+  /// The map key is the raw pointer to the RCL entity object (subscription, etc.)
+  /// associated with the callback.
+  std::unordered_map<const void*, CbEntry> owned_cbs_;
+};
+
+/// Returns the RCL subscription object pointer from a subscription handle (i.e. the
+/// handle attribute of an rclpy Subscription object, which is a _rclpy C++
+/// Subscription object).  Assumes that a ScopedWith has already been entered on the
+/// given handle.
+rcl_subscription_t* GetRclSubscription(pybind11::handle);
+
+/// Returns the RCL timer object pointer from a timer handle (i.e. the handle
+/// attribute of an rclpy Timer object, which is a _rclpy C++ Timer object).  Assumes
+/// that a ScopedWith has already been entered on the given handle.
+rcl_timer_t* GetRclTimer(pybind11::handle);
+
+/// Returns the RCL client object pointer from a client handle (i.e. the handle
+/// attribute of an rclpy Client object, which is a _rclpy C++ Client object).
+/// Assumes that a ScopedWith has already been entered on the given handle.
+rcl_client_t* GetRclClient(pybind11::handle);
+
+/// Returns the RCL service object pointer from a service handle (i.e. the handle
+/// attribute of an rclpy Service object, which is a _rclpy C++ Service object).
+/// Assumes that a ScopedWith has already been entered on the given handle.
+rcl_service_t* GetRclService(pybind11::handle);
+
+/// Returns the RCL wait set object pointer from a _rclpy C++ WaitSet object.
+/// Assumes that a ScopedWith has already been entered on the given object.
+rcl_wait_set_t* GetRclWaitSet(pybind11::handle);
+
+}  // namespace events_executor
+}  // namespace rclpy
diff --git a/rclpy/src/rclpy/events_executor/scoped_with.hpp b/rclpy/src/rclpy/events_executor/scoped_with.hpp
new file mode 100644
index 000000000..53e3e6daa
--- /dev/null
+++ b/rclpy/src/rclpy/events_executor/scoped_with.hpp
@@ -0,0 +1,39 @@
+// Copyright 2024 Brad Martin
+// Copyright 2024 Merlin Labs, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <pybind11/pybind11.h>
+
+namespace rclpy {
+namespace events_executor {
+
+/// Enters a Python context manager for the scope of this object instance.
+class ScopedWith {
+ public:
+  explicit ScopedWith(pybind11::handle object)
+      : object_(pybind11::cast<pybind11::object>(object)) {
+    object_.attr("__enter__")();
+  }
+
+  ~ScopedWith() {
+    object_.attr("__exit__")(pybind11::none(), pybind11::none(), pybind11::none());
+  }
+
+ private:
+  pybind11::object object_;
+};
+
+}  // namespace events_executor
+}  // namespace rclpy
diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp
new file mode 100644
index 000000000..2318754e5
--- /dev/null
+++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp
@@ -0,0 +1,329 @@
+// Copyright 2024 Brad Martin
+// Copyright 2024 Merlin Labs, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "events_executor/timers_manager.hpp"
+
+#include <functional>
+#include <memory>
+#include <optional>
+#include <stdexcept>
+#include <utility>
+
+#include <boost/asio/any_io_executor.hpp>
+#include <boost/asio/deadline_timer.hpp>
+#include <boost/asio/error.hpp>
+#include <boost/asio/post.hpp>
+
+#include <pybind11/pybind11.h>
+
+#include "rcl_support.hpp"
+
+namespace py = pybind11;
+
+namespace rclpy {
+namespace events_executor {
+
+namespace {
+
+// Implementation note: the original iRobot TimersManager associated with the rclcpp
+// EventsExecutor maintained a heap with all outstanding timers sorted by their next
+// expiration time.  Here that approach will be skipped in favor of just looking at
+// every timer on every update, for the following reasons:
+//
+// * This approach is simpler.
+// * In the applications this has been used in so far, no Node types exist that have
+//   a large number of timers outstanding at once.  Assuming no more than a few
+//   timers exist in the whole process, the heap seems like overkill.
+// * Every time a timer ticks or is reset, the heap needs to be resorted anyway.
+// * The rcl timer interface doesn't expose any way to get timer expiration info in
+//   absolute terms; you can only find out 'time until next callback'.  This means if
+//   you are trying to sort the list of timers, the 'value' of each entry in the heap
+//   changes depending on when you look at it during the process of sorting.
+//
+// We will however yell a bit if we ever see a large number of timers that disproves
+// this assumption, so we can reassess this decision.
+constexpr size_t WARN_TIMERS_COUNT = 8;
+
+typedef std::function<void(const rcl_time_jump_t*)> ClockJumpCallbackT;
+typedef std::function<void()> TimerResetCallbackT;
+
+extern "C" void RclClockJumpTrampoline(const rcl_time_jump_t* time_jump,
+                                       bool before_jump, void* user_data) {
+  // rcl calls this both before and after a clock change, and we never want the
+  // before callback, so let's just eliminate that case early.
+  if (before_jump) {
+    return;
+  }
+  auto cb = reinterpret_cast<ClockJumpCallbackT*>(user_data);
+  (*cb)(time_jump);
+}
+
+extern "C" void RclTimerResetTrampoline(const void* user_data, size_t) {
+  auto cb = reinterpret_cast<const TimerResetCallbackT*>(user_data);
+  (*cb)();
+}
+
+}  // namespace
+
+/// Manages a single clock source, and all timers operating on that source.  All
+/// methods (including construction and destruction) are assumed to happen on the
+/// thread running the provided asio executor.
+class RclTimersManager::ClockManager {
+ public:
+  ClockManager(const boost::asio::any_io_executor& executor, rcl_clock_t* clock)
+      : executor_(executor), clock_(clock) {
+    // Need to establish a clock jump callback so we can tell when debug time is
+    // updated.
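+    // (A threshold with min_forward/min_backward of ±1ns and on_clock_change set
+    // means we get notified of any jump in either direction, as well as any
+    // activation change of the ROS time override.)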
+    rcl_jump_threshold_t threshold{
+        .on_clock_change = true, .min_forward = {1}, .min_backward = {-1}};
+    // Note, this callback could happen on any thread.
+    jump_cb_ = [this](const rcl_time_jump_t* time_jump) {
+      bool on_debug{};
+      switch (time_jump->clock_change) {
+        case RCL_ROS_TIME_NO_CHANGE:
+        case RCL_ROS_TIME_ACTIVATED:
+          on_debug = true;
+          break;
+        case RCL_ROS_TIME_DEACTIVATED:
+        case RCL_SYSTEM_TIME_NO_CHANGE:
+          on_debug = false;
+          break;
+      }
+      boost::asio::post(executor_,
+                        std::bind(&ClockManager::HandleJump, this, on_debug));
+    };
+    if (RCL_RET_OK != rcl_clock_add_jump_callback(clock_, threshold,
+                                                  RclClockJumpTrampoline,
+                                                  &jump_cb_)) {
+      throw std::runtime_error(std::string("Failed to set RCL clock jump callback: ") +
+                               rcl_get_error_string().str);
+    }
+
+    // This isn't necessary yet, but every timer will eventually depend on it.
+    // Again, this could happen on any thread.
+    reset_cb_ = [this]() {
+      boost::asio::post(executor_, std::bind(&ClockManager::UpdateTimers, this));
+    };
+
+    // Initialize which timebase we're on.
+    if (clock_->type == RCL_ROS_TIME) {
+      if (RCL_RET_OK != rcl_is_enabled_ros_time_override(clock_, &on_debug_time_)) {
+        throw std::runtime_error(
+            std::string("Failed to get RCL clock override state: ") +
+            rcl_get_error_string().str);
+      }
+    }
+  }
+
+  ~ClockManager() {
+    if (RCL_RET_OK !=
+        rcl_clock_remove_jump_callback(clock_, RclClockJumpTrampoline, &jump_cb_)) {
+      py::gil_scoped_acquire gil_acquire;
+      py::print(std::string("Failed to remove RCL clock jump callback: ") +
+                rcl_get_error_string().str);
+    }
+    while (!timers_.empty()) {
+      RemoveTimer(timers_.begin()->first);
+    }
+  }
+
+  bool empty() const { return timers_.empty(); }
+
+  void AddTimer(rcl_timer_t* timer, std::function<void()> ready_callback) {
+    // All timers have the same reset callback.
+    if (RCL_RET_OK != rcl_timer_set_on_reset_callback(timer, RclTimerResetTrampoline,
+                                                      &reset_cb_)) {
+      throw std::runtime_error(std::string("Failed to set timer reset callback: ") +
+                               rcl_get_error_string().str);
+    }
+    timers_[timer] = ready_callback;
+    if (timers_.size() == WARN_TIMERS_COUNT) {
+      py::print("Warning, the number of timers associated with this clock is large.");
+      py::print("Management of this number of timers may be inefficient.");
+    }
+    UpdateTimers();
+  }
+
+  void RemoveTimer(rcl_timer_t* timer) {
+    auto it = timers_.find(timer);
+    if (it == timers_.end()) {
+      throw py::key_error("Attempt to remove unmanaged timer");
+    }
+
+    if (RCL_RET_OK != rcl_timer_set_on_reset_callback(timer, nullptr, nullptr)) {
+      throw std::runtime_error(std::string("Failed to clear timer reset callback: ") +
+                               rcl_get_error_string().str);
+    }
+    timers_.erase(it);
+    // We could re-evaluate how long we need to block for now that a timer has been
+    // removed; but there's no real harm in one extra wakeup that then decides it
+    // doesn't need to do anything, and this timer might not even be the next to
+    // fire, so we won't bother.
+  }
+
+ private:
+  void HandleJump(bool on_debug_time) {
+    on_debug_time_ = on_debug_time;
+    UpdateTimers();
+  }
+
+  void UpdateTimers() {
+    // First, evaluate all of our timers and dispatch any that are ready now.  While
+    // we're at it, keep track of the earliest next timer callback that is due.
+    std::optional<int64_t> next_ready_ns;
+    for (const auto& timer_cb_pair : timers_) {
+      auto next_call_ns = GetNextCallNanoseconds(timer_cb_pair.first);
+      if (next_call_ns <= 0) {
+        // This just notifies RCL that we're considering the timer triggered, for
+        // the purposes of updating the next trigger time.
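+        // (rcl_timer_call() also updates the timer's last-call time, which the
+        // GetNextCallNanoseconds() query below relies on.)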
+        const auto ret = rcl_timer_call(timer_cb_pair.first);
+        switch (ret) {
+          case RCL_RET_OK:
+            break;
+          case RCL_RET_TIMER_CANCELED:
+            // Someone apparently canceled the timer *after* we just queried the
+            // next call time?  Nevermind, then...
+            rcl_reset_error();
+            continue;
+          default:
+            throw std::runtime_error(std::string("Failed to call RCL timer: ") +
+                                     rcl_get_error_string().str);
+        }
+
+        // Post the user callback to be invoked later once timing-sensitive code is
+        // done.
+        boost::asio::post(executor_, timer_cb_pair.second);
+
+        // Update time until *next* call.
+        next_call_ns = GetNextCallNanoseconds(timer_cb_pair.first);
+      }
+      if (!next_ready_ns || (next_call_ns < *next_ready_ns)) {
+        next_ready_ns = next_call_ns;
+      }
+    }
+
+    // If we're not on debug time, we should schedule another wakeup when we
+    // anticipate the next timer being ready.  If we are, we'll just re-check
+    // everything at the next jump callback.
+    if (!on_debug_time_ && next_ready_ns) {
+      // Boost doesn't support nanoseconds by default.  Round the conversion up so
+      // we don't wake early and find nothing to do.
+      const int64_t sleep_us = (*next_ready_ns + 999) / 1000;
+      next_update_wait_.expires_from_now(boost::posix_time::microseconds(sleep_us));
+      next_update_wait_.async_wait([this](const boost::system::error_code& ec) {
+        if (!ec) {
+          UpdateTimers();
+        } else if (ec != boost::asio::error::operation_aborted) {
+          throw std::runtime_error("Error waiting for next timer: " + ec.message());
+        }
+      });
+    } else {
+      next_update_wait_.cancel();
+    }
+  }
+
+  /// Returns the number of nanoseconds until the next callback on the given timer
+  /// is due.  The value may be negative or zero if the callback time has already
+  /// been reached.  Returns std::nullopt if the timer is canceled.
+  static std::optional<int64_t> GetNextCallNanoseconds(const rcl_timer_t* rcl_timer) {
+    int64_t time_until_next_call{};
+    const rcl_ret_t ret =
+        rcl_timer_get_time_until_next_call(rcl_timer, &time_until_next_call);
+    switch (ret) {
+      case RCL_RET_OK:
+        return time_until_next_call;
+      case RCL_RET_TIMER_CANCELED:
+        return {};
+      default:
+        throw std::runtime_error(std::string("Failed to fetch timer ready time: ") +
+                                 rcl_get_error_string().str);
+    }
+  }
+
+  boost::asio::any_io_executor executor_;
+  rcl_clock_t* const clock_;
+  ClockJumpCallbackT jump_cb_;
+  TimerResetCallbackT reset_cb_;
+  bool on_debug_time_{};
+
+  std::unordered_map<rcl_timer_t*, std::function<void()>> timers_;
+  boost::asio::deadline_timer next_update_wait_{executor_};
+};
+
+RclTimersManager::RclTimersManager(const boost::asio::any_io_executor& executor)
+    : executor_(executor) {}
+
+RclTimersManager::~RclTimersManager() {}
+
+namespace {
+rcl_clock_t* GetTimerClock(rcl_timer_t* timer) {
+  rcl_clock_t* clock{};
+  if (RCL_RET_OK != rcl_timer_clock(timer, &clock)) {
+    throw std::runtime_error(std::string("Failed to determine clock for timer: ") +
+                             rcl_get_error_string().str);
+  }
+  return clock;
+}
+}  // namespace
+
+void RclTimersManager::AddTimer(rcl_timer_t* timer,
+                                std::function<void()> ready_callback) {
+  // Figure out the clock this timer is using, make sure a manager exists for that
+  // clock, then forward the timer to that clock's manager.
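+  // (Managers are created lazily here, and torn down again in RemoveTimer() once
+  // their last timer is gone.)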
+  rcl_clock_t* clock = GetTimerClock(timer);
+  auto it = clock_managers_.find(clock);
+  if (it == clock_managers_.end()) {
+    std::tie(it, std::ignore) = clock_managers_.insert(
+        std::make_pair(clock, std::make_unique<ClockManager>(executor_, clock)));
+  }
+  it->second->AddTimer(timer, ready_callback);
+}
+
+void RclTimersManager::RemoveTimer(rcl_timer_t* timer) {
+  const rcl_clock_t* clock = GetTimerClock(timer);
+  auto it = clock_managers_.find(clock);
+  if (it == clock_managers_.end()) {
+    throw py::key_error("Attempt to remove timer from unmanaged clock");
+  }
+  it->second->RemoveTimer(timer);
+  if (it->second->empty()) {
+    clock_managers_.erase(it);
+  }
+}
+
+TimersManager::TimersManager(const boost::asio::any_io_executor& executor,
+                             std::function<void(py::handle)> timer_ready_callback)
+    : rcl_manager_(executor), ready_callback_(timer_ready_callback) {}
+
+TimersManager::~TimersManager() {}
+
+void TimersManager::AddTimer(py::handle timer) {
+  PyRclMapping mapping;
+  py::handle handle = timer.attr("handle");
+  mapping.with = std::make_unique<ScopedWith>(handle);
+  mapping.rcl_ptr = GetRclTimer(handle);
+  rcl_manager_.AddTimer(mapping.rcl_ptr, std::bind(ready_callback_, timer));
+  timer_mappings_[timer] = std::move(mapping);
+}
+
+void TimersManager::RemoveTimer(py::handle timer) {
+  const auto it = timer_mappings_.find(timer);
+  if (it == timer_mappings_.end()) {
+    throw py::key_error("Attempt to remove unmanaged timer");
+  }
+  rcl_manager_.RemoveTimer(it->second.rcl_ptr);
+  timer_mappings_.erase(it);
+}
+
+}  // namespace events_executor
+}  // namespace rclpy
diff --git a/rclpy/src/rclpy/events_executor/timers_manager.hpp b/rclpy/src/rclpy/events_executor/timers_manager.hpp
new file mode 100644
index 000000000..314dac9b4
--- /dev/null
+++ b/rclpy/src/rclpy/events_executor/timers_manager.hpp
@@ -0,0 +1,84 @@
+// Copyright 2024 Brad Martin
+// Copyright 2024 Merlin Labs, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <unordered_map>
+
+#include <boost/asio/any_io_executor.hpp>
+
+#include <pybind11/pybind11.h>
+
+#include <rcl/time.h>
+#include <rcl/timer.h>
+
+#include "events_executor/python_hasher.hpp"
+#include "events_executor/scoped_with.hpp"
+
+namespace rclpy {
+namespace events_executor {
+
+/// This class manages low-level rcl timers in the system on behalf of
+/// EventsExecutor.
+class RclTimersManager {
+ public:
+  explicit RclTimersManager(const boost::asio::any_io_executor&);
+  ~RclTimersManager();
+
+  void AddTimer(rcl_timer_t*, std::function<void()> ready_callback);
+  void RemoveTimer(rcl_timer_t*);
+
+ private:
+  boost::asio::any_io_executor executor_;
+
+  class ClockManager;
+  /// Handlers for each distinct clock source in the system.
+  std::unordered_map<const rcl_clock_t*, std::unique_ptr<ClockManager>>
+      clock_managers_;
+};
+
+/// This class manages rclpy.Timer Python objects on behalf of EventsExecutor.
+class TimersManager {
+ public:
+  /// @p timer_ready_callback will be invoked with the timer handle whenever a
+  /// managed timer is ready for servicing.
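+  /// (The callback is posted on the asio executor without the GIL held; it must
+  /// acquire the GIL itself before touching the timer object.)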
+  TimersManager(const boost::asio::any_io_executor&,
+                std::function<void(pybind11::handle)> timer_ready_callback);
+  ~TimersManager();
+
+  /// Accessor for underlying rcl timer manager, for management of non-Python timers.
+  RclTimersManager& rcl_manager() { return rcl_manager_; }
+
+  // Both of these methods expect the GIL to be held when they are called.
+  void AddTimer(pybind11::handle timer);
+  void RemoveTimer(pybind11::handle timer);
+
+ private:
+  struct PyRclMapping {
+    /// Marks the corresponding Python object as in-use for as long as we're using
+    /// the rcl pointer derived from it.
+    std::unique_ptr<ScopedWith> with;
+
+    /// The underlying rcl object.
+    rcl_timer_t* rcl_ptr{};
+  };
+
+  RclTimersManager rcl_manager_;
+  const std::function<void(pybind11::handle)> ready_callback_;
+
+  std::unordered_map<pybind11::handle, PyRclMapping, PythonHasher> timer_mappings_;
+};
+
+}  // namespace events_executor
+}  // namespace rclpy
diff --git a/rclpy/test/test_events_executor.py b/rclpy/test/test_events_executor.py
new file mode 100644
index 000000000..8addb8ba1
--- /dev/null
+++ b/rclpy/test/test_events_executor.py
@@ -0,0 +1,659 @@
+# Copyright 2024 Brad Martin
+# Copyright 2024 Merlin Labs, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import typing
+import unittest
+
+import action_msgs.msg
+import rclpy.action
+import rclpy.duration
+import rclpy.event_handler
+import rclpy.executors
+import rclpy.experimental
+import rclpy.node
+import rclpy.qos
+import rclpy.time
+import rosgraph_msgs.msg
+import test_msgs.action
+import test_msgs.msg
+import test_msgs.srv
+
+
+def _get_pub_sub_qos(transient_local: bool) -> rclpy.qos.QoSProfile:
+    if not transient_local:
+        return rclpy.qos.QoSProfile(history=rclpy.qos.HistoryPolicy.KEEP_ALL)
+    # For test purposes we deliberately want a TRANSIENT_LOCAL QoS with KEEP_ALL
+    # history.
+    return rclpy.qos.QoSProfile(
+        history=rclpy.qos.HistoryPolicy.KEEP_ALL,
+        durability=rclpy.qos.DurabilityPolicy.TRANSIENT_LOCAL,
+    )
+
+
+class SubTestNode(rclpy.node.Node):
+    """Node to test subscriptions and subscription-related events."""
+
+    def __init__(self, *, transient_local: bool = False):
+        super().__init__("test_sub_node")
+        self._new_pub_future: rclpy.Future[
+            rclpy.event_handler.QoSSubscriptionMatchedInfo
+        ] | None = None
+        self._received_future: rclpy.Future[test_msgs.msg.BasicTypes] | None = None
+        self.create_subscription(
+            test_msgs.msg.BasicTypes,
+            # This node seems to get stale discovery data and then complain about QoS
+            # changes if we reuse the same topic name.
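+            # (i.e. "test_topic" for the default case, or
+            # "test_topic_transient_local" for the TRANSIENT_LOCAL variant)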
+ "test_topic" + ("_transient_local" if transient_local else ""), + self._handle_sub, + _get_pub_sub_qos(transient_local), + event_callbacks=rclpy.event_handler.SubscriptionEventCallbacks( + matched=self._handle_matched_sub + ), + ) + + def expect_pub_info( + self, + ) -> rclpy.Future[rclpy.event_handler.QoSSubscriptionMatchedInfo]: + self._new_pub_future = rclpy.Future() + return self._new_pub_future + + def expect_message(self) -> rclpy.Future[test_msgs.msg.BasicTypes]: + self._received_future = rclpy.Future() + return self._received_future + + def _handle_sub(self, msg: test_msgs.msg.BasicTypes) -> None: + if self._received_future is not None: + future = self._received_future + self._received_future = None + future.set_result(msg) + + def _handle_matched_sub( + self, info: rclpy.event_handler.QoSSubscriptionMatchedInfo + ) -> None: + """Handle a new publisher being matched to our subscription.""" + if self._new_pub_future is not None: + self._new_pub_future.set_result(info) + self._new_pub_future = None + + +class PubTestNode(rclpy.node.Node): + """Node to test publications and publication-related events.""" + + def __init__(self, *, transient_local: bool = False): + super().__init__("test_pub_node") + self._new_sub_future: rclpy.Future[ + rclpy.event_handler.QoSPublisherMatchedInfo + ] | None = None + self._pub = self.create_publisher( + test_msgs.msg.BasicTypes, + "test_topic" + ("_transient_local" if transient_local else ""), + _get_pub_sub_qos(transient_local), + event_callbacks=rclpy.event_handler.PublisherEventCallbacks( + matched=self._handle_matched_pub + ), + ) + + def expect_sub_info( + self, + ) -> rclpy.Future[rclpy.event_handler.QoSPublisherMatchedInfo]: + self._new_sub_future = rclpy.Future() + return self._new_sub_future + + def publish(self, value: float) -> None: + self._pub.publish(test_msgs.msg.BasicTypes(float32_value=value)) + + def _handle_matched_pub( + self, info: rclpy.event_handler.QoSPublisherMatchedInfo + ) -> None: + """Handle a new subscriber being matched to our publication.""" + if self._new_sub_future is not None: + self._new_sub_future.set_result(info) + self._new_sub_future = None + + +class ServiceServerTestNode(rclpy.node.Node): + """Node to test service server-side operation.""" + + def __init__(self): + super().__init__("test_service_server_node") + self._got_request_future: rclpy.Future[ + test_msgs.srv.BasicTypes.Request + ] | None = None + self._pending_response: test_msgs.srv.BasicTypes.Response | None = None + self.create_service( + test_msgs.srv.BasicTypes, "test_service", self._handle_request + ) + + def expect_request( + self, success: bool, error_msg: str + ) -> rclpy.Future[test_msgs.srv.BasicTypes.Request]: + """Expect an incoming request. + + The arguments are used to compose the response. 
+ """ + self._got_request_future = rclpy.Future() + self._pending_response = test_msgs.srv.BasicTypes.Response( + bool_value=success, string_value=error_msg + ) + return self._got_request_future + + def _handle_request( + self, + req: test_msgs.srv.BasicTypes.Request, + res: test_msgs.srv.BasicTypes.Response, + ) -> test_msgs.srv.BasicTypes.Response: + if self._got_request_future is not None: + self._got_request_future.set_result(req) + self._got_request_future = None + if self._pending_response is not None: + res = self._pending_response + self._pending_response = None + return res + + +class ServiceClientTestNode(rclpy.node.Node): + """Node to test service client-side operation.""" + + def __init__(self): + super().__init__("test_service_client_node") + self._client = self.create_client(test_msgs.srv.BasicTypes, "test_service") + + def issue_request( + self, value: float + ) -> rclpy.Future[test_msgs.srv.BasicTypes.Response]: + req = test_msgs.srv.BasicTypes.Request(float32_value=value) + return self._client.call_async(req) + + +class TimerTestNode(rclpy.node.Node): + """Node to test timer operation.""" + + def __init__( + self, index: int = 0, parameter_overrides: list[rclpy.Parameter] | None = None + ): + super().__init__(f"test_timer{index}", parameter_overrides=parameter_overrides) + self._timer_events = 0 + self._tick_future: rclpy.Future[None] | None = None + self._timer = self.create_timer(0.1, self._handle_timer) + + @property + def timer_events(self) -> int: + return self._timer_events + + def expect_tick(self) -> rclpy.Future[None]: + self._tick_future = rclpy.Future() + return self._tick_future + + def _handle_timer(self) -> None: + self._timer_events += 1 + if self._tick_future is not None: + self._tick_future.set_result(None) + self._tick_future = None + + +class ClockPublisherNode(rclpy.node.Node): + """Node to publish rostime clock updates.""" + + def __init__(self): + super().__init__("clock_node") + self._now = rclpy.time.Time() + self._pub = self.create_publisher( + rosgraph_msgs.msg.Clock, + "/clock", + rclpy.qos.QoSProfile( + depth=1, reliability=rclpy.qos.ReliabilityPolicy.BEST_EFFORT + ), + ) + + def advance_time(self, millisec: int) -> None: + self._now += rclpy.duration.Duration(nanoseconds=millisec * 1000000) + self._pub.publish(rosgraph_msgs.msg.Clock(clock=self._now.to_msg())) + + +class ActionServerTestNode(rclpy.node.Node): + """Node to test action server-side operation.""" + + def __init__(self): + super().__init__( + "test_action_server_node", + parameter_overrides=[rclpy.Parameter("use_sim_time", value=True)], + ) + self._got_goal_future: rclpy.Future[ + test_msgs.action.Fibonacci.Goal + ] | None = None + self._srv = rclpy.action.ActionServer( + self, + test_msgs.action.Fibonacci, + "test_action", + self._handle_execute, + handle_accepted_callback=self._handle_accepted, + result_timeout=10, + ) + self._goal_handle: rclpy.action.server.ServerGoalHandle | None = None + self._sequence: list[int] = [] + + def expect_goal(self) -> rclpy.Future[test_msgs.action.Fibonacci.Goal]: + self._goal_handle = None + self._got_goal_future = rclpy.Future() + return self._got_goal_future + + def _handle_accepted( + self, goal_handle: rclpy.action.server.ServerGoalHandle + ) -> None: + self._goal_handle = goal_handle + self._sequence = [0, 1] + if self._got_goal_future is not None: + self._got_goal_future.set_result(goal_handle.request) + self._got_goal_future = None + # Wait to finish until instructed by test + + def advance_feedback(self) -> list[int] | None: + """Add an 
entry to the result in progress and sends a feedback message. + + Returns the current sequence in progress if incomplete, or None if the sequence + is complete and it's time to complete the operation instead. + + """ + assert self._goal_handle is not None + n = self._goal_handle.request.order + 1 + if len(self._sequence) < n: + self._sequence.append(self._sequence[-2] + self._sequence[-1]) + if len(self._sequence) >= n: + return None + + # FYI normally feedbacks would be sent from the execute handler, but we've tied + # it to its own public method for testing + fb = test_msgs.action.Fibonacci.Feedback() + fb.sequence = self._sequence + self._goal_handle.publish_feedback(fb) + return self._sequence + + def execute(self) -> rclpy.action.server.ServerGoalHandle: + """Completes the action in progress. + + Returns the handle to the goal executed. + + """ + handle = self._goal_handle + self._goal_handle = None + assert handle is not None + handle.execute() + return handle + + def _handle_execute( + self, goal_handle: rclpy.action.server.ServerGoalHandle + ) -> test_msgs.action.Fibonacci.Result: + goal_handle.succeed() + result = test_msgs.action.Fibonacci.Result() + result.sequence = self._sequence + return result + + +class ActionClientTestNode(rclpy.node.Node): + """Node to test action client-side operation.""" + + def __init__(self): + super().__init__("test_action_client_node") + self._client = rclpy.action.ActionClient( + self, test_msgs.action.Fibonacci, "test_action" + ) + self._feedback_future: rclpy.Future[ + test_msgs.action.Fibonacci.Feedback + ] | None = None + self._result_future: rclpy.Future[ + test_msgs.action.Fibonacci.Result + ] | None = None + + def send_goal( + self, order: int + ) -> rclpy.Future[rclpy.action.client.ClientGoalHandle]: + """Send a new goal. + + The future will contain the goal handle when the goal submission response has + been received. + + """ + self._client.wait_for_server() + goal_ack_future = self._client.send_goal_async( + test_msgs.action.Fibonacci.Goal(order=order), + feedback_callback=self._handle_feedback, + ) + goal_ack_future.add_done_callback(self._handle_goal_ack) + return goal_ack_future + + def _handle_goal_ack( + self, future: rclpy.Future[rclpy.action.client.ClientGoalHandle] + ) -> None: + handle = future.result() + assert handle is not None + result_future = handle.get_result_async() + result_future.add_done_callback(self._handle_result_response) + + def expect_feedback(self) -> rclpy.Future[test_msgs.action.Fibonacci.Feedback]: + self._feedback_future = rclpy.Future() + return self._feedback_future + + def _handle_feedback( + self, + # If this is a private 'Impl' detail, why is rclpy handing this out?? 
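+        # (The wrapper message carries the goal ID plus the user-visible .feedback
+        # field, which is what gets forwarded below.)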
+        fb_msg: test_msgs.action.Fibonacci.Impl.FeedbackMessage,
+    ) -> None:
+        if self._feedback_future is not None:
+            self._feedback_future.set_result(fb_msg.feedback)
+            self._feedback_future = None
+
+    def expect_result(
+        self,
+    ) -> rclpy.Future[test_msgs.action.Fibonacci.Result]:
+        self._result_future = rclpy.Future()
+        return self._result_future
+
+    def _handle_result_response(
+        self, future: rclpy.Future[test_msgs.action.Fibonacci_GetResult_Response]
+    ) -> None:
+        response: test_msgs.action.Fibonacci_GetResult_Response | None = future.result()
+        assert response is not None
+        assert self._result_future is not None
+        result: test_msgs.action.Fibonacci.Result = response.result
+        self._result_future.set_result(result)
+        self._result_future = None
+
+
+class TestEventsExecutor(unittest.TestCase):
+    def setUp(self) -> None:
+        super().setUp()
+        # Prevent nodes under test from discovering other random stuff to talk to
+        os.environ["ROS_AUTOMATIC_DISCOVERY_RANGE"] = "OFF"
+        rclpy.init()
+
+        self.executor = rclpy.experimental.EventsExecutor()
+
+    def tearDown(self) -> None:
+        rclpy.shutdown()
+
+    def _expect_future_done(self, future: rclpy.Future[typing.Any]) -> None:
+        # Use a moderately long timeout with the expectation that we shouldn't often
+        # need the whole duration.
+        self.executor.spin_until_future_complete(future, 1.0)
+        self.assertTrue(future.done())
+
+    def _expect_future_not_done(self, future: rclpy.Future[typing.Any]) -> None:
+        # Use a short timeout to give the future some time to complete if we are
+        # going to fail, but not very long because we'll be waiting the full duration
+        # every time during successful tests.  It's ok if the timeout is a bit short
+        # and the failure isn't 100% deterministic.
+        self.executor.spin_until_future_complete(future, 0.2)
+        self.assertFalse(future.done())
+
+    def _spin_for(self, sec: float) -> None:
+        """Spin the executor for the given number of realtime seconds."""
+        # Note that this roundabout approach of waiting on a future that will never
+        # finish with a timeout seems to be the only way with the rclpy.Executor API
+        # to spin for a fixed time.
+        self.executor.spin_until_future_complete(rclpy.Future(), sec)
+
+    def _check_match_event_future(
+        self,
+        future: rclpy.Future[
+            rclpy.event_handler.QoSSubscriptionMatchedInfo
+            | rclpy.event_handler.QoSPublisherMatchedInfo
+        ],
+        total_count: int,
+        current_count: int,
+    ) -> None:
+        # NOTE: fastdds appears to be buggy and reports a change in total_count with
+        # total_count_change staying zero.  cyclonedds works as expected.  Rather
+        # than have this test be sensitive to which RMW is selected, let's just avoid
+        # testing the change fields altogether.
+ self._expect_future_done(future) + info: ( # These two python types are both actually rmw_matched_status_t + rclpy.event_handler.QoSSubscriptionMatchedInfo + | rclpy.event_handler.QoSPublisherMatchedInfo + | None + ) = future.result() + assert info is not None + self.assertEqual(info.total_count, total_count) + self.assertEqual(info.current_count, current_count) + + def _check_message_future( + self, future: rclpy.Future[test_msgs.msg.BasicTypes], value: float + ) -> None: + self._expect_future_done(future) + msg: test_msgs.msg.BasicTypes | None = future.result() + assert msg is not None + self.assertAlmostEqual(msg.float32_value, value, places=5) + + def _check_service_request_future( + self, future: rclpy.Future[test_msgs.srv.BasicTypes.Request], value: float + ) -> None: + self._expect_future_done(future) + req: test_msgs.srv.BasicTypes.Request | None = future.result() + assert req is not None + self.assertAlmostEqual(req.float32_value, value, places=5) + + def _check_service_response_future( + self, + future: rclpy.Future[test_msgs.srv.BasicTypes.Response], + success: bool, + error_msg: str, + ) -> None: + self._expect_future_done(future) + res: test_msgs.srv.BasicTypes.Response | None = future.result() + assert res is not None + self.assertEqual(res.bool_value, success) + self.assertEqual(res.string_value, error_msg) + + def test_pub_sub(self) -> None: + sub_node = SubTestNode() + new_pub_future = sub_node.expect_pub_info() + received_future = sub_node.expect_message() + self.executor.add_node(sub_node) + + # With subscriber node alone, should be no publisher or messages + self._expect_future_not_done(new_pub_future) + self.assertFalse(received_future.done()) # Already waited a bit + + pub_node = PubTestNode() + new_sub_future = pub_node.expect_sub_info() + self.executor.add_node(pub_node) + + # Publisher and subscriber should find each other but no messages should be + # exchanged yet + self._check_match_event_future(new_pub_future, 1, 1) + new_pub_future = sub_node.expect_pub_info() + self._check_match_event_future(new_sub_future, 1, 1) + new_sub_future = pub_node.expect_sub_info() + self._expect_future_not_done(received_future) + + # Send messages and make sure they're received. + for i in range(300): + pub_node.publish(0.1 * i) + self._check_message_future(received_future, 0.1 * i) + received_future = sub_node.expect_message() + + # Destroy the subscriber node, make sure the publisher is notified + self.executor.remove_node(sub_node) + sub_node.destroy_node() + self._check_match_event_future(new_sub_future, 1, 0) + + # Publish another message to ensure all subscriber callbacks got cleaned up + pub_node.publish(4.7) + self._expect_future_not_done(new_pub_future) + self.assertFalse(received_future.done()) # Already waited a bit + + def test_pub_sub_multi_message(self) -> None: + # Creates a transient local publisher and queues multiple messages on it. Then + # creates a subscriber and makes sure all sent messages get delivered when it + # comes up. 
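+        # (KEEP_ALL history plus TRANSIENT_LOCAL durability means a late-joining
+        # subscriber should receive every message published before it existed.)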
+ pub_node = PubTestNode(transient_local=True) + self.executor.add_node(pub_node) + for i in range(5): + pub_node.publish(0.1 * i) + + sub_node = SubTestNode(transient_local=True) + received_future = sub_node.expect_message() + received_messages: list[test_msgs.msg.BasicTypes] = [] + + def handle_message(future: rclpy.Future[test_msgs.msg.BasicTypes]) -> None: + nonlocal received_future + msg = future.result() + assert msg is not None + received_messages.append(msg) + received_future = sub_node.expect_message() + received_future.add_done_callback(handle_message) + + received_future.add_done_callback(handle_message) + self._expect_future_not_done(received_future) + self.executor.add_node(sub_node) + while len(received_messages) < 5: + self._expect_future_done(received_future) + self.assertEqual(len(received_messages), 5) + for i in range(5): + self.assertAlmostEqual( + received_messages[i].float32_value, 0.1 * i, places=5 + ) + self._expect_future_not_done(received_future) + + pub_node.publish(0.5) + self._check_message_future(received_future, 0.5) + + def test_service(self) -> None: + server_node = ServiceServerTestNode() + got_request_future = server_node.expect_request(True, "test response 0") + self.executor.add_node(server_node) + self._expect_future_not_done(got_request_future) + + client_node = ServiceClientTestNode() + self.executor.add_node(client_node) + self._expect_future_not_done(got_request_future) + for i in range(300): + got_response_future = client_node.issue_request(7.1) + self._check_service_request_future(got_request_future, 7.1) + got_request_future = server_node.expect_request( + True, f"test response {i + 1}" + ) + self._check_service_response_future( + got_response_future, True, f"test response {i}" + ) + + # Destroy server node and retry issuing a request + self.executor.remove_node(server_node) + server_node.destroy_node() + self._expect_future_not_done(got_request_future) + got_response_future = client_node.issue_request(5.0) + self._expect_future_not_done(got_request_future) + self.assertFalse(got_response_future.done()) # Already waited a bit + + def test_timers(self) -> None: + realtime_node = TimerTestNode(index=0) + rostime_node = TimerTestNode( + index=1, parameter_overrides=[rclpy.Parameter("use_sim_time", value=True)] + ) + clock_node = ClockPublisherNode() + for node in [realtime_node, rostime_node, clock_node]: + self.executor.add_node(node) + + # Wait a bit, and make sure the realtime timer ticks, and the rostime one does + # not. Since this is based on wall time, be very flexible on tolerances here. + self._spin_for(1.0) + realtime_ticks = realtime_node.timer_events + self.assertGreater(realtime_ticks, 1) + self.assertLess(realtime_ticks, 50) + self.assertEqual(rostime_node.timer_events, 0) + + # Manually tick the rostime timer by less than a full interval. + rostime_tick_future = rostime_node.expect_tick() + for _ in range(99): + clock_node.advance_time(1) + self._expect_future_not_done(rostime_tick_future) + clock_node.advance_time(1) + self._expect_future_done(rostime_tick_future) + # Now tick by a bunch of full intervals. + for _ in range(300): + rostime_tick_future = rostime_node.expect_tick() + clock_node.advance_time(100) + self._expect_future_done(rostime_tick_future) + + # Ensure the realtime timer ticked much less than the rostime one. 
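+        # (The realtime timer has only had a few wall-clock seconds to tick at 10Hz,
+        # while the rostime timer was just driven through 300 full intervals.)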
+ self.assertLess(realtime_node.timer_events, rostime_node.timer_events) + + def test_action(self) -> None: + clock_node = ClockPublisherNode() + self.executor.add_node(clock_node) + + server_node = ActionServerTestNode() + got_goal_future = server_node.expect_goal() + self.executor.add_node(server_node) + clock_node.advance_time(0) + self._expect_future_not_done(got_goal_future) + + client_node = ActionClientTestNode() + self.executor.add_node(client_node) + self._expect_future_not_done(got_goal_future) + for i in range(300): + order = (i % 40) + 1 # Don't want sequence to get too big + goal_acknowledged_future = client_node.send_goal(order) + + self._expect_future_done(got_goal_future) + self._expect_future_done(goal_acknowledged_future) + req: test_msgs.action.Fibonacci.Goal | None = got_goal_future.result() + assert req is not None + self.assertEqual(req.order, order) + result_future = client_node.expect_result() + + while True: + got_feedback_future = client_node.expect_feedback() + seq = server_node.advance_feedback() + if seq is None: + break + self._expect_future_done(got_feedback_future) + feedback = got_feedback_future.result() + assert feedback is not None + self.assertEqual(len(feedback.sequence), len(seq)) + + last_handle = server_node.execute() + self._expect_future_done(result_future) + self.assertFalse(got_feedback_future.done()) + + res: test_msgs.action.Fibonacci.Result | None = result_future.result() + assert res is not None + self.assertEqual(len(res.sequence), order + 1) + + got_goal_future = server_node.expect_goal() + + # Test completed goal expiration by time + self.assertEqual( + last_handle.status, action_msgs.msg.GoalStatus.STATUS_SUCCEEDED + ) + clock_node.advance_time(9999) + self._spin_for(0.2) + self.assertEqual( + last_handle.status, action_msgs.msg.GoalStatus.STATUS_SUCCEEDED + ) + clock_node.advance_time(2) + self._spin_for(0.2) + self.assertEqual(last_handle.status, action_msgs.msg.GoalStatus.STATUS_UNKNOWN) + + # Destroy server node and retry issuing a goal + self.executor.remove_node(server_node) + server_node.destroy_node() + self._expect_future_not_done(got_goal_future) + goal_acknowledged_future = client_node.send_goal(5) + self._expect_future_not_done(got_goal_future) + self.assertFalse(goal_acknowledged_future.done()) # Already waited a bit + + +if __name__ == "__main__": + unittest.main() From 18036c485947f6470f801b1ff0625c7df5043391 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Sat, 4 Jan 2025 17:08:12 -0500 Subject: [PATCH 02/31] Fix explosion for reference count updates without GIL The previous version worked on 22.04+Iron, but fails on 24.04+Jazzy or Rolling. It seems like the behavior of std::bind() may have changed when asked to bind a py::object to a callback taking a py::handle. Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index 8cc964c66..8c9528c46 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -1,4 +1,4 @@ -// Copyright 2024 Brad Martin +// Copyright 2024-2025 Brad Martin // Copyright 2024 Merlin Labs, Inc. 
// // Based on a similar approach as the iRobot rclcpp EventsExecutor implementation: @@ -530,6 +530,10 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { auto with_waitset = std::make_shared(wait_set); waitable.attr("add_to_wait_set")(wait_set); rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set); + // Be careful not to bind any py::objects into any callbacks, because that will try to + // do reference counting without the GIL. Ownership will be maintained with the info + // struct instance which is guaranteed to outlast any callback. + py::handle ws_handle = wait_set; // We null out each entry in the waitset as we set it up, so that the waitset itself // can be reused when something becomes ready to signal to the Waitable what's ready // and what's not. We also bind with_waitset into each callback we set up, to ensure @@ -541,7 +545,7 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { rcl_waitset->subscriptions[i] = nullptr; sub_entities.subscriptions.push_back(rcl_sub); const auto cb = std::bind(&EventsExecutor::HandleWaitableSubReady, this, waitable, - rcl_sub, wait_set, i, with_waitset, pl::_1); + rcl_sub, ws_handle, i, with_waitset, pl::_1); if (RCL_RET_OK != rcl_subscription_set_on_new_message_callback( rcl_sub, RclEventCallbackTrampoline, @@ -562,7 +566,7 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { // own an instance of `with_waitable` associated with this callback, we'll bind it // directly into the callback instead. const auto cb = std::bind(&EventsExecutor::HandleWaitableTimerReady, this, waitable, - rcl_timer, wait_set, i, with_waitable, with_waitset); + rcl_timer, ws_handle, i, with_waitable, with_waitset); timers_manager_.rcl_manager().AddTimer(rcl_timer, cb); } for (size_t i = 0; i < rcl_waitset->size_of_clients; ++i) { @@ -570,7 +574,7 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { rcl_waitset->clients[i] = nullptr; sub_entities.clients.push_back(rcl_client); const auto cb = std::bind(&EventsExecutor::HandleWaitableClientReady, this, - waitable, rcl_client, wait_set, i, with_waitset, pl::_1); + waitable, rcl_client, ws_handle, i, with_waitset, pl::_1); if (RCL_RET_OK != rcl_client_set_on_new_response_callback( rcl_client, RclEventCallbackTrampoline, @@ -586,7 +590,7 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { rcl_waitset->services[i] = nullptr; sub_entities.services.push_back(rcl_service); const auto cb = std::bind(&EventsExecutor::HandleWaitableServiceReady, this, - waitable, rcl_service, wait_set, i, with_waitset, pl::_1); + waitable, rcl_service, ws_handle, i, with_waitset, pl::_1); if (RCL_RET_OK != rcl_service_set_on_new_request_callback( rcl_service, RclEventCallbackTrampoline, @@ -602,7 +606,7 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { rcl_waitset->events[i] = nullptr; sub_entities.events.push_back(rcl_event); const auto cb = std::bind(&EventsExecutor::HandleWaitableEventReady, this, waitable, - rcl_event, wait_set, i, with_waitset, pl::_1); + rcl_event, ws_handle, i, with_waitset, pl::_1); if (RCL_RET_OK != rcl_event_set_callback(rcl_event, RclEventCallbackTrampoline, rcl_callback_manager_.MakeCallback( rcl_event, cb, with_waitable))) { From ebb1ee30975e70c59680ecc076f0d6123a09c0f7 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Tue, 7 Jan 2025 15:15:37 -0500 Subject: [PATCH 03/31] Fix rclpy triggering its own deprecation warnings Signed-off-by: Brad Martin --- rclpy/rclpy/event_handler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/rclpy/rclpy/event_handler.py b/rclpy/rclpy/event_handler.py index 3700c438d..274957769 100644 --- a/rclpy/rclpy/event_handler.py +++ b/rclpy/rclpy/event_handler.py @@ -269,7 +269,7 @@ def _default_incompatible_type_callback(event): pass if self.matched: - event_handlers.append(QoSEventHandler( + event_handlers.append(EventHandler( callback_group=callback_group, callback=self.matched, event_type=QoSSubscriptionEventType.RCL_SUBSCRIPTION_MATCHED, @@ -380,7 +380,7 @@ def _default_incompatible_type_callback(event): pass if self.matched: - event_handlers.append(QoSEventHandler( + event_handlers.append(EventHandler( callback_group=callback_group, callback=self.matched, event_type=QoSPublisherEventType.RCL_PUBLISHER_MATCHED, From 68ecfb49cff0baa6131b14204d1f8f604f4087f3 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Tue, 7 Jan 2025 16:47:35 -0500 Subject: [PATCH 04/31] Fix ament linter complaints Signed-off-by: Brad Martin --- rclpy/rclpy/experimental/__init__.py | 4 +- rclpy/rclpy/experimental/events_executor.py | 4 +- .../rclpy/events_executor/events_executor.cpp | 372 ++++++++++-------- .../rclpy/events_executor/events_executor.hpp | 137 ++++--- .../rclpy/events_executor/python_hasher.hpp | 14 +- .../src/rclpy/events_executor/rcl_support.cpp | 72 ++-- .../src/rclpy/events_executor/rcl_support.hpp | 53 +-- .../src/rclpy/events_executor/scoped_with.hpp | 18 +- .../rclpy/events_executor/timers_manager.cpp | 160 ++++---- .../rclpy/events_executor/timers_manager.hpp | 46 ++- rclpy/test/test_events_executor.py | 59 +-- 11 files changed, 536 insertions(+), 403 deletions(-) diff --git a/rclpy/rclpy/experimental/__init__.py b/rclpy/rclpy/experimental/__init__.py index a8997525f..a39b6cb34 100644 --- a/rclpy/rclpy/experimental/__init__.py +++ b/rclpy/rclpy/experimental/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 Brad Martin +# Copyright 2024-2025 Brad Martin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .events_executor import EventsExecutor +from .events_executor import EventsExecutor # noqa: F401 diff --git a/rclpy/rclpy/experimental/events_executor.py b/rclpy/rclpy/experimental/events_executor.py index 98fd7d082..d3c29921f 100644 --- a/rclpy/rclpy/experimental/events_executor.py +++ b/rclpy/rclpy/experimental/events_executor.py @@ -1,4 +1,4 @@ -# Copyright 2024 Brad Martin +# Copyright 2024-2025 Brad Martin # Copyright 2024 Merlin Labs, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,8 +16,8 @@ import typing import rclpy.executors -import rclpy.node from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy +import rclpy.node # Try to look like we inherit from the rclpy Executor for type checking purposes without diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index 8c9528c46..1224b9d9c 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -24,32 +24,35 @@ // limitations under the License. 
#include "events_executor/events_executor.hpp" +#include +#include +#include +#include + +#include + #include #include #include -#include -#include - -#include -#include -#include - namespace pl = std::placeholders; namespace py = pybind11; -namespace rclpy { -namespace events_executor { +namespace rclpy +{ +namespace events_executor +{ EventsExecutor::EventsExecutor(py::object context) - : rclpy_context_(context), - asyncio_run_(py::module_::import("asyncio").attr("run")), - rclpy_task_(py::module_::import("rclpy.task").attr("Task")), - signals_(io_context_, SIGINT, SIGTERM), - rcl_callback_manager_(io_context_.get_executor()), - timers_manager_(io_context_.get_executor(), - std::bind(&EventsExecutor::HandleTimerReady, this, pl::_1)) { +: rclpy_context_(context), + asyncio_run_(py::module_::import("asyncio").attr("run")), + rclpy_task_(py::module_::import("rclpy.task").attr("Task")), + signals_(io_context_, SIGINT, SIGTERM), + rcl_callback_manager_(io_context_.get_executor()), + timers_manager_(io_context_.get_executor(), + std::bind(&EventsExecutor::HandleTimerReady, this, pl::_1)) +{ // rclpy.Executor creates a sigint handling guard condition here. This is necessary // because a sleeping event loop won't notice Ctrl-C unless some other event wakes // it up otherwise. @@ -59,24 +62,26 @@ EventsExecutor::EventsExecutor(py::object context) // just establish our own signal handling directly instead. This unfortunately // bypasses the rclpy.init() options that allow a user to disable certain signal // handlers, but it doesn't look like we can do any better. - signals_.async_wait([this](const boost::system::error_code& ec, int) { - if (!ec) { - py::gil_scoped_acquire gil_acquire; - // Don't call context.try_shutdown() here, because that can call back to us to - // request a blocking shutdown(), which doesn't make any sense because we have to - // be spinning to process the callback that's asked to wait for spinning to stop. - // We'll have to call that later outside of any spin loop. - // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/__init__.py#L105-L109 - sigint_pending_.store(true); - io_context_.stop(); - } + signals_.async_wait([this](const boost::system::error_code & ec, int) { + if (!ec) { + py::gil_scoped_acquire gil_acquire; + // Don't call context.try_shutdown() here, because that can call back to us to + // request a blocking shutdown(), which doesn't make any sense because we have + // to be spinning to process the callback that's asked to wait for spinning to + // stop. We'll have to call that later outside of any spin loop. + // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/__init__.py#L105-L109 + sigint_pending_.store(true); + io_context_.stop(); + } }); } -EventsExecutor::~EventsExecutor() { shutdown(); } +EventsExecutor::~EventsExecutor() {shutdown();} -pybind11::object EventsExecutor::create_task(py::object callback, py::args args, - const py::kwargs& kwargs) { +pybind11::object EventsExecutor::create_task( + py::object callback, py::args args, + const py::kwargs & kwargs) +{ // Create and return a rclpy.task.Task() object, and schedule it to be called later. 
using py::literals::operator""_a; py::object task = rclpy_task_(callback, args, kwargs, "executor"_a = py::cast(this)); @@ -91,7 +96,8 @@ pybind11::object EventsExecutor::create_task(py::object callback, py::args args, return task; } -bool EventsExecutor::shutdown(std::optional timeout) { +bool EventsExecutor::shutdown(std::optional timeout) +{ // NOTE: The rclpy context can invoke this with a lock on the context held. Therefore // we must not try to go access that context during this method or we can deadlock. // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/context.py#L101-L103 @@ -115,7 +121,8 @@ bool EventsExecutor::shutdown(std::optional timeout) { return true; } -bool EventsExecutor::add_node(py::object node) { +bool EventsExecutor::add_node(py::object node) +{ std::lock_guard lock(nodes_mutex_); if (nodes_.contains(node)) { return false; @@ -128,7 +135,8 @@ bool EventsExecutor::add_node(py::object node) { return true; } -void EventsExecutor::remove_node(py::handle node) { +void EventsExecutor::remove_node(py::handle node) +{ std::lock_guard lock(nodes_mutex_); if (!nodes_.contains(node)) { return; @@ -139,12 +147,13 @@ void EventsExecutor::remove_node(py::handle node) { wake(); } -void EventsExecutor::wake() { +void EventsExecutor::wake() +{ if (!wake_pending_.exchange(true)) { // Update tracked entities. boost::asio::post(io_context_, [this]() { - py::gil_scoped_acquire gil_acquire; - UpdateEntitiesFromNodes(!py::cast(rclpy_context_.attr("ok")())); + py::gil_scoped_acquire gil_acquire; + UpdateEntitiesFromNodes(!py::cast(rclpy_context_.attr("ok")())); }); } } @@ -155,7 +164,8 @@ void EventsExecutor::wake() { // executor instance. // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/executors.py#L184-L185 -void EventsExecutor::spin(std::optional timeout_sec) { +void EventsExecutor::spin(std::optional timeout_sec) +{ { std::unique_lock spin_lock(spinning_mutex_, std::try_to_lock); if (!spin_lock) { @@ -180,7 +190,8 @@ void EventsExecutor::spin(std::optional timeout_sec) { } } -void EventsExecutor::spin_once(std::optional timeout_sec) { +void EventsExecutor::spin_once(std::optional timeout_sec) +{ { std::unique_lock spin_lock(spinning_mutex_, std::try_to_lock); if (!spin_lock) { @@ -205,21 +216,25 @@ void EventsExecutor::spin_once(std::optional timeout_sec) { } } -void EventsExecutor::spin_until_future_complete(py::handle future, - std::optional timeout_sec) { +void EventsExecutor::spin_until_future_complete( + py::handle future, + std::optional timeout_sec) +{ future.attr("add_done_callback")( - py::cpp_function([this](py::handle) { io_context_.stop(); })); + py::cpp_function([this](py::handle) {io_context_.stop();})); spin(timeout_sec); } void EventsExecutor::spin_once_until_future_complete( - py::handle future, std::optional timeout_sec) { + py::handle future, std::optional timeout_sec) +{ future.attr("add_done_callback")( - py::cpp_function([this](py::handle) { io_context_.stop(); })); + py::cpp_function([this](py::handle) {io_context_.stop();})); spin_once(timeout_sec); } -void EventsExecutor::UpdateEntitiesFromNodes(bool shutdown) { +void EventsExecutor::UpdateEntitiesFromNodes(bool shutdown) +{ // Clear pending flag as early as possible, so we error on the side of retriggering // a few harmless updates rather than potentially missing important additions. 
wake_pending_.store(false); @@ -277,9 +292,10 @@ void EventsExecutor::UpdateEntitiesFromNodes(bool shutdown) { } void EventsExecutor::UpdateEntitySet( - py::set& entity_set, const py::set& new_entity_set, - std::function added_entity_callback, - std::function removed_entity_callback) { + py::set & entity_set, const py::set & new_entity_set, + std::function added_entity_callback, + std::function removed_entity_callback) +{ py::set added_entities = new_entity_set - entity_set; for (py::handle added_entity : added_entities) { added_entity_callback(added_entity); @@ -293,26 +309,30 @@ void EventsExecutor::UpdateEntitySet( entity_set = new_entity_set; } -void EventsExecutor::HandleAddedSubscription(py::handle subscription) { +void EventsExecutor::HandleAddedSubscription(py::handle subscription) +{ py::handle handle = subscription.attr("handle"); auto with = std::make_shared(handle); - const rcl_subscription_t* rcl_ptr = GetRclSubscription(handle); + const rcl_subscription_t * rcl_ptr = GetRclSubscription(handle); const auto cb = - std::bind(&EventsExecutor::HandleSubscriptionReady, this, subscription, pl::_1); + std::bind(&EventsExecutor::HandleSubscriptionReady, this, subscription, pl::_1); if (RCL_RET_OK != rcl_subscription_set_on_new_message_callback( rcl_ptr, RclEventCallbackTrampoline, - rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) { + rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) + { throw std::runtime_error( std::string("Failed to set the on new message callback for subscription: ") + rcl_get_error_string().str); } } -void EventsExecutor::HandleRemovedSubscription(py::handle subscription) { +void EventsExecutor::HandleRemovedSubscription(py::handle subscription) +{ py::handle handle = subscription.attr("handle"); - const rcl_subscription_t* rcl_ptr = GetRclSubscription(handle); + const rcl_subscription_t * rcl_ptr = GetRclSubscription(handle); if (RCL_RET_OK != - rcl_subscription_set_on_new_message_callback(rcl_ptr, nullptr, nullptr)) { + rcl_subscription_set_on_new_message_callback(rcl_ptr, nullptr, nullptr)) + { throw std::runtime_error( std::string("Failed to clear the on new message callback for subscription: ") + rcl_get_error_string().str); @@ -320,8 +340,10 @@ void EventsExecutor::HandleRemovedSubscription(py::handle subscription) { rcl_callback_manager_.RemoveCallback(rcl_ptr); } -void EventsExecutor::HandleSubscriptionReady(py::handle subscription, - size_t number_of_events) { +void EventsExecutor::HandleSubscriptionReady( + py::handle subscription, + size_t number_of_events) +{ py::gil_scoped_acquire gil_acquire; // Largely based on rclpy.Executor._take_subscription() and _execute_subcription(). 
@@ -334,7 +356,7 @@ void EventsExecutor::HandleSubscriptionReady(py::handle subscription, const py::handle msg_type = subscription.attr("msg_type"); const py::handle raw = subscription.attr("raw"); const int callback_type = - py::cast(subscription.attr("_callback_type").attr("value")); + py::cast(subscription.attr("_callback_type").attr("value")); const int message_only = py::cast( subscription.attr("CallbackType").attr("MessageOnly").attr("value")); const py::handle callback = subscription.attr("callback"); @@ -354,7 +376,7 @@ void EventsExecutor::HandleSubscriptionReady(py::handle subscription, } else { callback(msg_info); } - } catch (const py::error_already_set& e) { + } catch (const py::error_already_set & e) { HandleCallbackExceptionInNodeEntity(e, subscription, "subscriptions"); throw; } @@ -364,46 +386,53 @@ void EventsExecutor::HandleSubscriptionReady(py::handle subscription, } } -void EventsExecutor::HandleAddedTimer(py::handle timer) { +void EventsExecutor::HandleAddedTimer(py::handle timer) +{ timers_manager_.AddTimer(timer); } -void EventsExecutor::HandleRemovedTimer(py::handle timer) { +void EventsExecutor::HandleRemovedTimer(py::handle timer) +{ timers_manager_.RemoveTimer(timer); } -void EventsExecutor::HandleTimerReady(py::handle timer) { +void EventsExecutor::HandleTimerReady(py::handle timer) +{ py::gil_scoped_acquire gil_acquire; try { // Unlike most rclpy objects this doesn't document whether it's a Callable or might // be a Coroutine. Let's hope it's the former. 🤞 timer.attr("callback")(); - } catch (const py::error_already_set& e) { + } catch (const py::error_already_set & e) { HandleCallbackExceptionInNodeEntity(e, timer, "timers"); throw; } } -void EventsExecutor::HandleAddedClient(py::handle client) { +void EventsExecutor::HandleAddedClient(py::handle client) +{ py::handle handle = client.attr("handle"); auto with = std::make_shared(handle); - const rcl_client_t* rcl_ptr = GetRclClient(handle); + const rcl_client_t * rcl_ptr = GetRclClient(handle); const auto cb = std::bind(&EventsExecutor::HandleClientReady, this, client, pl::_1); if (RCL_RET_OK != rcl_client_set_on_new_response_callback( rcl_ptr, RclEventCallbackTrampoline, - rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) { + rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) + { throw std::runtime_error( std::string("Failed to set the on new response callback for client: ") + rcl_get_error_string().str); } } -void EventsExecutor::HandleRemovedClient(py::handle client) { +void EventsExecutor::HandleRemovedClient(py::handle client) +{ py::handle handle = client.attr("handle"); - const rcl_client_t* rcl_ptr = GetRclClient(handle); + const rcl_client_t * rcl_ptr = GetRclClient(handle); if (RCL_RET_OK != - rcl_client_set_on_new_response_callback(rcl_ptr, nullptr, nullptr)) { + rcl_client_set_on_new_response_callback(rcl_ptr, nullptr, nullptr)) + { throw std::runtime_error( std::string("Failed to clear the on new response callback for client: ") + rcl_get_error_string().str); @@ -411,7 +440,8 @@ void EventsExecutor::HandleRemovedClient(py::handle client) { rcl_callback_manager_.RemoveCallback(rcl_ptr); } -void EventsExecutor::HandleClientReady(py::handle client, size_t number_of_events) { +void EventsExecutor::HandleClientReady(py::handle client, size_t number_of_events) +{ py::gil_scoped_acquire gil_acquire; // Largely based on rclpy.Executor._take_client() and _execute_client(). 
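
Each Handle*Ready method above is adapted into the std::function<void(size_t)> that the callback manager expects via std::bind, with a placeholder for the event count (the pl alias appears to be std::placeholders). A self-contained sketch of that binding pattern, using a hypothetical Exec class rather than the executor itself:

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

struct Exec
{
  void HandleReady(const std::string & entity, size_t number_of_events)
  {
    std::cout << entity << ": " << number_of_events << " event(s)\n";
  }
};

int main()
{
  namespace pl = std::placeholders;
  Exec exec;
  std::function<void(size_t)> cb =
    std::bind(&Exec::HandleReady, &exec, "subscription", pl::_1);
  cb(3);  // later invoked by the event source with the event count
  // An equivalent (and often clearer) lambda capture:
  auto cb2 = [&exec](size_t n) {exec.HandleReady("client", n);};
  cb2(1);
  return 0;
}
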
@@ -431,7 +461,7 @@ void EventsExecutor::HandleClientReady(py::handle client, size_t number_of_event py::object future; try { future = get_pending_request(sequence); - } catch (const py::error_already_set& e) { + } catch (const py::error_already_set & e) { if (e.matches(PyExc_KeyError)) { // The request was cancelled continue; @@ -441,7 +471,7 @@ void EventsExecutor::HandleClientReady(py::handle client, size_t number_of_event future.attr("_set_executor")(py::cast(this)); try { future.attr("set_result")(response); - } catch (const py::error_already_set& e) { + } catch (const py::error_already_set & e) { HandleCallbackExceptionInNodeEntity(e, client, "clients"); throw; } @@ -449,25 +479,29 @@ void EventsExecutor::HandleClientReady(py::handle client, size_t number_of_event } } -void EventsExecutor::HandleAddedService(py::handle service) { +void EventsExecutor::HandleAddedService(py::handle service) +{ py::handle handle = service.attr("handle"); auto with = std::make_shared(handle); - const rcl_service_t* rcl_ptr = GetRclService(handle); + const rcl_service_t * rcl_ptr = GetRclService(handle); const auto cb = std::bind(&EventsExecutor::HandleServiceReady, this, service, pl::_1); if (RCL_RET_OK != rcl_service_set_on_new_request_callback( rcl_ptr, RclEventCallbackTrampoline, - rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) { + rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) + { throw std::runtime_error( std::string("Failed to set the on new request callback for service: ") + rcl_get_error_string().str); } } -void EventsExecutor::HandleRemovedService(py::handle service) { +void EventsExecutor::HandleRemovedService(py::handle service) +{ py::handle handle = service.attr("handle"); - const rcl_service_t* rcl_ptr = GetRclService(handle); + const rcl_service_t * rcl_ptr = GetRclService(handle); if (RCL_RET_OK != - rcl_service_set_on_new_request_callback(rcl_ptr, nullptr, nullptr)) { + rcl_service_set_on_new_request_callback(rcl_ptr, nullptr, nullptr)) + { throw std::runtime_error( std::string("Failed to clear the on new request callback for service: ") + rcl_get_error_string().str); @@ -475,7 +509,8 @@ void EventsExecutor::HandleRemovedService(py::handle service) { rcl_callback_manager_.RemoveCallback(rcl_ptr); } -void EventsExecutor::HandleServiceReady(py::handle service, size_t number_of_events) { +void EventsExecutor::HandleServiceReady(py::handle service, size_t number_of_events) +{ py::gil_scoped_acquire gil_acquire; // Largely based on rclpy.Executor._take_service() and _execute_service(). @@ -498,7 +533,7 @@ void EventsExecutor::HandleServiceReady(py::handle service, size_t number_of_eve py::object response; try { response = callback(request, res_type()); - } catch (const py::error_already_set& e) { + } catch (const py::error_already_set & e) { HandleCallbackExceptionInNodeEntity(e, service, "services"); throw; } @@ -508,7 +543,8 @@ void EventsExecutor::HandleServiceReady(py::handle service, size_t number_of_eve } } -void EventsExecutor::HandleAddedWaitable(py::handle waitable) { +void EventsExecutor::HandleAddedWaitable(py::handle waitable) +{ // The Waitable API is too abstract for us to work with directly; it only exposes APIs // for dealing with wait sets, and all of the rcl callback API requires knowing // exactly what kinds of rcl objects you're working with. 
We'll try to figure out @@ -522,14 +558,14 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { throw std::runtime_error("Guard conditions not supported"); } const py::object _rclpy = py::module_::import("rclpy.impl.implementation_singleton") - .attr("rclpy_implementation"); + .attr("rclpy_implementation"); py::object wait_set = _rclpy.attr("WaitSet")( num_entities.attr("num_subscriptions"), 0, num_entities.attr("num_timers"), num_entities.attr("num_clients"), num_entities.attr("num_services"), num_entities.attr("num_events"), rclpy_context_.attr("handle")); auto with_waitset = std::make_shared(wait_set); waitable.attr("add_to_wait_set")(wait_set); - rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set); + rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); // Be careful not to bind any py::objects into any callbacks, because that will try to // do reference counting without the GIL. Ownership will be maintained with the info // struct instance which is guaranteed to outlast any callback. @@ -541,15 +577,16 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { // registered. WaitableSubEntities sub_entities; for (size_t i = 0; i < rcl_waitset->size_of_subscriptions; ++i) { - const rcl_subscription_t* const rcl_sub = rcl_waitset->subscriptions[i]; + const rcl_subscription_t * const rcl_sub = rcl_waitset->subscriptions[i]; rcl_waitset->subscriptions[i] = nullptr; sub_entities.subscriptions.push_back(rcl_sub); const auto cb = std::bind(&EventsExecutor::HandleWaitableSubReady, this, waitable, rcl_sub, ws_handle, i, with_waitset, pl::_1); if (RCL_RET_OK != - rcl_subscription_set_on_new_message_callback( + rcl_subscription_set_on_new_message_callback( rcl_sub, RclEventCallbackTrampoline, - rcl_callback_manager_.MakeCallback(rcl_sub, cb, with_waitable))) { + rcl_callback_manager_.MakeCallback(rcl_sub, cb, with_waitable))) + { throw std::runtime_error( std::string( "Failed to set the on new message callback for Waitable subscription: ") + @@ -559,7 +596,7 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { for (size_t i = 0; i < rcl_waitset->size_of_timers; ++i) { // Unfortunately we do require a non-const pointer here, while the waitset structure // contains a const pointer. 
- rcl_timer_t* const rcl_timer = const_cast(rcl_waitset->timers[i]); + rcl_timer_t * const rcl_timer = const_cast(rcl_waitset->timers[i]); rcl_waitset->timers[i] = nullptr; sub_entities.timers.push_back(rcl_timer); // Since this callback doesn't go through RclCallbackManager which would otherwise @@ -570,15 +607,16 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { timers_manager_.rcl_manager().AddTimer(rcl_timer, cb); } for (size_t i = 0; i < rcl_waitset->size_of_clients; ++i) { - const rcl_client_t* const rcl_client = rcl_waitset->clients[i]; + const rcl_client_t * const rcl_client = rcl_waitset->clients[i]; rcl_waitset->clients[i] = nullptr; sub_entities.clients.push_back(rcl_client); const auto cb = std::bind(&EventsExecutor::HandleWaitableClientReady, this, waitable, rcl_client, ws_handle, i, with_waitset, pl::_1); if (RCL_RET_OK != - rcl_client_set_on_new_response_callback( + rcl_client_set_on_new_response_callback( rcl_client, RclEventCallbackTrampoline, - rcl_callback_manager_.MakeCallback(rcl_client, cb, with_waitable))) { + rcl_callback_manager_.MakeCallback(rcl_client, cb, with_waitable))) + { throw std::runtime_error( std::string( "Failed to set the on new response callback for Waitable client: ") + @@ -586,15 +624,16 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { } } for (size_t i = 0; i < rcl_waitset->size_of_services; ++i) { - const rcl_service_t* const rcl_service = rcl_waitset->services[i]; + const rcl_service_t * const rcl_service = rcl_waitset->services[i]; rcl_waitset->services[i] = nullptr; sub_entities.services.push_back(rcl_service); const auto cb = std::bind(&EventsExecutor::HandleWaitableServiceReady, this, waitable, rcl_service, ws_handle, i, with_waitset, pl::_1); if (RCL_RET_OK != - rcl_service_set_on_new_request_callback( + rcl_service_set_on_new_request_callback( rcl_service, RclEventCallbackTrampoline, - rcl_callback_manager_.MakeCallback(rcl_service, cb, with_waitable))) { + rcl_callback_manager_.MakeCallback(rcl_service, cb, with_waitable))) + { throw std::runtime_error( std::string( "Failed to set the on new request callback for Waitable service: ") + @@ -602,14 +641,15 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { } } for (size_t i = 0; i < rcl_waitset->size_of_events; ++i) { - const rcl_event_t* const rcl_event = rcl_waitset->events[i]; + const rcl_event_t * const rcl_event = rcl_waitset->events[i]; rcl_waitset->events[i] = nullptr; sub_entities.events.push_back(rcl_event); const auto cb = std::bind(&EventsExecutor::HandleWaitableEventReady, this, waitable, rcl_event, ws_handle, i, with_waitset, pl::_1); if (RCL_RET_OK != rcl_event_set_callback(rcl_event, RclEventCallbackTrampoline, rcl_callback_manager_.MakeCallback( - rcl_event, cb, with_waitable))) { + rcl_event, cb, with_waitable))) + { throw std::runtime_error( std::string("Failed to set the callback for Waitable event: ") + rcl_get_error_string().str); @@ -622,43 +662,47 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { waitable_entities_[waitable] = std::move(sub_entities); } -void EventsExecutor::HandleRemovedWaitable(py::handle waitable) { +void EventsExecutor::HandleRemovedWaitable(py::handle waitable) +{ const auto nh = waitable_entities_.extract(waitable); if (!nh) { throw std::runtime_error("Couldn't find sub-entities entry for removed Waitable"); } - const WaitableSubEntities& sub_entities = nh.mapped(); - for (const rcl_subscription_t* const rcl_sub : sub_entities.subscriptions) { + const WaitableSubEntities & 
sub_entities = nh.mapped(); + for (const rcl_subscription_t * const rcl_sub : sub_entities.subscriptions) { if (RCL_RET_OK != - rcl_subscription_set_on_new_message_callback(rcl_sub, nullptr, nullptr)) { + rcl_subscription_set_on_new_message_callback(rcl_sub, nullptr, nullptr)) + { throw std::runtime_error(std::string("Failed to clear the on new message " "callback for Waitable subscription: ") + rcl_get_error_string().str); } rcl_callback_manager_.RemoveCallback(rcl_sub); } - for (rcl_timer_t* const rcl_timer : sub_entities.timers) { + for (rcl_timer_t * const rcl_timer : sub_entities.timers) { timers_manager_.rcl_manager().RemoveTimer(rcl_timer); } - for (const rcl_client_t* const rcl_client : sub_entities.clients) { + for (const rcl_client_t * const rcl_client : sub_entities.clients) { if (RCL_RET_OK != - rcl_client_set_on_new_response_callback(rcl_client, nullptr, nullptr)) { + rcl_client_set_on_new_response_callback(rcl_client, nullptr, nullptr)) + { throw std::runtime_error(std::string("Failed to clear the on new response " "callback for Waitable client: ") + rcl_get_error_string().str); } rcl_callback_manager_.RemoveCallback(rcl_client); } - for (const rcl_service_t* const rcl_service : sub_entities.services) { + for (const rcl_service_t * const rcl_service : sub_entities.services) { if (RCL_RET_OK != - rcl_service_set_on_new_request_callback(rcl_service, nullptr, nullptr)) { + rcl_service_set_on_new_request_callback(rcl_service, nullptr, nullptr)) + { throw std::runtime_error(std::string("Failed to clear the on new request " "callback for Waitable service: ") + rcl_get_error_string().str); } rcl_callback_manager_.RemoveCallback(rcl_service); } - for (const rcl_event_t* const rcl_event : sub_entities.events) { + for (const rcl_event_t * const rcl_event : sub_entities.events) { if (RCL_RET_OK != rcl_event_set_callback(rcl_event, nullptr, nullptr)) { throw std::runtime_error( std::string("Failed to clear the callback for Waitable event: ") + @@ -669,64 +713,71 @@ void EventsExecutor::HandleRemovedWaitable(py::handle waitable) { } void EventsExecutor::HandleWaitableSubReady( - py::handle waitable, const rcl_subscription_t* rcl_sub, py::handle wait_set, - size_t wait_set_sub_index, std::shared_ptr, size_t number_of_events) { + py::handle waitable, const rcl_subscription_t * rcl_sub, py::handle wait_set, + size_t wait_set_sub_index, std::shared_ptr, size_t number_of_events) +{ py::gil_scoped_acquire gil_acquire; // We need to set up the wait set to make it look like our subscription object is // ready, and then poke the Waitable to do what it needs to do from there. - rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set); + rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); rcl_waitset->subscriptions[wait_set_sub_index] = rcl_sub; HandleWaitableReady(waitable, wait_set, number_of_events); // Null out the wait set again so that other callbacks can use it on other objects. 
rcl_waitset->subscriptions[wait_set_sub_index] = nullptr; } -void EventsExecutor::HandleWaitableTimerReady(py::handle waitable, - const rcl_timer_t* rcl_timer, - py::handle wait_set, - size_t wait_set_timer_index, - std::shared_ptr, - std::shared_ptr) { +void EventsExecutor::HandleWaitableTimerReady( + py::handle waitable, + const rcl_timer_t * rcl_timer, + py::handle wait_set, + size_t wait_set_timer_index, + std::shared_ptr, + std::shared_ptr) +{ py::gil_scoped_acquire gil_acquire; // We need to set up the wait set to make it look like our timer object is ready, and // then poke the Waitable to do what it needs to do from there. - rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set); + rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); rcl_waitset->timers[wait_set_timer_index] = rcl_timer; HandleWaitableReady(waitable, wait_set, 1); // Null out the wait set again so that other callbacks can use it on other objects. rcl_waitset->timers[wait_set_timer_index] = nullptr; } -void EventsExecutor::HandleWaitableClientReady(py::handle waitable, - const rcl_client_t* rcl_client, - py::handle wait_set, - size_t wait_set_client_index, - std::shared_ptr, - size_t number_of_events) { +void EventsExecutor::HandleWaitableClientReady( + py::handle waitable, + const rcl_client_t * rcl_client, + py::handle wait_set, + size_t wait_set_client_index, + std::shared_ptr, + size_t number_of_events) +{ py::gil_scoped_acquire gil_acquire; // We need to set up the wait set to make it look like our client object is ready, and // then poke the Waitable to do what it needs to do from there. - rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set); + rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); rcl_waitset->clients[wait_set_client_index] = rcl_client; HandleWaitableReady(waitable, wait_set, number_of_events); // Null out the wait set again so that other callbacks can use it on other objects. rcl_waitset->clients[wait_set_client_index] = nullptr; } -void EventsExecutor::HandleWaitableServiceReady(py::handle waitable, - const rcl_service_t* rcl_service, - py::handle wait_set, - size_t wait_set_service_index, - std::shared_ptr, - size_t number_of_events) { +void EventsExecutor::HandleWaitableServiceReady( + py::handle waitable, + const rcl_service_t * rcl_service, + py::handle wait_set, + size_t wait_set_service_index, + std::shared_ptr, + size_t number_of_events) +{ py::gil_scoped_acquire gil_acquire; // We need to set up the wait set to make it look like our service object is ready, // and then poke the Waitable to do what it needs to do from there. - rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set); + rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); rcl_waitset->services[wait_set_service_index] = rcl_service; HandleWaitableReady(waitable, wait_set, number_of_events); // Null out the wait set again so that other callbacks can use it on other objects. 
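
The HandleWaitable*Ready methods above all share one trick: the Waitable's wait set is kept with its entity slots nulled out, and the single relevant slot is repopulated just long enough for HandleWaitableReady() to see the entity as ready, then cleared so the same wait set can serve the next event. A toy model of that slot-borrowing pattern with fake types (not the real rcl wait set):

#include <array>
#include <cassert>
#include <cstddef>
#include <iostream>

struct FakeWaitSet
{
  std::array<const int *, 4> subscriptions{};  // all slots start as nullptr
};

void HandleSubReady(FakeWaitSet & ws, const int * sub, size_t index)
{
  ws.subscriptions[index] = sub;  // make just this entity look ready
  // ... is_ready()/take_data()/execute() would run against ws here ...
  std::cout << "dispatched entity " << *sub << " from slot " << index << "\n";
  ws.subscriptions[index] = nullptr;  // leave the wait set reusable
}

int main()
{
  FakeWaitSet ws;
  const int sub_a = 1;
  const int sub_b = 2;
  HandleSubReady(ws, &sub_a, 0);
  HandleSubReady(ws, &sub_b, 1);
  assert(ws.subscriptions[0] == nullptr && ws.subscriptions[1] == nullptr);
  return 0;
}
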
@@ -734,28 +785,31 @@ void EventsExecutor::HandleWaitableServiceReady(py::handle waitable, } void EventsExecutor::HandleWaitableEventReady( - py::handle waitable, const rcl_event_t* rcl_event, py::handle wait_set, - size_t wait_set_event_index, std::shared_ptr, size_t number_of_events) { + py::handle waitable, const rcl_event_t * rcl_event, py::handle wait_set, + size_t wait_set_event_index, std::shared_ptr, size_t number_of_events) +{ py::gil_scoped_acquire gil_acquire; // We need to set up the wait set to make it look like our event object is ready, and // then poke the Waitable to do what it needs to do from there. - rcl_wait_set_t* const rcl_waitset = GetRclWaitSet(wait_set); + rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); rcl_waitset->events[wait_set_event_index] = rcl_event; HandleWaitableReady(waitable, wait_set, number_of_events); // Null out the wait set again so that other callbacks can use it on other objects. rcl_waitset->events[wait_set_event_index] = nullptr; } -void EventsExecutor::HandleWaitableReady(py::handle waitable, py::handle wait_set, - size_t number_of_events) { +void EventsExecutor::HandleWaitableReady( + py::handle waitable, py::handle wait_set, + size_t number_of_events) +{ // Largely based on rclpy.Executor._take_waitable() // https://github.com/ros2/rclpy/blob/a19180c238d4d97ed2b58868d8fb7fa3e3b621f2/rclpy/rclpy/executors.py#L447-L454 py::object is_ready = waitable.attr("is_ready"); py::object take_data = waitable.attr("take_data"); py::object execute = waitable.attr("execute"); py::object futures = waitable.attr("_futures"); - for (auto& future : futures) { + for (auto & future : futures) { future.attr("_set_executor")(py::cast(this)); } for (size_t i = 0; i < number_of_events; ++i) { @@ -769,14 +823,15 @@ void EventsExecutor::HandleWaitableReady(py::handle waitable, py::handle wait_se // execute() is an async method, we need to use asyncio to run it // TODO(bmartin427) Don't run all of this immediately, blocking everything else asyncio_run_(execute(data)); - } catch (const py::error_already_set& e) { + } catch (const py::error_already_set & e) { HandleCallbackExceptionInNodeEntity(e, waitable, "waitables"); throw; } } } -void EventsExecutor::IterateTask(py::handle task) { +void EventsExecutor::IterateTask(py::handle task) +{ py::gil_scoped_acquire gil_acquire; // Calling this won't throw, but it may set the exception property on the task object. task(); @@ -794,7 +849,7 @@ void EventsExecutor::IterateTask(py::handle task) { scope["ex"] = ex; try { py::exec("raise ex", scope); - } catch (py::error_already_set& cpp_ex) { + } catch (py::error_already_set & cpp_ex) { // There's no good way to know what node this task came from. If we only have // one node, we can use the logger from that, otherwise we'll have to leave it // undefined. @@ -816,15 +871,15 @@ void EventsExecutor::IterateTask(py::handle task) { } void EventsExecutor::HandleCallbackExceptionInNodeEntity( - const py::error_already_set& exc, py::handle entity, - const std::string& node_entity_attr) { + const py::error_already_set & exc, py::handle entity, + const std::string & node_entity_attr) +{ // Try to identify the node associated with the entity that threw the exception, so we // can log to it. 
for (py::handle node : nodes_) { if (py::set(node.attr(node_entity_attr.c_str())).contains(entity)) { - HandleCallbackExceptionWithLogger(exc, node.attr("get_logger")(), - node_entity_attr); - return; + return HandleCallbackExceptionWithLogger(exc, node.attr("get_logger")(), + node_entity_attr); } } @@ -832,9 +887,11 @@ void EventsExecutor::HandleCallbackExceptionInNodeEntity( HandleCallbackExceptionWithLogger(exc, py::none(), node_entity_attr); } -void EventsExecutor::HandleCallbackExceptionWithLogger(const py::error_already_set& exc, - py::object logger, - const std::string& entity_type) { +void EventsExecutor::HandleCallbackExceptionWithLogger( + const py::error_already_set & exc, + py::object logger, + const std::string & entity_type) +{ if (logger.is_none()) { py::object logging = py::module_::import("rclpy.logging"); logger = logging.attr("get_logger")("UNKNOWN"); @@ -860,20 +917,21 @@ logger.warn("Error occurred at:\n" + "".join(traceback.format_tb(exc_trace))) // pybind11 module bindings -void define_events_executor(py::object module) { +void define_events_executor(py::object module) +{ py::class_(module, "EventsExecutor") - .def(py::init(), py::arg("context")) - .def_property_readonly("context", &EventsExecutor::get_context) - .def("create_task", &EventsExecutor::create_task, py::arg("callback")) - .def("shutdown", &EventsExecutor::shutdown, py::arg("timeout_sec") = py::none()) - .def("add_node", &EventsExecutor::add_node, py::arg("node")) - .def("remove_node", &EventsExecutor::remove_node, py::arg("node")) - .def("wake", &EventsExecutor::wake) - .def("spin", [](EventsExecutor& exec) { exec.spin(); }) - .def("spin_once", &EventsExecutor::spin_once, py::arg("timeout_sec") = py::none()) - .def("spin_until_future_complete", &EventsExecutor::spin_until_future_complete, + .def(py::init(), py::arg("context")) + .def_property_readonly("context", &EventsExecutor::get_context) + .def("create_task", &EventsExecutor::create_task, py::arg("callback")) + .def("shutdown", &EventsExecutor::shutdown, py::arg("timeout_sec") = py::none()) + .def("add_node", &EventsExecutor::add_node, py::arg("node")) + .def("remove_node", &EventsExecutor::remove_node, py::arg("node")) + .def("wake", &EventsExecutor::wake) + .def("spin", [](EventsExecutor & exec) {exec.spin();}) + .def("spin_once", &EventsExecutor::spin_once, py::arg("timeout_sec") = py::none()) + .def("spin_until_future_complete", &EventsExecutor::spin_until_future_complete, py::arg("future"), py::arg("timeout_sec") = py::none()) - .def("spin_once_until_future_complete", + .def("spin_once_until_future_complete", &EventsExecutor::spin_once_until_future_complete, py::arg("future"), py::arg("timeout_sec") = py::none()); } diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index 513250f1b..689df19a2 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -1,4 +1,4 @@ -// Copyright 2024 Brad Martin +// Copyright 2024-2025 Brad Martin // Copyright 2024 Merlin Labs, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,6 +14,14 @@ // limitations under the License. 
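
One subtlety in define_events_executor() above: spin is bound through a lambda rather than a pointer-to-member, presumably because the C++ method takes an optional timeout while the Python-facing Executor.spin() signature takes no arguments. A compilable sketch of the same adaptation, using a hypothetical Demo class and module name (not part of this patch):

#include <optional>
#include <pybind11/pybind11.h>

namespace py = pybind11;

struct Demo
{
  void spin(std::optional<double> timeout_sec = {}) {(void)timeout_sec;}
};

PYBIND11_MODULE(demo_module, m)
{
  py::class_<Demo>(m, "Demo")
  .def(py::init<>())
  // Drop the C++ timeout parameter so Python sees a no-argument spin(),
  // matching the rclpy Executor API.
  .def("spin", [](Demo & d) {d.spin();});
}
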
#pragma once +#include + +#include +#include +#include +#include +#include + #include #include #include @@ -26,20 +34,14 @@ #include #include -#include -#include -#include -#include -#include - -#include - #include "rcl_support.hpp" #include "scoped_with.hpp" #include "timers_manager.hpp" -namespace rclpy { -namespace events_executor { +namespace rclpy +{ +namespace events_executor +{ /// Events executor implementation for rclpy /// @@ -52,35 +54,39 @@ namespace events_executor { /// (ignoring any parallelism that might be allowed by the callback group /// configuration). class EventsExecutor { - public: +public: /// @param context the rclpy Context object to operate on explicit EventsExecutor(pybind11::object context); ~EventsExecutor(); // rclpy Executor API methods: - pybind11::object get_context() const { return rclpy_context_; } - pybind11::object create_task(pybind11::object callback, pybind11::args args, - const pybind11::kwargs& kwargs); + pybind11::object get_context() const {return rclpy_context_;} + pybind11::object create_task( + pybind11::object callback, pybind11::args args, + const pybind11::kwargs & kwargs); bool shutdown(std::optional timeout_sec = {}); bool add_node(pybind11::object node); void remove_node(pybind11::handle node); void wake(); void spin(std::optional timeout_sec = {}); void spin_once(std::optional timeout_sec = {}); - void spin_until_future_complete(pybind11::handle future, - std::optional timeout_sec = {}); - void spin_once_until_future_complete(pybind11::handle future, - std::optional timeout_sec = {}); - - private: + void spin_until_future_complete( + pybind11::handle future, + std::optional timeout_sec = {}); + void spin_once_until_future_complete( + pybind11::handle future, + std::optional timeout_sec = {}); + +private: // Structure to hold entities discovered underlying a Waitable object. - struct WaitableSubEntities { - std::vector subscriptions; - std::vector timers; // Can't be const - std::vector clients; - std::vector services; - std::vector events; + struct WaitableSubEntities + { + std::vector subscriptions; + std::vector timers; // Can't be const + std::vector clients; + std::vector services; + std::vector events; }; /// Updates the sets of known entities based on the currently tracked nodes. This is @@ -91,9 +97,10 @@ class EventsExecutor { /// Given an existing set of entities and a set with the desired new state, updates /// the existing set and invokes callbacks on each added or removed entity. 
- void UpdateEntitySet(pybind11::set& entity_set, const pybind11::set& new_entity_set, - std::function added_entity_callback, - std::function removed_entity_callback); + void UpdateEntitySet( + pybind11::set & entity_set, const pybind11::set & new_entity_set, + std::function added_entity_callback, + std::function removed_entity_callback); void HandleAddedSubscription(pybind11::handle); void HandleRemovedSubscription(pybind11::handle); @@ -113,41 +120,49 @@ class EventsExecutor { void HandleAddedWaitable(pybind11::handle); void HandleRemovedWaitable(pybind11::handle); - void HandleWaitableSubReady(pybind11::handle waitable, const rcl_subscription_t*, - pybind11::handle wait_set, size_t wait_set_sub_index, - std::shared_ptr with_waitset, - size_t number_of_events); - void HandleWaitableTimerReady(pybind11::handle waitable, const rcl_timer_t*, - pybind11::handle wait_set, size_t wait_set_timer_index, - std::shared_ptr with_waitable, - std::shared_ptr with_waitset); - void HandleWaitableClientReady(pybind11::handle waitable, const rcl_client_t*, - pybind11::handle wait_set, - size_t wait_set_client_index, - std::shared_ptr with_waitset, - size_t number_of_events); - void HandleWaitableServiceReady(pybind11::handle waitable, const rcl_service_t*, - pybind11::handle wait_set, - size_t wait_set_service_index, - std::shared_ptr with_waitset, - size_t number_of_events); - void HandleWaitableEventReady(pybind11::handle waitable, const rcl_event_t*, - pybind11::handle wait_set, size_t wait_set_event_index, - std::shared_ptr with_waitset, - size_t number_of_events); - void HandleWaitableReady(pybind11::handle waitable, pybind11::handle wait_set, - size_t number_of_events); + void HandleWaitableSubReady( + pybind11::handle waitable, const rcl_subscription_t *, + pybind11::handle wait_set, size_t wait_set_sub_index, + std::shared_ptr with_waitset, + size_t number_of_events); + void HandleWaitableTimerReady( + pybind11::handle waitable, const rcl_timer_t *, + pybind11::handle wait_set, size_t wait_set_timer_index, + std::shared_ptr with_waitable, + std::shared_ptr with_waitset); + void HandleWaitableClientReady( + pybind11::handle waitable, const rcl_client_t *, + pybind11::handle wait_set, + size_t wait_set_client_index, + std::shared_ptr with_waitset, + size_t number_of_events); + void HandleWaitableServiceReady( + pybind11::handle waitable, const rcl_service_t *, + pybind11::handle wait_set, + size_t wait_set_service_index, + std::shared_ptr with_waitset, + size_t number_of_events); + void HandleWaitableEventReady( + pybind11::handle waitable, const rcl_event_t *, + pybind11::handle wait_set, size_t wait_set_event_index, + std::shared_ptr with_waitset, + size_t number_of_events); + void HandleWaitableReady( + pybind11::handle waitable, pybind11::handle wait_set, + size_t number_of_events); /// Helper for create_task(). @p task needs to have had one reference manually added /// to it. See create_task() implementation for details. 
void IterateTask(pybind11::handle task); - void HandleCallbackExceptionInNodeEntity(const pybind11::error_already_set&, - pybind11::handle entity, - const std::string& node_entity_attr); - void HandleCallbackExceptionWithLogger(const pybind11::error_already_set&, - pybind11::object logger, - const std::string& entity_type); + void HandleCallbackExceptionInNodeEntity( + const pybind11::error_already_set &, + pybind11::handle entity, + const std::string & node_entity_attr); + void HandleCallbackExceptionWithLogger( + const pybind11::error_already_set &, + pybind11::object logger, + const std::string & entity_type); const pybind11::object rclpy_context_; @@ -174,7 +189,7 @@ class EventsExecutor { /// Cache for rcl pointers underlying each waitables_ entry, because those are harder /// to retrieve than the other entity types. std::unordered_map - waitable_entities_; + waitable_entities_; RclCallbackManager rcl_callback_manager_; TimersManager timers_manager_; diff --git a/rclpy/src/rclpy/events_executor/python_hasher.hpp b/rclpy/src/rclpy/events_executor/python_hasher.hpp index 1ff9f8c2b..db9a1e6f6 100644 --- a/rclpy/src/rclpy/events_executor/python_hasher.hpp +++ b/rclpy/src/rclpy/events_executor/python_hasher.hpp @@ -1,4 +1,4 @@ -// Copyright 2024 Brad Martin +// Copyright 2024-2025 Brad Martin // Copyright 2024 Merlin Labs, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,13 +16,17 @@ #include -namespace rclpy { -namespace events_executor { +namespace rclpy +{ +namespace events_executor +{ /// This is intended to be used as the Hash template arg to STL containers using a /// pybind11::handle as a Key. This is the same hash that a native Python dict or set /// would use given the same key. -struct PythonHasher { - inline ssize_t operator()(const pybind11::handle& handle) const { +struct PythonHasher +{ + inline ssize_t operator()(const pybind11::handle & handle) const + { return pybind11::hash(handle); } }; diff --git a/rclpy/src/rclpy/events_executor/rcl_support.cpp b/rclpy/src/rclpy/events_executor/rcl_support.cpp index 1a53a9e7e..393ce5d2c 100644 --- a/rclpy/src/rclpy/events_executor/rcl_support.cpp +++ b/rclpy/src/rclpy/events_executor/rcl_support.cpp @@ -1,4 +1,4 @@ -// Copyright 2024 Brad Martin +// Copyright 2024-2025 Brad Martin // Copyright 2024 Merlin Labs, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,19 +22,24 @@ namespace py = pybind11; -namespace rclpy { -namespace events_executor { +namespace rclpy +{ +namespace events_executor +{ -extern "C" void RclEventCallbackTrampoline(const void* user_data, - size_t number_of_events) { - const auto cb = reinterpret_cast*>(user_data); +extern "C" void RclEventCallbackTrampoline( + const void * user_data, + size_t number_of_events) +{ + const auto cb = reinterpret_cast *>(user_data); (*cb)(number_of_events); } -RclCallbackManager::RclCallbackManager(const boost::asio::any_io_executor& executor) - : executor_(executor) {} +RclCallbackManager::RclCallbackManager(const boost::asio::any_io_executor & executor) +: executor_(executor) {} -RclCallbackManager::~RclCallbackManager() { +RclCallbackManager::~RclCallbackManager() +{ // Should not still have any callbacks registered when we exit, because otherwise RCL // can call pointers that will no longer be valid. We can't throw an exception here, // but we can explode. 
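
RclEventCallbackTrampoline() above is the standard bridge between a C callback API (a function pointer plus an opaque user-data pointer) and an owned C++ std::function; rcl invokes the trampoline from an arbitrary middleware thread, and the trampoline forwards to the std::function stored behind the user-data pointer. A self-contained sketch of the idiom, with a stand-in for the rcl registration call. Note the std::function must outlive the registration, which is exactly the job RclCallbackManager exists to do:

#include <cstddef>
#include <functional>
#include <iostream>

extern "C" void Trampoline(const void * user_data, size_t number_of_events)
{
  const auto cb = reinterpret_cast<const std::function<void(size_t)> *>(user_data);
  (*cb)(number_of_events);
}

// Stand-in for an rcl_*_set_on_*_callback() function; real middleware would
// store the pointer pair and invoke it later from its own thread.
void FakeRclSetCallback(void (* fn)(const void *, size_t), const void * user_data)
{
  fn(user_data, 2);
}

int main()
{
  std::function<void(size_t)> cb = [](size_t n) {
      std::cout << "got " << n << " event(s)\n";
    };
  FakeRclSetCallback(Trampoline, &cb);  // &cb must stay valid while registered
  return 0;
}
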
@@ -45,9 +50,11 @@ RclCallbackManager::~RclCallbackManager() { } } -const void* RclCallbackManager::MakeCallback(const void* key, - std::function callback, - std::shared_ptr with) { +const void * RclCallbackManager::MakeCallback( + const void * key, + std::function callback, + std::shared_ptr with) +{ // We don't support replacing an existing callback with a new one, because it gets // tricky making sure we don't delete an old callback while the middleware still holds // a pointer to it. @@ -56,57 +63,64 @@ const void* RclCallbackManager::MakeCallback(const void* key, } CbEntry new_entry; new_entry.cb = std::make_unique>( - [this, callback, key](size_t number_of_events) { - boost::asio::post(executor_, [this, callback, key, number_of_events]() { - if (!owned_cbs_.count(key)) { + [this, callback, key](size_t number_of_events) { + boost::asio::post(executor_, [this, callback, key, number_of_events]() { + if (!owned_cbs_.count(key)) { // This callback has been removed, just drop it as the objects it may want // to touch may no longer exist. - return; - } - callback(number_of_events); + return; + } + callback(number_of_events); }); }); new_entry.with = with; - const void* ret = new_entry.cb.get(); + const void * ret = new_entry.cb.get(); owned_cbs_[key] = std::move(new_entry); return ret; } -void RclCallbackManager::RemoveCallback(const void* key) { +void RclCallbackManager::RemoveCallback(const void * key) +{ if (!owned_cbs_.erase(key)) { throw py::key_error("Attempt to remove nonexistent callback"); } } -namespace { +namespace +{ // This helper function is used for retrieving rcl pointers from _rclpy C++ objects. // Because _rclpy doesn't install its C++ headers for public use, it's difficult to use // the C++ classes directly. But, we can treat them as if they are Python objects using // their defined Python API. Unfortunately the python interfaces convert the returned // pointer to integers, so recovering that looks a little weird. -template -RclT* GetRclPtr(py::handle py_ent_handle) { - return reinterpret_cast(py::cast(py_ent_handle.attr("pointer"))); +template +RclT * GetRclPtr(py::handle py_ent_handle) +{ + return reinterpret_cast(py::cast(py_ent_handle.attr("pointer"))); } } // namespace -rcl_subscription_t* GetRclSubscription(py::handle sub_handle) { +rcl_subscription_t * GetRclSubscription(py::handle sub_handle) +{ return GetRclPtr(sub_handle); } -rcl_timer_t* GetRclTimer(py::handle timer_handle) { +rcl_timer_t * GetRclTimer(py::handle timer_handle) +{ return GetRclPtr(timer_handle); } -rcl_client_t* GetRclClient(py::handle cl_handle) { +rcl_client_t * GetRclClient(py::handle cl_handle) +{ return GetRclPtr(cl_handle); } -rcl_service_t* GetRclService(py::handle srv_handle) { +rcl_service_t * GetRclService(py::handle srv_handle) +{ return GetRclPtr(srv_handle); } -rcl_wait_set_t* GetRclWaitSet(py::handle ws) { return GetRclPtr(ws); } +rcl_wait_set_t * GetRclWaitSet(py::handle ws) {return GetRclPtr(ws);} } // namespace events_executor } // namespace rclpy diff --git a/rclpy/src/rclpy/events_executor/rcl_support.hpp b/rclpy/src/rclpy/events_executor/rcl_support.hpp index 982461bf1..8b29e60ca 100644 --- a/rclpy/src/rclpy/events_executor/rcl_support.hpp +++ b/rclpy/src/rclpy/events_executor/rcl_support.hpp @@ -1,4 +1,4 @@ -// Copyright 2024 Brad Martin +// Copyright 2024-2025 Brad Martin // Copyright 2024 Merlin Labs, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,11 +14,7 @@ // limitations under the License. 
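
MakeCallback() above has two jobs: keep each std::function at a stable address for as long as the C API may call it (hence the unique_ptr values in owned_cbs_), and defer the user callback onto the executor thread, where ownership is re-checked so late events for already-removed entities are dropped rather than touching dead objects. A single-threaded sketch of that flow, with a std::queue standing in for the asio executor:

#include <cstddef>
#include <functional>
#include <iostream>
#include <memory>
#include <queue>
#include <unordered_map>

std::queue<std::function<void()>> loop;  // stand-in for the asio executor
std::unordered_map<const void *,
  std::unique_ptr<std::function<void(size_t)>>> owned_cbs;

const void * MakeCallback(const void * key, std::function<void(size_t)> user_cb)
{
  auto entry = std::make_unique<std::function<void(size_t)>>(
    [key, user_cb](size_t n) {
      // "Middleware thread" side: defer to the loop and re-check ownership
      // there, in case the callback was removed in the meantime.
      loop.push([key, user_cb, n]() {
          if (owned_cbs.count(key)) {
            user_cb(n);
          }
        });
    });
  const void * raw = entry.get();  // stable pointer handed to the C API
  owned_cbs[key] = std::move(entry);
  return raw;
}

int main()
{
  int rcl_entity{};  // any stable address serves as the key
  auto cb = reinterpret_cast<const std::function<void(size_t)> *>(
    MakeCallback(&rcl_entity, [](size_t n) {std::cout << n << " event(s)\n";}));
  (*cb)(1);                      // event arrives while the entity is tracked
  owned_cbs.erase(&rcl_entity);  // entity removed before the loop services it
  while (!loop.empty()) {loop.front()(); loop.pop();}  // event dropped: no output
  return 0;
}
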
#pragma once -#include -#include -#include - -#include +#include #include #include @@ -26,12 +22,18 @@ #include #include -#include +#include +#include +#include + +#include #include "scoped_with.hpp" -namespace rclpy { -namespace events_executor { +namespace rclpy +{ +namespace events_executor +{ /// Use this for all RCL event callbacks. Use the return value from /// RclCallbackManager::MakeCallback() as the user data arg. @@ -39,35 +41,38 @@ namespace events_executor { /// Note that RCL callbacks are invoked in some arbitrary thread originating from the /// middleware. Callbacks should process quickly to avoid blocking the middleware; i.e. /// all actual work should be posted to an asio loop in another thread. -extern "C" void RclEventCallbackTrampoline(const void* user_data, - size_t number_of_events); +extern "C" void RclEventCallbackTrampoline( + const void * user_data, + size_t number_of_events); /// Creates and maintains callback wrappers used with the RCL C library. class RclCallbackManager { - public: +public: /// All user callbacks will be posted on the @p executor given to the constructor. /// These callbacks will be invoked without the Python Global Interpreter Lock held, /// so if they need to access Python at all make sure to acquire that explicitly. - explicit RclCallbackManager(const boost::asio::any_io_executor& executor); + explicit RclCallbackManager(const boost::asio::any_io_executor & executor); ~RclCallbackManager(); /// Creates a callback wrapper to be passed to RCL C functions. @p key should be a /// pointer to the rcl object that will be associated with the callback. @p with /// protects the _rclpy object handle owning the RCL object, for the duration the /// callback is established. - const void* MakeCallback(const void* key, std::function callback, - std::shared_ptr with); + const void * MakeCallback( + const void * key, std::function callback, + std::shared_ptr with); /// Discard a previously constructed callback. @p key should be the same value /// provided to MakeCallback(). Caution: ensure that RCL is no longer using a /// callback before invoking this. - void RemoveCallback(const void* key); + void RemoveCallback(const void * key); - private: +private: /// The C RCL interface deals in raw pointers, so someone needs to own the C++ /// function objects we'll be calling into. We use unique pointers so the raw pointer /// to the object remains stable while the map is manipulated. - struct CbEntry { + struct CbEntry + { std::unique_ptr> cb; std::shared_ptr with; }; @@ -76,33 +81,33 @@ class RclCallbackManager { /// The map key is the raw pointer to the RCL entity object (subscription, etc) /// associated with the callback. - std::unordered_map owned_cbs_; + std::unordered_map owned_cbs_; }; /// Returns the RCL subscription object pointer from a subscription handle (i.e. the /// handle attribute of an rclpy Subscription object, which is a _rclpy C++ /// Subscription object). Assumes that a ScopedWith has already been entered on the /// given handle. -rcl_subscription_t* GetRclSubscription(pybind11::handle); +rcl_subscription_t * GetRclSubscription(pybind11::handle); /// Returns the RCL timer object pointer from a timer handle (i.e. the handle attribute /// of an rclpy Timer object, which is a _rclpy C++ Timer object). Assumes that a /// ScopedWith has already been entered on the given handle. -rcl_timer_t* GetRclTimer(pybind11::handle); +rcl_timer_t * GetRclTimer(pybind11::handle); /// Returns the RCL client object pointer from a client handle (i.e. 
the handle /// attribute of an rclpy Client object, which is a _rclpy C++ Client object). Assumes /// that a ScopedWith has already been entered on the given handle. -rcl_client_t* GetRclClient(pybind11::handle); +rcl_client_t * GetRclClient(pybind11::handle); /// Returns the RCL service object pointer from a service handle (i.e. the handle /// attribute of an rclpy Service object, which is a _rclpy C++ Service object). /// Assumes that a ScopedWith has already been entered on the given handle. -rcl_service_t* GetRclService(pybind11::handle); +rcl_service_t * GetRclService(pybind11::handle); /// Returns the RCL wait set object pointer from a _rclpy C++ WaitSet object. Assumes /// that a ScopedWith has already been entered on the given object. -rcl_wait_set_t* GetRclWaitSet(pybind11::handle); +rcl_wait_set_t * GetRclWaitSet(pybind11::handle); } // namespace events_executor } // namespace rclpy diff --git a/rclpy/src/rclpy/events_executor/scoped_with.hpp b/rclpy/src/rclpy/events_executor/scoped_with.hpp index 53e3e6daa..c24eb5361 100644 --- a/rclpy/src/rclpy/events_executor/scoped_with.hpp +++ b/rclpy/src/rclpy/events_executor/scoped_with.hpp @@ -1,4 +1,4 @@ -// Copyright 2024 Brad Martin +// Copyright 2024-2025 Brad Martin // Copyright 2024 Merlin Labs, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,22 +16,26 @@ #include -namespace rclpy { -namespace events_executor { +namespace rclpy +{ +namespace events_executor +{ /// Enters a python context manager for the scope of this object instance. class ScopedWith { - public: +public: explicit ScopedWith(pybind11::handle object) - : object_(pybind11::cast(object)) { + : object_(pybind11::cast(object)) + { object_.attr("__enter__")(); } - ~ScopedWith() { + ~ScopedWith() + { object_.attr("__exit__")(pybind11::none(), pybind11::none(), pybind11::none()); } - private: +private: pybind11::object object_; }; diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp index 2318754e5..0e62d241f 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.cpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp @@ -1,4 +1,4 @@ -// Copyright 2024 Brad Martin +// Copyright 2024-2025 Brad Martin // Copyright 2024 Merlin Labs, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,6 +14,8 @@ // limitations under the License. #include "events_executor/timers_manager.hpp" +#include + #include #include #include @@ -25,16 +27,17 @@ #include #include -#include - #include "rcl_support.hpp" namespace py = pybind11; -namespace rclpy { -namespace events_executor { +namespace rclpy +{ +namespace events_executor +{ -namespace { +namespace +{ // Implementation note: the original iRobot TimersManager associated with the rclcpp // EventsExecutor maintained a heap with all outstanding timers sorted by their next @@ -55,22 +58,25 @@ namespace { // this assumption, so we can reassess this decision. constexpr size_t WARN_TIMERS_COUNT = 8; -typedef std::function ClockJumpCallbackT; +typedef std::function ClockJumpCallbackT; typedef std::function TimerResetCallbackT; -extern "C" void RclClockJumpTrampoline(const rcl_time_jump_t* time_jump, - bool before_jump, void* user_data) { +extern "C" void RclClockJumpTrampoline( + const rcl_time_jump_t * time_jump, + bool before_jump, void * user_data) +{ // rcl calls this both before and after a clock change, and we never want the before // callback, so let's just eliminate that case early. 
if (before_jump) { return; } - auto cb = reinterpret_cast(user_data); + auto cb = reinterpret_cast(user_data); (*cb)(time_jump); } -extern "C" void RclTimerResetTrampoline(const void* user_data, size_t) { - auto cb = reinterpret_cast(user_data); +extern "C" void RclTimerResetTrampoline(const void * user_data, size_t) +{ + auto cb = reinterpret_cast(user_data); (*cb)(); } @@ -80,31 +86,33 @@ extern "C" void RclTimerResetTrampoline(const void* user_data, size_t) { /// (including construction and destruction) are assumed to happen on the thread running /// the provided asio executor. class RclTimersManager::ClockManager { - public: - ClockManager(const boost::asio::any_io_executor& executor, rcl_clock_t* clock) - : executor_(executor), clock_(clock) { +public: + ClockManager(const boost::asio::any_io_executor & executor, rcl_clock_t * clock) + : executor_(executor), clock_(clock) + { // Need to establish a clock jump callback so we can tell when debug time is // updated. rcl_jump_threshold_t threshold{ - .on_clock_change = true, .min_forward = 1, .min_backward = -1}; + .on_clock_change = true, .min_forward = 1, .min_backward = -1}; // Note, this callback could happen on any thread - jump_cb_ = [this](const rcl_time_jump_t* time_jump) { - bool on_debug{}; - switch (time_jump->clock_change) { - case RCL_ROS_TIME_NO_CHANGE: - case RCL_ROS_TIME_ACTIVATED: - on_debug = true; - break; - case RCL_ROS_TIME_DEACTIVATED: - case RCL_SYSTEM_TIME_NO_CHANGE: - on_debug = false; - break; - } - boost::asio::post(executor_, + jump_cb_ = [this](const rcl_time_jump_t * time_jump) { + bool on_debug{}; + switch (time_jump->clock_change) { + case RCL_ROS_TIME_NO_CHANGE: + case RCL_ROS_TIME_ACTIVATED: + on_debug = true; + break; + case RCL_ROS_TIME_DEACTIVATED: + case RCL_SYSTEM_TIME_NO_CHANGE: + on_debug = false; + break; + } + boost::asio::post(executor_, std::bind(&ClockManager::HandleJump, this, on_debug)); - }; + }; if (RCL_RET_OK != rcl_clock_add_jump_callback(clock_, threshold, - RclClockJumpTrampoline, &jump_cb_)) { + RclClockJumpTrampoline, &jump_cb_)) + { throw std::runtime_error(std::string("Failed to set RCL clock jump callback: ") + rcl_get_error_string().str); } @@ -112,8 +120,8 @@ class RclTimersManager::ClockManager { // This isn't necessary yet but every timer will eventually depend on it. Again, // this could happen on any thread. 
reset_cb_ = [this]() { - boost::asio::post(executor_, std::bind(&ClockManager::UpdateTimers, this)); - }; + boost::asio::post(executor_, std::bind(&ClockManager::UpdateTimers, this)); + }; // Initialize which timebase we're on if (clock_->type == RCL_ROS_TIME) { @@ -125,9 +133,11 @@ class RclTimersManager::ClockManager { } } - ~ClockManager() { + ~ClockManager() + { if (RCL_RET_OK != - rcl_clock_remove_jump_callback(clock_, RclClockJumpTrampoline, &jump_cb_)) { + rcl_clock_remove_jump_callback(clock_, RclClockJumpTrampoline, &jump_cb_)) + { py::gil_scoped_acquire gil_acquire; py::print(std::string("Failed to remove RCL clock jump callback: ") + rcl_get_error_string().str); @@ -137,12 +147,14 @@ class RclTimersManager::ClockManager { } } - bool empty() const { return timers_.empty(); } + bool empty() const {return timers_.empty();} - void AddTimer(rcl_timer_t* timer, std::function ready_callback) { + void AddTimer(rcl_timer_t * timer, std::function ready_callback) + { // All timers have the same reset callback if (RCL_RET_OK != - rcl_timer_set_on_reset_callback(timer, RclTimerResetTrampoline, &reset_cb_)) { + rcl_timer_set_on_reset_callback(timer, RclTimerResetTrampoline, &reset_cb_)) + { throw std::runtime_error(std::string("Failed to set timer reset callback: ") + rcl_get_error_string().str); } @@ -154,7 +166,8 @@ class RclTimersManager::ClockManager { UpdateTimers(); } - void RemoveTimer(rcl_timer_t* timer) { + void RemoveTimer(rcl_timer_t * timer) + { auto it = timers_.find(timer); if (it == timers_.end()) { throw py::key_error("Attempt to remove unmanaged timer"); @@ -171,17 +184,19 @@ class RclTimersManager::ClockManager { // so we won't bother. } - private: - void HandleJump(bool on_debug_time) { +private: + void HandleJump(bool on_debug_time) + { on_debug_time_ = on_debug_time; UpdateTimers(); } - void UpdateTimers() { + void UpdateTimers() + { // First, evaluate all of our timers and dispatch any that are ready now. While // we're at it, keep track of the earliest next timer callback that is due. std::optional next_ready_ns; - for (const auto& timer_cb_pair : timers_) { + for (const auto & timer_cb_pair : timers_) { auto next_call_ns = GetNextCallNanoseconds(timer_cb_pair.first); if (next_call_ns <= 0) { // This just notifies RCL that we're considering the timer triggered, for the @@ -220,12 +235,12 @@ class RclTimersManager::ClockManager { // don't wake early and find nothing to do. const int64_t sleep_us = (*next_ready_ns + 999) / 1000; next_update_wait_.expires_from_now(boost::posix_time::microseconds(sleep_us)); - next_update_wait_.async_wait([this](const boost::system::error_code& ec) { - if (!ec) { - UpdateTimers(); - } else if (ec != boost::asio::error::operation_aborted) { - throw std::runtime_error("Error waiting for next timer: " + ec.message()); - } + next_update_wait_.async_wait([this](const boost::system::error_code & ec) { + if (!ec) { + UpdateTimers(); + } else if (ec != boost::asio::error::operation_aborted) { + throw std::runtime_error("Error waiting for next timer: " + ec.message()); + } }); } else { next_update_wait_.cancel(); @@ -235,10 +250,11 @@ class RclTimersManager::ClockManager { /// Returns the number of nanoseconds until the next callback on the given timer is /// due. Value may be negative or zero if callback time has already been reached. /// Returns std::nullopt if the timer is canceled. 
- static std::optional GetNextCallNanoseconds(const rcl_timer_t* rcl_timer) { + static std::optional GetNextCallNanoseconds(const rcl_timer_t * rcl_timer) + { int64_t time_until_next_call{}; const rcl_ret_t ret = - rcl_timer_get_time_until_next_call(rcl_timer, &time_until_next_call); + rcl_timer_get_time_until_next_call(rcl_timer, &time_until_next_call); switch (ret) { case RCL_RET_OK: return time_until_next_call; @@ -251,23 +267,25 @@ class RclTimersManager::ClockManager { } boost::asio::any_io_executor executor_; - rcl_clock_t* const clock_; + rcl_clock_t * const clock_; ClockJumpCallbackT jump_cb_; TimerResetCallbackT reset_cb_; bool on_debug_time_{}; - std::unordered_map> timers_; + std::unordered_map> timers_; boost::asio::deadline_timer next_update_wait_{executor_}; }; -RclTimersManager::RclTimersManager(const boost::asio::any_io_executor& executor) - : executor_(executor) {} +RclTimersManager::RclTimersManager(const boost::asio::any_io_executor & executor) +: executor_(executor) {} RclTimersManager::~RclTimersManager() {} -namespace { -rcl_clock_t* GetTimerClock(rcl_timer_t* timer) { - rcl_clock_t* clock{}; +namespace +{ +rcl_clock_t * GetTimerClock(rcl_timer_t * timer) +{ + rcl_clock_t * clock{}; if (RCL_RET_OK != rcl_timer_clock(timer, &clock)) { throw std::runtime_error(std::string("Failed to determine clock for timer: ") + rcl_get_error_string().str); @@ -276,11 +294,13 @@ rcl_clock_t* GetTimerClock(rcl_timer_t* timer) { } } // namespace -void RclTimersManager::AddTimer(rcl_timer_t* timer, - std::function ready_callback) { +void RclTimersManager::AddTimer( + rcl_timer_t * timer, + std::function ready_callback) +{ // Figure out the clock this timer is using, make sure a manager exists for that // clock, then forward the timer to that clock's manager. 
- rcl_clock_t* clock = GetTimerClock(timer); + rcl_clock_t * clock = GetTimerClock(timer); auto it = clock_managers_.find(clock); if (it == clock_managers_.end()) { std::tie(it, std::ignore) = clock_managers_.insert( @@ -289,8 +309,9 @@ void RclTimersManager::AddTimer(rcl_timer_t* timer, it->second->AddTimer(timer, ready_callback); } -void RclTimersManager::RemoveTimer(rcl_timer_t* timer) { - const rcl_clock_t* clock = GetTimerClock(timer); +void RclTimersManager::RemoveTimer(rcl_timer_t * timer) +{ + const rcl_clock_t * clock = GetTimerClock(timer); auto it = clock_managers_.find(clock); if (it == clock_managers_.end()) { throw py::key_error("Attempt to remove timer from unmanaged clock"); @@ -301,13 +322,15 @@ void RclTimersManager::RemoveTimer(rcl_timer_t* timer) { } } -TimersManager::TimersManager(const boost::asio::any_io_executor& executor, - std::function timer_ready_callback) - : rcl_manager_(executor), ready_callback_(timer_ready_callback) {} +TimersManager::TimersManager( + const boost::asio::any_io_executor & executor, + std::function timer_ready_callback) +: rcl_manager_(executor), ready_callback_(timer_ready_callback) {} TimersManager::~TimersManager() {} -void TimersManager::AddTimer(py::handle timer) { +void TimersManager::AddTimer(py::handle timer) +{ PyRclMapping mapping; py::handle handle = timer.attr("handle"); mapping.with = std::make_unique(handle); @@ -316,7 +339,8 @@ void TimersManager::AddTimer(py::handle timer) { timer_mappings_[timer] = std::move(mapping); } -void TimersManager::RemoveTimer(py::handle timer) { +void TimersManager::RemoveTimer(py::handle timer) +{ const auto it = timer_mappings_.find(timer); if (it == timer_mappings_.end()) { throw py::key_error("Attempt to remove unmanaged timer"); diff --git a/rclpy/src/rclpy/events_executor/timers_manager.hpp b/rclpy/src/rclpy/events_executor/timers_manager.hpp index 314dac9b4..00f51c968 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.hpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.hpp @@ -1,4 +1,4 @@ -// Copyright 2024 Brad Martin +// Copyright 2024-2025 Brad Martin // Copyright 2024 Merlin Labs, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,64 +14,68 @@ // limitations under the License. #pragma once +#include + +#include +#include + #include #include #include #include -#include - -#include -#include - #include "events_executor/python_hasher.hpp" #include "events_executor/scoped_with.hpp" -namespace rclpy { -namespace events_executor { +namespace rclpy +{ +namespace events_executor +{ /// This class manages low-level rcl timers in the system on behalf of EventsExecutor. class RclTimersManager { - public: - explicit RclTimersManager(const boost::asio::any_io_executor&); +public: + explicit RclTimersManager(const boost::asio::any_io_executor &); ~RclTimersManager(); - void AddTimer(rcl_timer_t*, std::function ready_callback); - void RemoveTimer(rcl_timer_t*); + void AddTimer(rcl_timer_t *, std::function ready_callback); + void RemoveTimer(rcl_timer_t *); - private: +private: boost::asio::any_io_executor executor_; class ClockManager; /// Handlers for each distinct clock source in the system. - std::unordered_map> clock_managers_; + std::unordered_map> clock_managers_; }; /// This class manages rclpy.Timer Python objects on behalf of EventsExecutor. class TimersManager { - public: +public: /// @p timer_ready_callback will be invoked with the timer handle whenever a managed /// timer is ready for servicing. 
- TimersManager(const boost::asio::any_io_executor&, - std::function timer_ready_callback); + TimersManager( + const boost::asio::any_io_executor &, + std::function timer_ready_callback); ~TimersManager(); /// Accessor for underlying rcl timer manager, for management of non-Python timers. - RclTimersManager& rcl_manager() { return rcl_manager_; } + RclTimersManager & rcl_manager() {return rcl_manager_;} // Both of these methods expect the GIL to be held when they are called. void AddTimer(pybind11::handle timer); void RemoveTimer(pybind11::handle timer); - private: - struct PyRclMapping { +private: + struct PyRclMapping + { /// Marks the corresponding Python object as in-use for as long as we're using the /// rcl pointer derived from it. std::unique_ptr with; /// The underlying rcl object - rcl_timer_t* rcl_ptr{}; + rcl_timer_t * rcl_ptr{}; }; RclTimersManager rcl_manager_; diff --git a/rclpy/test/test_events_executor.py b/rclpy/test/test_events_executor.py index 8addb8ba1..dcd2fae3c 100644 --- a/rclpy/test/test_events_executor.py +++ b/rclpy/test/test_events_executor.py @@ -1,4 +1,4 @@ -# Copyright 2024 Brad Martin +# Copyright 2024-2025 Brad Martin # Copyright 2024 Merlin Labs, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -46,7 +46,7 @@ class SubTestNode(rclpy.node.Node): """Node to test subscriptions and subscription-related events.""" def __init__(self, *, transient_local: bool = False): - super().__init__("test_sub_node") + super().__init__('test_sub_node') self._new_pub_future: rclpy.Future[ rclpy.event_handler.QoSSubscriptionMatchedInfo ] | None = None @@ -55,7 +55,7 @@ def __init__(self, *, transient_local: bool = False): test_msgs.msg.BasicTypes, # This node seems to get stale discovery data and then complain about QoS # changes if we reuse the same topic name. - "test_topic" + ("_transient_local" if transient_local else ""), + 'test_topic' + ('_transient_local' if transient_local else ''), self._handle_sub, _get_pub_sub_qos(transient_local), event_callbacks=rclpy.event_handler.SubscriptionEventCallbacks( @@ -92,13 +92,13 @@ class PubTestNode(rclpy.node.Node): """Node to test publications and publication-related events.""" def __init__(self, *, transient_local: bool = False): - super().__init__("test_pub_node") + super().__init__('test_pub_node') self._new_sub_future: rclpy.Future[ rclpy.event_handler.QoSPublisherMatchedInfo ] | None = None self._pub = self.create_publisher( test_msgs.msg.BasicTypes, - "test_topic" + ("_transient_local" if transient_local else ""), + 'test_topic' + ('_transient_local' if transient_local else ''), _get_pub_sub_qos(transient_local), event_callbacks=rclpy.event_handler.PublisherEventCallbacks( matched=self._handle_matched_pub @@ -127,19 +127,20 @@ class ServiceServerTestNode(rclpy.node.Node): """Node to test service server-side operation.""" def __init__(self): - super().__init__("test_service_server_node") + super().__init__('test_service_server_node') self._got_request_future: rclpy.Future[ test_msgs.srv.BasicTypes.Request ] | None = None self._pending_response: test_msgs.srv.BasicTypes.Response | None = None self.create_service( - test_msgs.srv.BasicTypes, "test_service", self._handle_request + test_msgs.srv.BasicTypes, 'test_service', self._handle_request ) def expect_request( self, success: bool, error_msg: str ) -> rclpy.Future[test_msgs.srv.BasicTypes.Request]: - """Expect an incoming request. + """ + Expect an incoming request. The arguments are used to compose the response. 
""" @@ -167,8 +168,8 @@ class ServiceClientTestNode(rclpy.node.Node): """Node to test service client-side operation.""" def __init__(self): - super().__init__("test_service_client_node") - self._client = self.create_client(test_msgs.srv.BasicTypes, "test_service") + super().__init__('test_service_client_node') + self._client = self.create_client(test_msgs.srv.BasicTypes, 'test_service') def issue_request( self, value: float @@ -183,7 +184,7 @@ class TimerTestNode(rclpy.node.Node): def __init__( self, index: int = 0, parameter_overrides: list[rclpy.Parameter] | None = None ): - super().__init__(f"test_timer{index}", parameter_overrides=parameter_overrides) + super().__init__(f'test_timer{index}', parameter_overrides=parameter_overrides) self._timer_events = 0 self._tick_future: rclpy.Future[None] | None = None self._timer = self.create_timer(0.1, self._handle_timer) @@ -207,11 +208,11 @@ class ClockPublisherNode(rclpy.node.Node): """Node to publish rostime clock updates.""" def __init__(self): - super().__init__("clock_node") + super().__init__('clock_node') self._now = rclpy.time.Time() self._pub = self.create_publisher( rosgraph_msgs.msg.Clock, - "/clock", + '/clock', rclpy.qos.QoSProfile( depth=1, reliability=rclpy.qos.ReliabilityPolicy.BEST_EFFORT ), @@ -227,8 +228,8 @@ class ActionServerTestNode(rclpy.node.Node): def __init__(self): super().__init__( - "test_action_server_node", - parameter_overrides=[rclpy.Parameter("use_sim_time", value=True)], + 'test_action_server_node', + parameter_overrides=[rclpy.Parameter('use_sim_time', value=True)], ) self._got_goal_future: rclpy.Future[ test_msgs.action.Fibonacci.Goal @@ -236,7 +237,7 @@ def __init__(self): self._srv = rclpy.action.ActionServer( self, test_msgs.action.Fibonacci, - "test_action", + 'test_action', self._handle_execute, handle_accepted_callback=self._handle_accepted, result_timeout=10, @@ -260,7 +261,8 @@ def _handle_accepted( # Wait to finish until instructed by test def advance_feedback(self) -> list[int] | None: - """Add an entry to the result in progress and sends a feedback message. + """ + Add an entry to the result in progress and sends a feedback message. Returns the current sequence in progress if incomplete, or None if the sequence is complete and it's time to complete the operation instead. @@ -281,7 +283,8 @@ def advance_feedback(self) -> list[int] | None: return self._sequence def execute(self) -> rclpy.action.server.ServerGoalHandle: - """Completes the action in progress. + """ + Completes the action in progress. Returns the handle to the goal executed. @@ -305,9 +308,9 @@ class ActionClientTestNode(rclpy.node.Node): """Node to test action client-side operation.""" def __init__(self): - super().__init__("test_action_client_node") + super().__init__('test_action_client_node') self._client = rclpy.action.ActionClient( - self, test_msgs.action.Fibonacci, "test_action" + self, test_msgs.action.Fibonacci, 'test_action' ) self._feedback_future: rclpy.Future[ test_msgs.action.Fibonacci.Feedback @@ -319,7 +322,8 @@ def __init__(self): def send_goal( self, order: int ) -> rclpy.Future[rclpy.action.client.ClientGoalHandle]: - """Send a new goal. + """ + Send a new goal. The future will contain the goal handle when the goal submission response has been received. 
@@ -372,10 +376,11 @@ def _handle_result_response( class TestEventsExecutor(unittest.TestCase): + def setUp(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) # Prevent nodes under test from discovering other random stuff to talk to - os.environ["ROS_AUTOMATIC_DISCOVERY_RANGE"] = "OFF" + os.environ['ROS_AUTOMATIC_DISCOVERY_RANGE'] = 'OFF' rclpy.init() self.executor = rclpy.experimental.EventsExecutor() @@ -531,7 +536,7 @@ def handle_message(future: rclpy.Future[test_msgs.msg.BasicTypes]) -> None: def test_service(self) -> None: server_node = ServiceServerTestNode() - got_request_future = server_node.expect_request(True, "test response 0") + got_request_future = server_node.expect_request(True, 'test response 0') self.executor.add_node(server_node) self._expect_future_not_done(got_request_future) @@ -542,10 +547,10 @@ def test_service(self) -> None: got_response_future = client_node.issue_request(7.1) self._check_service_request_future(got_request_future, 7.1) got_request_future = server_node.expect_request( - True, f"test response {i + 1}" + True, f'test response {i + 1}' ) self._check_service_response_future( - got_response_future, True, f"test response {i}" + got_response_future, True, f'test response {i}' ) # Destroy server node and retry issuing a request @@ -559,7 +564,7 @@ def test_service(self) -> None: def test_timers(self) -> None: realtime_node = TimerTestNode(index=0) rostime_node = TimerTestNode( - index=1, parameter_overrides=[rclpy.Parameter("use_sim_time", value=True)] + index=1, parameter_overrides=[rclpy.Parameter('use_sim_time', value=True)] ) clock_node = ClockPublisherNode() for node in [realtime_node, rostime_node, clock_node]: @@ -655,5 +660,5 @@ def test_action(self) -> None: self.assertFalse(goal_acknowledged_future.done()) # Already waited a bit -if __name__ == "__main__": +if __name__ == '__main__': unittest.main() From 9722f08b6840ee24f27f59177180c74cc3b8f1f0 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Tue, 7 Jan 2025 18:09:55 -0500 Subject: [PATCH 05/31] Switch to non-boost asio library Signed-off-by: Brad Martin --- rclpy/CMakeLists.txt | 5 +-- rclpy/package.xml | 3 +- .../rclpy/events_executor/events_executor.cpp | 16 ++++----- .../rclpy/events_executor/events_executor.hpp | 8 ++--- .../src/rclpy/events_executor/rcl_support.cpp | 6 ++-- .../src/rclpy/events_executor/rcl_support.hpp | 6 ++-- .../rclpy/events_executor/timers_manager.cpp | 33 ++++++++----------- .../rclpy/events_executor/timers_manager.hpp | 8 ++--- 8 files changed, 41 insertions(+), 44 deletions(-) diff --git a/rclpy/CMakeLists.txt b/rclpy/CMakeLists.txt index 948fdf541..cd6905035 100644 --- a/rclpy/CMakeLists.txt +++ b/rclpy/CMakeLists.txt @@ -17,7 +17,8 @@ endif() find_package(ament_cmake REQUIRED) find_package(ament_cmake_python REQUIRED) -find_package(Boost REQUIRED) +find_package(asio_cmake_module REQUIRED) +find_package(ASIO REQUIRED) find_package(lifecycle_msgs REQUIRED) find_package(rcl REQUIRED) find_package(rcl_action REQUIRED) @@ -125,7 +126,7 @@ target_include_directories(_rclpy_pybind11 PRIVATE src/rclpy/ ) target_link_libraries(_rclpy_pybind11 PRIVATE - Boost::boost + ${ASIO_LIBRARIES} lifecycle_msgs::lifecycle_msgs__rosidl_generator_c lifecycle_msgs::lifecycle_msgs__rosidl_typesupport_c rcl::rcl diff --git a/rclpy/package.xml b/rclpy/package.xml index 4a2fb0f87..19a3bfc1b 100644 --- a/rclpy/package.xml +++ b/rclpy/package.xml @@ -16,8 +16,9 @@ William Woodall ament_cmake + asio_cmake_module - boost + asio pybind11_vendor python3-dev rcpputils diff --git 
a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index 1224b9d9c..2f03325e0 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -34,7 +34,7 @@ #include #include -#include +#include namespace pl = std::placeholders; namespace py = pybind11; @@ -62,7 +62,7 @@ EventsExecutor::EventsExecutor(py::object context) // just establish our own signal handling directly instead. This unfortunately // bypasses the rclpy.init() options that allow a user to disable certain signal // handlers, but it doesn't look like we can do any better. - signals_.async_wait([this](const boost::system::error_code & ec, int) { + signals_.async_wait([this](const asio::error_code & ec, int) { if (!ec) { py::gil_scoped_acquire gil_acquire; // Don't call context.try_shutdown() here, because that can call back to us to @@ -91,8 +91,8 @@ pybind11::object EventsExecutor::create_task( // GIL is not held. We'll do manual refcounting on it instead. py::handle cb_task_handle = task; cb_task_handle.inc_ref(); - boost::asio::post(io_context_, - std::bind(&EventsExecutor::IterateTask, this, cb_task_handle)); + asio::post(io_context_, + std::bind(&EventsExecutor::IterateTask, this, cb_task_handle)); return task; } @@ -151,7 +151,7 @@ void EventsExecutor::wake() { if (!wake_pending_.exchange(true)) { // Update tracked entities. - boost::asio::post(io_context_, [this]() { + asio::post(io_context_, [this]() { py::gil_scoped_acquire gil_acquire; UpdateEntitiesFromNodes(!py::cast(rclpy_context_.attr("ok")())); }); @@ -175,7 +175,7 @@ void EventsExecutor::spin(std::optional timeout_sec) // touch Python will need to reacquire it though. py::gil_scoped_release gil_release; // Don't let asio auto stop if there's nothing to do - const auto work = boost::asio::make_work_guard(io_context_); + const auto work = asio::make_work_guard(io_context_); if (timeout_sec) { io_context_.run_for(std::chrono::duration_cast( std::chrono::duration(*timeout_sec))); @@ -201,7 +201,7 @@ void EventsExecutor::spin_once(std::optional timeout_sec) // touch Python will need to reacquire it though. py::gil_scoped_release gil_release; // Don't let asio auto stop if there's nothing to do - const auto work = boost::asio::make_work_guard(io_context_); + const auto work = asio::make_work_guard(io_context_); if (timeout_sec) { io_context_.run_one_for(std::chrono::duration_cast( std::chrono::duration(*timeout_sec))); @@ -866,7 +866,7 @@ void EventsExecutor::IterateTask(py::handle task) // TODO(bmartin427) Not sure this is correct; in particular, it's unclear how a task // that needs to wait a while can avoid either blocking or spinning. Revisit when // asyncio support is intentionally added. 
- boost::asio::post(io_context_, std::bind(&EventsExecutor::IterateTask, this, task)); + asio::post(io_context_, std::bind(&EventsExecutor::IterateTask, this, task)); } } diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index 689df19a2..8652907f7 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -31,8 +31,8 @@ #include #include -#include -#include +#include +#include #include "rcl_support.hpp" #include "scoped_with.hpp" @@ -170,8 +170,8 @@ class EventsExecutor { const pybind11::object asyncio_run_; const pybind11::object rclpy_task_; - boost::asio::io_context io_context_; - boost::asio::signal_set signals_; + asio::io_context io_context_; + asio::signal_set signals_; std::recursive_mutex nodes_mutex_; ///< Protects the node set pybind11::set nodes_; ///< The set of all nodes we're executing diff --git a/rclpy/src/rclpy/events_executor/rcl_support.cpp b/rclpy/src/rclpy/events_executor/rcl_support.cpp index 393ce5d2c..8eae4e064 100644 --- a/rclpy/src/rclpy/events_executor/rcl_support.cpp +++ b/rclpy/src/rclpy/events_executor/rcl_support.cpp @@ -18,7 +18,7 @@ #include #include -#include +#include namespace py = pybind11; @@ -35,7 +35,7 @@ extern "C" void RclEventCallbackTrampoline( (*cb)(number_of_events); } -RclCallbackManager::RclCallbackManager(const boost::asio::any_io_executor & executor) +RclCallbackManager::RclCallbackManager(const asio::any_io_executor & executor) : executor_(executor) {} RclCallbackManager::~RclCallbackManager() @@ -64,7 +64,7 @@ const void * RclCallbackManager::MakeCallback( CbEntry new_entry; new_entry.cb = std::make_unique>( [this, callback, key](size_t number_of_events) { - boost::asio::post(executor_, [this, callback, key, number_of_events]() { + asio::post(executor_, [this, callback, key, number_of_events]() { if (!owned_cbs_.count(key)) { // This callback has been removed, just drop it as the objects it may want // to touch may no longer exist. diff --git a/rclpy/src/rclpy/events_executor/rcl_support.hpp b/rclpy/src/rclpy/events_executor/rcl_support.hpp index 8b29e60ca..4d5d1be60 100644 --- a/rclpy/src/rclpy/events_executor/rcl_support.hpp +++ b/rclpy/src/rclpy/events_executor/rcl_support.hpp @@ -26,7 +26,7 @@ #include #include -#include +#include #include "scoped_with.hpp" @@ -51,7 +51,7 @@ class RclCallbackManager { /// All user callbacks will be posted on the @p executor given to the constructor. /// These callbacks will be invoked without the Python Global Interpreter Lock held, /// so if they need to access Python at all make sure to acquire that explicitly. - explicit RclCallbackManager(const boost::asio::any_io_executor & executor); + explicit RclCallbackManager(const asio::any_io_executor & executor); ~RclCallbackManager(); /// Creates a callback wrapper to be passed to RCL C functions. @p key should be a @@ -77,7 +77,7 @@ class RclCallbackManager { std::shared_ptr with; }; - boost::asio::any_io_executor executor_; + asio::any_io_executor executor_; /// The map key is the raw pointer to the RCL entity object (subscription, etc) /// associated with the callback. 
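The trampoline-plus-post pattern that rcl_support.cpp builds on is worth seeing in isolation. Below is a minimal standalone sketch (illustrative only, not part of the patch): a C-compatible function pointer recovers a C++ std::function from rcl's user_data argument, and that function does nothing but post the real work onto an asio executor, so the middleware thread never blocks and never touches Python. The name event_trampoline and the simulated registration in main() are hypothetical stand-ins for rcl_subscription_set_on_new_message_callback() and friends.

// --- illustrative sketch, not part of the patch ---
#include <cstddef>
#include <functional>
#include <iostream>

#include <asio.hpp>

// rcl invokes callbacks through a plain C function pointer plus user_data.
extern "C" void event_trampoline(const void * user_data, size_t number_of_events)
{
  // The registered user_data points at a long-lived std::function.
  auto cb = static_cast<const std::function<void(size_t)> *>(user_data);
  (*cb)(number_of_events);
}

int main()
{
  asio::io_context io;
  // The wrapper only posts; any GIL-touching work happens later, on the
  // io_context thread, much as RclCallbackManager::MakeCallback() arranges.
  std::function<void(size_t)> cb = [&io](size_t n) {
      asio::post(io, [n]() {std::cout << n << " event(s) ready\n";});
    };
  event_trampoline(&cb, 3);  // stand-in for the middleware firing the callback
  io.run();
}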
diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp index 0e62d241f..fab5bc195 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.cpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp @@ -16,16 +16,15 @@ #include +#include #include #include #include #include #include -#include -#include -#include -#include +#include +#include #include "rcl_support.hpp" @@ -87,7 +86,7 @@ extern "C" void RclTimerResetTrampoline(const void * user_data, size_t) /// the provided asio executor. class RclTimersManager::ClockManager { public: - ClockManager(const boost::asio::any_io_executor & executor, rcl_clock_t * clock) + ClockManager(const asio::any_io_executor & executor, rcl_clock_t * clock) : executor_(executor), clock_(clock) { // Need to establish a clock jump callback so we can tell when debug time is @@ -107,8 +106,7 @@ class RclTimersManager::ClockManager { on_debug = false; break; } - boost::asio::post(executor_, - std::bind(&ClockManager::HandleJump, this, on_debug)); + asio::post(executor_, std::bind(&ClockManager::HandleJump, this, on_debug)); }; if (RCL_RET_OK != rcl_clock_add_jump_callback(clock_, threshold, RclClockJumpTrampoline, &jump_cb_)) @@ -120,7 +118,7 @@ class RclTimersManager::ClockManager { // This isn't necessary yet but every timer will eventually depend on it. Again, // this could happen on any thread. reset_cb_ = [this]() { - boost::asio::post(executor_, std::bind(&ClockManager::UpdateTimers, this)); + asio::post(executor_, std::bind(&ClockManager::UpdateTimers, this)); }; // Initialize which timebase we're on @@ -217,7 +215,7 @@ class RclTimersManager::ClockManager { // Post the user callback to be invoked later once timing-sensitive code is // done. - boost::asio::post(executor_, timer_cb_pair.second); + asio::post(executor_, timer_cb_pair.second); // Update time until *next* call. next_call_ns = GetNextCallNanoseconds(timer_cb_pair.first); @@ -231,14 +229,11 @@ class RclTimersManager::ClockManager { // anticipate the next timer being ready. If we are, we'll just re-check // everything at the next jump callback. if (!on_debug_time_ && next_ready_ns) { - // Boost doesn't support nanoseconds by default. Round the conversion up so we - // don't wake early and find nothing to do. 
- const int64_t sleep_us = (*next_ready_ns + 999) / 1000; - next_update_wait_.expires_from_now(boost::posix_time::microseconds(sleep_us)); - next_update_wait_.async_wait([this](const boost::system::error_code & ec) { + next_update_wait_.expires_from_now(std::chrono::nanoseconds(*next_ready_ns)); + next_update_wait_.async_wait([this](const asio::error_code & ec) { if (!ec) { UpdateTimers(); - } else if (ec != boost::asio::error::operation_aborted) { + } else if (ec != asio::error::operation_aborted) { throw std::runtime_error("Error waiting for next timer: " + ec.message()); } }); @@ -266,17 +261,17 @@ class RclTimersManager::ClockManager { } } - boost::asio::any_io_executor executor_; + asio::any_io_executor executor_; rcl_clock_t * const clock_; ClockJumpCallbackT jump_cb_; TimerResetCallbackT reset_cb_; bool on_debug_time_{}; std::unordered_map> timers_; - boost::asio::deadline_timer next_update_wait_{executor_}; + asio::steady_timer next_update_wait_{executor_}; }; -RclTimersManager::RclTimersManager(const boost::asio::any_io_executor & executor) +RclTimersManager::RclTimersManager(const asio::any_io_executor & executor) : executor_(executor) {} RclTimersManager::~RclTimersManager() {} @@ -323,7 +318,7 @@ void RclTimersManager::RemoveTimer(rcl_timer_t * timer) } TimersManager::TimersManager( - const boost::asio::any_io_executor & executor, + const asio::any_io_executor & executor, std::function timer_ready_callback) : rcl_manager_(executor), ready_callback_(timer_ready_callback) {} diff --git a/rclpy/src/rclpy/events_executor/timers_manager.hpp b/rclpy/src/rclpy/events_executor/timers_manager.hpp index 00f51c968..d173d1cdb 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.hpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.hpp @@ -23,7 +23,7 @@ #include #include -#include +#include #include "events_executor/python_hasher.hpp" #include "events_executor/scoped_with.hpp" @@ -36,14 +36,14 @@ namespace events_executor /// This class manages low-level rcl timers in the system on behalf of EventsExecutor. class RclTimersManager { public: - explicit RclTimersManager(const boost::asio::any_io_executor &); + explicit RclTimersManager(const asio::any_io_executor &); ~RclTimersManager(); void AddTimer(rcl_timer_t *, std::function ready_callback); void RemoveTimer(rcl_timer_t *); private: - boost::asio::any_io_executor executor_; + asio::any_io_executor executor_; class ClockManager; /// Handlers for each distinct clock source in the system. @@ -56,7 +56,7 @@ class TimersManager { /// @p timer_ready_callback will be invoked with the timer handle whenever a managed /// timer is ready for servicing. 
TimersManager( - const boost::asio::any_io_executor &, + const asio::any_io_executor &, std::function timer_ready_callback); ~TimersManager(); From 1570aac710e0ccd3245bd46754ddda281dd8070f Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Thu, 9 Jan 2025 12:47:33 -0500 Subject: [PATCH 06/31] Use internal _rclpy C++ types instead of jumping through Python hoops Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 104 +++++++++--------- .../rclpy/events_executor/events_executor.hpp | 39 +++---- .../src/rclpy/events_executor/rcl_support.cpp | 38 ------- .../src/rclpy/events_executor/rcl_support.hpp | 36 +----- .../rclpy/events_executor/timers_manager.cpp | 4 +- 5 files changed, 68 insertions(+), 153 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index 2f03325e0..f16338ff7 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -36,6 +36,11 @@ #include +#include "client.hpp" +#include "context.hpp" +#include "service.hpp" +#include "subscription.hpp" + namespace pl = std::placeholders; namespace py = pybind11; @@ -313,7 +318,7 @@ void EventsExecutor::HandleAddedSubscription(py::handle subscription) { py::handle handle = subscription.attr("handle"); auto with = std::make_shared(handle); - const rcl_subscription_t * rcl_ptr = GetRclSubscription(handle); + const rcl_subscription_t * rcl_ptr = py::cast(handle).rcl_ptr(); const auto cb = std::bind(&EventsExecutor::HandleSubscriptionReady, this, subscription, pl::_1); if (RCL_RET_OK != rcl_subscription_set_on_new_message_callback( @@ -329,7 +334,7 @@ void EventsExecutor::HandleAddedSubscription(py::handle subscription) void EventsExecutor::HandleRemovedSubscription(py::handle subscription) { py::handle handle = subscription.attr("handle"); - const rcl_subscription_t * rcl_ptr = GetRclSubscription(handle); + const rcl_subscription_t * rcl_ptr = py::cast(handle).rcl_ptr(); if (RCL_RET_OK != rcl_subscription_set_on_new_message_callback(rcl_ptr, nullptr, nullptr)) { @@ -414,7 +419,7 @@ void EventsExecutor::HandleAddedClient(py::handle client) { py::handle handle = client.attr("handle"); auto with = std::make_shared(handle); - const rcl_client_t * rcl_ptr = GetRclClient(handle); + const rcl_client_t * rcl_ptr = py::cast(handle).rcl_ptr(); const auto cb = std::bind(&EventsExecutor::HandleClientReady, this, client, pl::_1); if (RCL_RET_OK != rcl_client_set_on_new_response_callback( rcl_ptr, RclEventCallbackTrampoline, @@ -429,7 +434,7 @@ void EventsExecutor::HandleAddedClient(py::handle client) void EventsExecutor::HandleRemovedClient(py::handle client) { py::handle handle = client.attr("handle"); - const rcl_client_t * rcl_ptr = GetRclClient(handle); + const rcl_client_t * rcl_ptr = py::cast(handle).rcl_ptr(); if (RCL_RET_OK != rcl_client_set_on_new_response_callback(rcl_ptr, nullptr, nullptr)) { @@ -483,7 +488,7 @@ void EventsExecutor::HandleAddedService(py::handle service) { py::handle handle = service.attr("handle"); auto with = std::make_shared(handle); - const rcl_service_t * rcl_ptr = GetRclService(handle); + const rcl_service_t * rcl_ptr = py::cast(handle).rcl_ptr(); const auto cb = std::bind(&EventsExecutor::HandleServiceReady, this, service, pl::_1); if (RCL_RET_OK != rcl_service_set_on_new_request_callback( rcl_ptr, RclEventCallbackTrampoline, @@ -498,7 +503,7 @@ void EventsExecutor::HandleAddedService(py::handle service) void 
EventsExecutor::HandleRemovedService(py::handle service) { py::handle handle = service.attr("handle"); - const rcl_service_t * rcl_ptr = GetRclService(handle); + const rcl_service_t * rcl_ptr = py::cast(handle).rcl_ptr(); if (RCL_RET_OK != rcl_service_set_on_new_request_callback(rcl_ptr, nullptr, nullptr)) { @@ -557,19 +562,16 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) if (py::cast(num_entities.attr("num_guard_conditions")) != 0) { throw std::runtime_error("Guard conditions not supported"); } - const py::object _rclpy = py::module_::import("rclpy.impl.implementation_singleton") - .attr("rclpy_implementation"); - py::object wait_set = _rclpy.attr("WaitSet")( - num_entities.attr("num_subscriptions"), 0, num_entities.attr("num_timers"), - num_entities.attr("num_clients"), num_entities.attr("num_services"), - num_entities.attr("num_events"), rclpy_context_.attr("handle")); - auto with_waitset = std::make_shared(wait_set); + auto wait_set = std::make_shared( + py::cast(num_entities.attr("num_subscriptions")), 0U, + py::cast(num_entities.attr("num_timers")), + py::cast(num_entities.attr("num_clients")), + py::cast(num_entities.attr("num_services")), + py::cast(num_entities.attr("num_events")), + *py::cast(rclpy_context_.attr("handle"))); + auto with_waitset = std::make_shared(py::cast(wait_set)); waitable.attr("add_to_wait_set")(wait_set); - rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); - // Be careful not to bind any py::objects into any callbacks, because that will try to - // do reference counting without the GIL. Ownership will be maintained with the info - // struct instance which is guaranteed to outlast any callback. - py::handle ws_handle = wait_set; + rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); // We null out each entry in the waitset as we set it up, so that the waitset itself // can be reused when something becomes ready to signal to the Waitable what's ready // and what's not. We also bind with_waitset into each callback we set up, to ensure @@ -580,8 +582,9 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) const rcl_subscription_t * const rcl_sub = rcl_waitset->subscriptions[i]; rcl_waitset->subscriptions[i] = nullptr; sub_entities.subscriptions.push_back(rcl_sub); - const auto cb = std::bind(&EventsExecutor::HandleWaitableSubReady, this, waitable, - rcl_sub, ws_handle, i, with_waitset, pl::_1); + const auto cb = std::bind( + &EventsExecutor::HandleWaitableSubReady, this, waitable, rcl_sub, wait_set, i, with_waitset, + pl::_1); if (RCL_RET_OK != rcl_subscription_set_on_new_message_callback( rcl_sub, RclEventCallbackTrampoline, @@ -602,16 +605,18 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) // Since this callback doesn't go through RclCallbackManager which would otherwise // own an instance of `with_waitable` associated with this callback, we'll bind it // directly into the callback instead. 
- const auto cb = std::bind(&EventsExecutor::HandleWaitableTimerReady, this, waitable, - rcl_timer, ws_handle, i, with_waitable, with_waitset); + const auto cb = std::bind( + &EventsExecutor::HandleWaitableTimerReady, this, waitable, rcl_timer, wait_set, i, + with_waitable, with_waitset); timers_manager_.rcl_manager().AddTimer(rcl_timer, cb); } for (size_t i = 0; i < rcl_waitset->size_of_clients; ++i) { const rcl_client_t * const rcl_client = rcl_waitset->clients[i]; rcl_waitset->clients[i] = nullptr; sub_entities.clients.push_back(rcl_client); - const auto cb = std::bind(&EventsExecutor::HandleWaitableClientReady, this, - waitable, rcl_client, ws_handle, i, with_waitset, pl::_1); + const auto cb = std::bind( + &EventsExecutor::HandleWaitableClientReady, this, waitable, rcl_client, wait_set, i, + with_waitset, pl::_1); if (RCL_RET_OK != rcl_client_set_on_new_response_callback( rcl_client, RclEventCallbackTrampoline, @@ -627,8 +632,9 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) const rcl_service_t * const rcl_service = rcl_waitset->services[i]; rcl_waitset->services[i] = nullptr; sub_entities.services.push_back(rcl_service); - const auto cb = std::bind(&EventsExecutor::HandleWaitableServiceReady, this, - waitable, rcl_service, ws_handle, i, with_waitset, pl::_1); + const auto cb = std::bind( + &EventsExecutor::HandleWaitableServiceReady, this, waitable, rcl_service, wait_set, i, + with_waitset, pl::_1); if (RCL_RET_OK != rcl_service_set_on_new_request_callback( rcl_service, RclEventCallbackTrampoline, @@ -644,8 +650,9 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) const rcl_event_t * const rcl_event = rcl_waitset->events[i]; rcl_waitset->events[i] = nullptr; sub_entities.events.push_back(rcl_event); - const auto cb = std::bind(&EventsExecutor::HandleWaitableEventReady, this, waitable, - rcl_event, ws_handle, i, with_waitset, pl::_1); + const auto cb = std::bind( + &EventsExecutor::HandleWaitableEventReady, this, waitable, rcl_event, wait_set, i, + with_waitset, pl::_1); if (RCL_RET_OK != rcl_event_set_callback(rcl_event, RclEventCallbackTrampoline, rcl_callback_manager_.MakeCallback( rcl_event, cb, with_waitable))) @@ -713,14 +720,14 @@ void EventsExecutor::HandleRemovedWaitable(py::handle waitable) } void EventsExecutor::HandleWaitableSubReady( - py::handle waitable, const rcl_subscription_t * rcl_sub, py::handle wait_set, + py::handle waitable, const rcl_subscription_t * rcl_sub, std::shared_ptr wait_set, size_t wait_set_sub_index, std::shared_ptr, size_t number_of_events) { py::gil_scoped_acquire gil_acquire; // We need to set up the wait set to make it look like our subscription object is // ready, and then poke the Waitable to do what it needs to do from there. - rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); + rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); rcl_waitset->subscriptions[wait_set_sub_index] = rcl_sub; HandleWaitableReady(waitable, wait_set, number_of_events); // Null out the wait set again so that other callbacks can use it on other objects. 
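Each of the HandleWaitable*Ready methods in this hunk leans on the same trick, which is easier to see stripped of the rcl types: the executor keeps one wait set per Waitable with every slot nulled out, and when an individual entity's rcl callback fires it temporarily writes just that entity's pointer back into its slot, so the Python Waitable perceives exactly one ready entity, then nulls the slot again so the wait set can be reused. A minimal sketch of that slot-poking pattern, where FakeWaitSet is a hypothetical stand-in for rcl_wait_set_t and dispatch_to_waitable() for the is_ready/take_data/execute round trip:

// --- illustrative sketch, not part of the patch ---
#include <array>
#include <cstddef>
#include <iostream>

struct FakeWaitSet
{
  std::array<const int *, 4> subscriptions{};  // all nullptr while idle
};

void dispatch_to_waitable(const FakeWaitSet & ws)
{
  // Stand-in for poking the Python Waitable with the wait set.
  for (size_t i = 0; i < ws.subscriptions.size(); ++i) {
    if (ws.subscriptions[i]) {
      std::cout << "slot " << i << " is ready\n";
    }
  }
}

void handle_sub_ready(FakeWaitSet & ws, const int * rcl_sub, size_t index)
{
  ws.subscriptions[index] = rcl_sub;  // make only this entity look ready
  dispatch_to_waitable(ws);
  ws.subscriptions[index] = nullptr;  // ready for the next event
}

int main()
{
  FakeWaitSet ws;
  const int sub_a = 0, sub_b = 0;
  handle_sub_ready(ws, &sub_a, 1);
  handle_sub_ready(ws, &sub_b, 3);
}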
@@ -728,18 +735,14 @@ void EventsExecutor::HandleWaitableSubReady( } void EventsExecutor::HandleWaitableTimerReady( - py::handle waitable, - const rcl_timer_t * rcl_timer, - py::handle wait_set, - size_t wait_set_timer_index, - std::shared_ptr, - std::shared_ptr) + py::handle waitable, const rcl_timer_t * rcl_timer, std::shared_ptr wait_set, + size_t wait_set_timer_index, std::shared_ptr, std::shared_ptr) { py::gil_scoped_acquire gil_acquire; // We need to set up the wait set to make it look like our timer object is ready, and // then poke the Waitable to do what it needs to do from there. - rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); + rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); rcl_waitset->timers[wait_set_timer_index] = rcl_timer; HandleWaitableReady(waitable, wait_set, 1); // Null out the wait set again so that other callbacks can use it on other objects. @@ -747,18 +750,14 @@ void EventsExecutor::HandleWaitableTimerReady( } void EventsExecutor::HandleWaitableClientReady( - py::handle waitable, - const rcl_client_t * rcl_client, - py::handle wait_set, - size_t wait_set_client_index, - std::shared_ptr, - size_t number_of_events) + py::handle waitable, const rcl_client_t * rcl_client, std::shared_ptr wait_set, + size_t wait_set_client_index, std::shared_ptr, size_t number_of_events) { py::gil_scoped_acquire gil_acquire; // We need to set up the wait set to make it look like our client object is ready, and // then poke the Waitable to do what it needs to do from there. - rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); + rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); rcl_waitset->clients[wait_set_client_index] = rcl_client; HandleWaitableReady(waitable, wait_set, number_of_events); // Null out the wait set again so that other callbacks can use it on other objects. @@ -766,18 +765,14 @@ void EventsExecutor::HandleWaitableClientReady( } void EventsExecutor::HandleWaitableServiceReady( - py::handle waitable, - const rcl_service_t * rcl_service, - py::handle wait_set, - size_t wait_set_service_index, - std::shared_ptr, - size_t number_of_events) + py::handle waitable, const rcl_service_t * rcl_service, std::shared_ptr wait_set, + size_t wait_set_service_index, std::shared_ptr, size_t number_of_events) { py::gil_scoped_acquire gil_acquire; // We need to set up the wait set to make it look like our service object is ready, // and then poke the Waitable to do what it needs to do from there. - rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); + rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); rcl_waitset->services[wait_set_service_index] = rcl_service; HandleWaitableReady(waitable, wait_set, number_of_events); // Null out the wait set again so that other callbacks can use it on other objects. @@ -785,14 +780,14 @@ void EventsExecutor::HandleWaitableServiceReady( } void EventsExecutor::HandleWaitableEventReady( - py::handle waitable, const rcl_event_t * rcl_event, py::handle wait_set, + py::handle waitable, const rcl_event_t * rcl_event, std::shared_ptr wait_set, size_t wait_set_event_index, std::shared_ptr, size_t number_of_events) { py::gil_scoped_acquire gil_acquire; // We need to set up the wait set to make it look like our event object is ready, and // then poke the Waitable to do what it needs to do from there. 
- rcl_wait_set_t * const rcl_waitset = GetRclWaitSet(wait_set); + rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); rcl_waitset->events[wait_set_event_index] = rcl_event; HandleWaitableReady(waitable, wait_set, number_of_events); // Null out the wait set again so that other callbacks can use it on other objects. @@ -800,8 +795,7 @@ void EventsExecutor::HandleWaitableEventReady( } void EventsExecutor::HandleWaitableReady( - py::handle waitable, py::handle wait_set, - size_t number_of_events) + py::handle waitable, std::shared_ptr wait_set, size_t number_of_events) { // Largely based on rclpy.Executor._take_waitable() // https://github.com/ros2/rclpy/blob/a19180c238d4d97ed2b58868d8fb7fa3e3b621f2/rclpy/rclpy/executors.py#L447-L454 diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index 8652907f7..8812abd6c 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -34,9 +34,10 @@ #include #include -#include "rcl_support.hpp" -#include "scoped_with.hpp" -#include "timers_manager.hpp" +#include "events_executor/rcl_support.hpp" +#include "events_executor/scoped_with.hpp" +#include "events_executor/timers_manager.hpp" +#include "wait_set.hpp" namespace rclpy { @@ -121,35 +122,25 @@ class EventsExecutor { void HandleAddedWaitable(pybind11::handle); void HandleRemovedWaitable(pybind11::handle); void HandleWaitableSubReady( - pybind11::handle waitable, const rcl_subscription_t *, - pybind11::handle wait_set, size_t wait_set_sub_index, - std::shared_ptr with_waitset, - size_t number_of_events); + pybind11::handle waitable, const rcl_subscription_t *, std::shared_ptr wait_set, + size_t wait_set_sub_index, std::shared_ptr with_waitset, size_t number_of_events); void HandleWaitableTimerReady( - pybind11::handle waitable, const rcl_timer_t *, - pybind11::handle wait_set, size_t wait_set_timer_index, - std::shared_ptr with_waitable, + pybind11::handle waitable, const rcl_timer_t *, std::shared_ptr wait_set, + size_t wait_set_timer_index, std::shared_ptr with_waitable, std::shared_ptr with_waitset); void HandleWaitableClientReady( - pybind11::handle waitable, const rcl_client_t *, - pybind11::handle wait_set, - size_t wait_set_client_index, - std::shared_ptr with_waitset, + pybind11::handle waitable, const rcl_client_t *, std::shared_ptr wait_set, + size_t wait_set_client_index, std::shared_ptr with_waitset, size_t number_of_events); void HandleWaitableServiceReady( - pybind11::handle waitable, const rcl_service_t *, - pybind11::handle wait_set, - size_t wait_set_service_index, - std::shared_ptr with_waitset, + pybind11::handle waitable, const rcl_service_t *, std::shared_ptr wait_set, + size_t wait_set_service_index, std::shared_ptr with_waitset, size_t number_of_events); void HandleWaitableEventReady( - pybind11::handle waitable, const rcl_event_t *, - pybind11::handle wait_set, size_t wait_set_event_index, - std::shared_ptr with_waitset, - size_t number_of_events); + pybind11::handle waitable, const rcl_event_t *, std::shared_ptr wait_set, + size_t wait_set_event_index, std::shared_ptr with_waitset, size_t number_of_events); void HandleWaitableReady( - pybind11::handle waitable, pybind11::handle wait_set, - size_t number_of_events); + pybind11::handle waitable, std::shared_ptr wait_set, size_t number_of_events); /// Helper for create_task(). @p task needs to have had one reference manually added /// to it. See create_task() implementation for details. 
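That create_task()/IterateTask() reference-counting dance deserves a closer look, since it is the one place a Python object is carried through the asio queue. A bare py::object cannot be captured in a posted handler, because copying or destroying it would adjust Python refcounts while the GIL may not be held; instead one reference is added manually up front and dropped under the GIL once the handler runs. Here is a compilable sketch of just that handoff, assuming standalone asio and pybind11's embedding API; schedule_iteration is a hypothetical name, not the patch's:

// --- illustrative sketch, not part of the patch ---
#include <asio.hpp>
#include <pybind11/embed.h>

namespace py = pybind11;

void schedule_iteration(asio::io_context & io, py::object task)
{
  py::handle h = task;
  h.inc_ref();  // keep the task alive while it sits in the asio queue
  // Capturing the bare handle is safe: copying a py::handle never touches
  // refcounts, unlike copying a py::object.
  asio::post(io, [h]() {
      py::gil_scoped_acquire gil;  // the io thread may not hold the GIL
      h();                         // run one iteration of the task
      h.dec_ref();                 // release the reference taken above
    });
}

int main()
{
  py::scoped_interpreter interp;
  asio::io_context io;
  schedule_iteration(io, py::eval("lambda: print('task iterated')"));
  io.run();
}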
diff --git a/rclpy/src/rclpy/events_executor/rcl_support.cpp b/rclpy/src/rclpy/events_executor/rcl_support.cpp index 8eae4e064..ab1c01922 100644 --- a/rclpy/src/rclpy/events_executor/rcl_support.cpp +++ b/rclpy/src/rclpy/events_executor/rcl_support.cpp @@ -14,8 +14,6 @@ // limitations under the License. #include "events_executor/rcl_support.hpp" -#include -#include #include #include @@ -86,41 +84,5 @@ void RclCallbackManager::RemoveCallback(const void * key) } } -namespace -{ -// This helper function is used for retrieving rcl pointers from _rclpy C++ objects. -// Because _rclpy doesn't install its C++ headers for public use, it's difficult to use -// the C++ classes directly. But, we can treat them as if they are Python objects using -// their defined Python API. Unfortunately the python interfaces convert the returned -// pointer to integers, so recovering that looks a little weird. -template -RclT * GetRclPtr(py::handle py_ent_handle) -{ - return reinterpret_cast(py::cast(py_ent_handle.attr("pointer"))); -} -} // namespace - -rcl_subscription_t * GetRclSubscription(py::handle sub_handle) -{ - return GetRclPtr(sub_handle); -} - -rcl_timer_t * GetRclTimer(py::handle timer_handle) -{ - return GetRclPtr(timer_handle); -} - -rcl_client_t * GetRclClient(py::handle cl_handle) -{ - return GetRclPtr(cl_handle); -} - -rcl_service_t * GetRclService(py::handle srv_handle) -{ - return GetRclPtr(srv_handle); -} - -rcl_wait_set_t * GetRclWaitSet(py::handle ws) {return GetRclPtr(ws);} - } // namespace events_executor } // namespace rclpy diff --git a/rclpy/src/rclpy/events_executor/rcl_support.hpp b/rclpy/src/rclpy/events_executor/rcl_support.hpp index 4d5d1be60..91e1db42b 100644 --- a/rclpy/src/rclpy/events_executor/rcl_support.hpp +++ b/rclpy/src/rclpy/events_executor/rcl_support.hpp @@ -14,21 +14,14 @@ // limitations under the License. #pragma once -#include - -#include -#include -#include -#include -#include - #include +#include #include #include #include -#include "scoped_with.hpp" +#include "events_executor/scoped_with.hpp" namespace rclpy { @@ -84,30 +77,5 @@ class RclCallbackManager { std::unordered_map owned_cbs_; }; -/// Returns the RCL subscription object pointer from a subscription handle (i.e. the -/// handle attribute of an rclpy Subscription object, which is a _rclpy C++ -/// Subscription object). Assumes that a ScopedWith has already been entered on the -/// given handle. -rcl_subscription_t * GetRclSubscription(pybind11::handle); - -/// Returns the RCL timer object pointer from a timer handle (i.e. the handle attribute -/// of an rclpy Timer object, which is a _rclpy C++ Timer object). Assumes that a -/// ScopedWith has already been entered on the given handle. -rcl_timer_t * GetRclTimer(pybind11::handle); - -/// Returns the RCL client object pointer from a client handle (i.e. the handle -/// attribute of an rclpy Client object, which is a _rclpy C++ Client object). Assumes -/// that a ScopedWith has already been entered on the given handle. -rcl_client_t * GetRclClient(pybind11::handle); - -/// Returns the RCL service object pointer from a service handle (i.e. the handle -/// attribute of an rclpy Service object, which is a _rclpy C++ Service object). -/// Assumes that a ScopedWith has already been entered on the given handle. -rcl_service_t * GetRclService(pybind11::handle); - -/// Returns the RCL wait set object pointer from a _rclpy C++ WaitSet object. Assumes -/// that a ScopedWith has already been entered on the given object. 
-rcl_wait_set_t * GetRclWaitSet(pybind11::handle); - } // namespace events_executor } // namespace rclpy diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp index fab5bc195..b84bc018a 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.cpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp @@ -26,7 +26,7 @@ #include #include -#include "rcl_support.hpp" +#include "timer.hpp" namespace py = pybind11; @@ -329,7 +329,7 @@ void TimersManager::AddTimer(py::handle timer) PyRclMapping mapping; py::handle handle = timer.attr("handle"); mapping.with = std::make_unique(handle); - mapping.rcl_ptr = GetRclTimer(handle); + mapping.rcl_ptr = py::cast(handle).rcl_ptr(); rcl_manager_.AddTimer(mapping.rcl_ptr, std::bind(ready_callback_, timer)); timer_mappings_[timer] = std::move(mapping); } From 99fdd9f707b0d6f705f13867f8554a320ad093e9 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Thu, 9 Jan 2025 21:03:50 +0000 Subject: [PATCH 07/31] Reformat with clang-format, then uncrustify again clang-format fixes things that uncrustify tends to leave alone, but then uncrustify seems slightly unhappy with that result. Also reflow comments at 100 columns. This uses the .clang-format file from the ament-clang-format package, with the exception of SortIncludes being set to false, because I didn't like what it tried to do with includes without that override. Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 418 ++++++++---------- .../rclpy/events_executor/events_executor.hpp | 56 +-- .../src/rclpy/events_executor/rcl_support.cpp | 42 +- .../src/rclpy/events_executor/rcl_support.hpp | 44 +- .../src/rclpy/events_executor/scoped_with.hpp | 8 +- .../rclpy/events_executor/timers_manager.cpp | 153 +++---- .../rclpy/events_executor/timers_manager.hpp | 17 +- 7 files changed, 336 insertions(+), 402 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index f16338ff7..692e7df2d 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -55,25 +55,23 @@ EventsExecutor::EventsExecutor(py::object context) rclpy_task_(py::module_::import("rclpy.task").attr("Task")), signals_(io_context_, SIGINT, SIGTERM), rcl_callback_manager_(io_context_.get_executor()), - timers_manager_(io_context_.get_executor(), - std::bind(&EventsExecutor::HandleTimerReady, this, pl::_1)) + timers_manager_( + io_context_.get_executor(), std::bind(&EventsExecutor::HandleTimerReady, this, pl::_1)) { - // rclpy.Executor creates a sigint handling guard condition here. This is necessary - // because a sleeping event loop won't notice Ctrl-C unless some other event wakes - // it up otherwise. + // rclpy.Executor creates a sigint handling guard condition here. This is necessary because a + // sleeping event loop won't notice Ctrl-C unless some other event wakes it up otherwise. // - // Unfortunately it doesn't look like we can either support generic guard conditions - // or hook into the existing rclpy signal handling in any other useful way. We'll - // just establish our own signal handling directly instead. This unfortunately - // bypasses the rclpy.init() options that allow a user to disable certain signal - // handlers, but it doesn't look like we can do any better. 
+ // Unfortunately it doesn't look like we can either support generic guard conditions or hook into + // the existing rclpy signal handling in any other useful way. We'll just establish our own + // signal handling directly instead. This unfortunately bypasses the rclpy.init() options that + // allow a user to disable certain signal handlers, but it doesn't look like we can do any better. signals_.async_wait([this](const asio::error_code & ec, int) { if (!ec) { py::gil_scoped_acquire gil_acquire; - // Don't call context.try_shutdown() here, because that can call back to us to - // request a blocking shutdown(), which doesn't make any sense because we have - // to be spinning to process the callback that's asked to wait for spinning to - // stop. We'll have to call that later outside of any spin loop. + // Don't call context.try_shutdown() here, because that can call back to us to request a + // blocking shutdown(), which doesn't make any sense because we have to be spinning to + // process the callback that's asked to wait for spinning to stop. We'll have to call that + // later outside of any spin loop. // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/__init__.py#L105-L109 sigint_pending_.store(true); io_context_.stop(); @@ -84,27 +82,25 @@ EventsExecutor::EventsExecutor(py::object context) EventsExecutor::~EventsExecutor() {shutdown();} pybind11::object EventsExecutor::create_task( - py::object callback, py::args args, - const py::kwargs & kwargs) + py::object callback, py::args args, const py::kwargs & kwargs) { // Create and return a rclpy.task.Task() object, and schedule it to be called later. using py::literals::operator""_a; py::object task = rclpy_task_(callback, args, kwargs, "executor"_a = py::cast(this)); - // The Task needs to be owned at least until we invoke it from the callback we post, - // however we can't pass a bare py::object because that's going to try to do Python - // refcounting while preparing to go into or coming back from the callback, while the - // GIL is not held. We'll do manual refcounting on it instead. + // The Task needs to be owned at least until we invoke it from the callback we post, however we + // can't pass a bare py::object because that's going to try to do Python refcounting while + // preparing to go into or coming back from the callback, while the GIL is not held. We'll do + // manual refcounting on it instead. py::handle cb_task_handle = task; cb_task_handle.inc_ref(); - asio::post(io_context_, - std::bind(&EventsExecutor::IterateTask, this, cb_task_handle)); + asio::post(io_context_, std::bind(&EventsExecutor::IterateTask, this, cb_task_handle)); return task; } bool EventsExecutor::shutdown(std::optional timeout) { - // NOTE: The rclpy context can invoke this with a lock on the context held. Therefore - // we must not try to go access that context during this method or we can deadlock. + // NOTE: The rclpy context can invoke this with a lock on the context held. Therefore we must not + // try to go access that context during this method or we can deadlock. // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/context.py#L101-L103 io_context_.stop(); @@ -133,8 +129,7 @@ bool EventsExecutor::add_node(py::object node) return false; } nodes_.add(node); - // Caution, the Node executor setter method calls executor.add_node() again making - // this reentrant. + // Caution, the Node executor setter method calls executor.add_node() again making this reentrant. 
node.attr("executor") = py::cast(this); wake(); return true; @@ -163,10 +158,9 @@ void EventsExecutor::wake() } } -// NOTE: The timeouts on the below two methods are always realtime even if we're running -// in debug time. This is true of other executors too, because debug time is always -// associated with a specific node and more than one node may be connected to an -// executor instance. +// NOTE: The timeouts on the below two methods are always realtime even if we're running in debug +// time. This is true of other executors too, because debug time is always associated with a +// specific node and more than one node may be connected to an executor instance. // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/executors.py#L184-L185 void EventsExecutor::spin(std::optional timeout_sec) @@ -176,14 +170,14 @@ void EventsExecutor::spin(std::optional timeout_sec) if (!spin_lock) { throw std::runtime_error("Attempt to spin an already-spinning Executor"); } - // Release the GIL while we block. Any callbacks on the io_context that want to - // touch Python will need to reacquire it though. + // Release the GIL while we block. Any callbacks on the io_context that want to touch Python + // will need to reacquire it though. py::gil_scoped_release gil_release; // Don't let asio auto stop if there's nothing to do const auto work = asio::make_work_guard(io_context_); if (timeout_sec) { io_context_.run_for(std::chrono::duration_cast( - std::chrono::duration(*timeout_sec))); + std::chrono::duration(*timeout_sec))); } else { io_context_.run(); } @@ -202,14 +196,14 @@ void EventsExecutor::spin_once(std::optional timeout_sec) if (!spin_lock) { throw std::runtime_error("Attempt to spin an already-spinning Executor"); } - // Release the GIL while we block. Any callbacks on the io_context that want to - // touch Python will need to reacquire it though. + // Release the GIL while we block. Any callbacks on the io_context that want to touch Python + // will need to reacquire it though. py::gil_scoped_release gil_release; // Don't let asio auto stop if there's nothing to do const auto work = asio::make_work_guard(io_context_); if (timeout_sec) { io_context_.run_one_for(std::chrono::duration_cast( - std::chrono::duration(*timeout_sec))); + std::chrono::duration(*timeout_sec))); } else { io_context_.run_one(); } @@ -222,26 +216,23 @@ void EventsExecutor::spin_once(std::optional timeout_sec) } void EventsExecutor::spin_until_future_complete( - py::handle future, - std::optional timeout_sec) + py::handle future, std::optional timeout_sec) { - future.attr("add_done_callback")( - py::cpp_function([this](py::handle) {io_context_.stop();})); + future.attr("add_done_callback")(py::cpp_function([this](py::handle) {io_context_.stop();})); spin(timeout_sec); } void EventsExecutor::spin_once_until_future_complete( py::handle future, std::optional timeout_sec) { - future.attr("add_done_callback")( - py::cpp_function([this](py::handle) {io_context_.stop();})); + future.attr("add_done_callback")(py::cpp_function([this](py::handle) {io_context_.stop();})); spin_once(timeout_sec); } void EventsExecutor::UpdateEntitiesFromNodes(bool shutdown) { - // Clear pending flag as early as possible, so we error on the side of retriggering - // a few harmless updates rather than potentially missing important additions. + // Clear pending flag as early as possible, so we error on the side of retriggering a few harmless + // updates rather than potentially missing important additions. 
wake_pending_.store(false); // Collect all entities currently associated with our nodes @@ -260,9 +251,8 @@ void EventsExecutor::UpdateEntitiesFromNodes(bool shutdown) services.attr("update")(py::set(node.attr("services"))); waitables.attr("update")(py::set(node.attr("waitables"))); - // It doesn't seem to be possible to support guard conditions with a - // callback-based (as opposed to waitset-based) API. Fortunately we don't - // seem to need to. + // It doesn't seem to be possible to support guard conditions with a callback-based (as + // opposed to waitset-based) API. Fortunately we don't seem to need to. if (!py::set(node.attr("guards")).empty()) { throw std::runtime_error("Guard conditions not supported"); } @@ -274,21 +264,22 @@ void EventsExecutor::UpdateEntitiesFromNodes(bool shutdown) } // Perform updates for added and removed entities - UpdateEntitySet(subscriptions_, subscriptions, - std::bind(&EventsExecutor::HandleAddedSubscription, this, pl::_1), - std::bind(&EventsExecutor::HandleRemovedSubscription, this, pl::_1)); - UpdateEntitySet(timers_, timers, - std::bind(&EventsExecutor::HandleAddedTimer, this, pl::_1), - std::bind(&EventsExecutor::HandleRemovedTimer, this, pl::_1)); - UpdateEntitySet(clients_, clients, - std::bind(&EventsExecutor::HandleAddedClient, this, pl::_1), - std::bind(&EventsExecutor::HandleRemovedClient, this, pl::_1)); - UpdateEntitySet(services_, services, - std::bind(&EventsExecutor::HandleAddedService, this, pl::_1), - std::bind(&EventsExecutor::HandleRemovedService, this, pl::_1)); - UpdateEntitySet(waitables_, waitables, - std::bind(&EventsExecutor::HandleAddedWaitable, this, pl::_1), - std::bind(&EventsExecutor::HandleRemovedWaitable, this, pl::_1)); + UpdateEntitySet( + subscriptions_, subscriptions, + std::bind(&EventsExecutor::HandleAddedSubscription, this, pl::_1), + std::bind(&EventsExecutor::HandleRemovedSubscription, this, pl::_1)); + UpdateEntitySet( + timers_, timers, std::bind(&EventsExecutor::HandleAddedTimer, this, pl::_1), + std::bind(&EventsExecutor::HandleRemovedTimer, this, pl::_1)); + UpdateEntitySet( + clients_, clients, std::bind(&EventsExecutor::HandleAddedClient, this, pl::_1), + std::bind(&EventsExecutor::HandleRemovedClient, this, pl::_1)); + UpdateEntitySet( + services_, services, std::bind(&EventsExecutor::HandleAddedService, this, pl::_1), + std::bind(&EventsExecutor::HandleRemovedService, this, pl::_1)); + UpdateEntitySet( + waitables_, waitables, std::bind(&EventsExecutor::HandleAddedWaitable, this, pl::_1), + std::bind(&EventsExecutor::HandleRemovedWaitable, this, pl::_1)); if (shutdown) { // Stop spinning after everything is torn down. 
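The five UpdateEntitySet() calls above all reduce to one generic diff-and-dispatch step: compare the set of entities tracked at the previous wake() against the set just harvested from the nodes, invoke the added handler for anything new and the removed handler for anything gone, then adopt the new set as current. A self-contained sketch of that step, using std::set<std::string> as a stand-in for the py::set of entity objects:

// --- illustrative sketch, not part of the patch ---
#include <functional>
#include <iostream>
#include <set>
#include <string>

void update_entity_set(
  std::set<std::string> & tracked, const std::set<std::string> & current,
  const std::function<void(const std::string &)> & on_added,
  const std::function<void(const std::string &)> & on_removed)
{
  for (const auto & e : current) {
    if (!tracked.count(e)) {on_added(e);}
  }
  for (const auto & e : tracked) {
    if (!current.count(e)) {on_removed(e);}
  }
  tracked = current;  // the new set becomes the baseline for the next wake()
}

int main()
{
  std::set<std::string> tracked{"sub_a", "sub_b"};
  update_entity_set(
    tracked, {"sub_b", "sub_c"},
    [](const std::string & e) {std::cout << "added " << e << "\n";},
    [](const std::string & e) {std::cout << "removed " << e << "\n";});
}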
@@ -319,15 +310,15 @@ void EventsExecutor::HandleAddedSubscription(py::handle subscription) py::handle handle = subscription.attr("handle"); auto with = std::make_shared(handle); const rcl_subscription_t * rcl_ptr = py::cast(handle).rcl_ptr(); - const auto cb = - std::bind(&EventsExecutor::HandleSubscriptionReady, this, subscription, pl::_1); - if (RCL_RET_OK != rcl_subscription_set_on_new_message_callback( - rcl_ptr, RclEventCallbackTrampoline, - rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) + const auto cb = std::bind(&EventsExecutor::HandleSubscriptionReady, this, subscription, pl::_1); + if ( + RCL_RET_OK != + rcl_subscription_set_on_new_message_callback( + rcl_ptr, RclEventCallbackTrampoline, rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) { throw std::runtime_error( - std::string("Failed to set the on new message callback for subscription: ") + - rcl_get_error_string().str); + std::string("Failed to set the on new message callback for subscription: ") + + rcl_get_error_string().str); } } @@ -335,42 +326,36 @@ void EventsExecutor::HandleRemovedSubscription(py::handle subscription) { py::handle handle = subscription.attr("handle"); const rcl_subscription_t * rcl_ptr = py::cast(handle).rcl_ptr(); - if (RCL_RET_OK != - rcl_subscription_set_on_new_message_callback(rcl_ptr, nullptr, nullptr)) - { + if (RCL_RET_OK != rcl_subscription_set_on_new_message_callback(rcl_ptr, nullptr, nullptr)) { throw std::runtime_error( - std::string("Failed to clear the on new message callback for subscription: ") + - rcl_get_error_string().str); + std::string("Failed to clear the on new message callback for subscription: ") + + rcl_get_error_string().str); } rcl_callback_manager_.RemoveCallback(rcl_ptr); } -void EventsExecutor::HandleSubscriptionReady( - py::handle subscription, - size_t number_of_events) +void EventsExecutor::HandleSubscriptionReady(py::handle subscription, size_t number_of_events) { py::gil_scoped_acquire gil_acquire; // Largely based on rclpy.Executor._take_subscription() and _execute_subcription(). // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/executors.py#L355-L367 // - // NOTE: Simple object attributes we can count on to be owned by the parent object, - // but bound method calls and function return values need to be owned by us. + // NOTE: Simple object attributes we can count on to be owned by the parent object, but bound + // method calls and function return values need to be owned by us. const py::handle handle = subscription.attr("handle"); const py::object take_message = handle.attr("take_message"); const py::handle msg_type = subscription.attr("msg_type"); const py::handle raw = subscription.attr("raw"); - const int callback_type = - py::cast(subscription.attr("_callback_type").attr("value")); - const int message_only = py::cast( - subscription.attr("CallbackType").attr("MessageOnly").attr("value")); + const int callback_type = py::cast(subscription.attr("_callback_type").attr("value")); + const int message_only = + py::cast(subscription.attr("CallbackType").attr("MessageOnly").attr("value")); const py::handle callback = subscription.attr("callback"); - // rmw_cyclonedds has a bug which causes number_of_events to be zero in the case where - // messages were waiting for us when we registered the callback, and the topic is - // using KEEP_ALL history policy. We'll work around that by checking for zero and - // just taking messages until we start getting None in that case. 
-  // https://github.com/ros2/rmw_cyclonedds/issues/509
+  // rmw_cyclonedds has a bug which causes number_of_events to be zero in the case where messages
+  // were waiting for us when we registered the callback, and the topic is using KEEP_ALL history
+  // policy. We'll work around that by checking for zero and just taking messages until we start
+  // getting None in that case. https://github.com/ros2/rmw_cyclonedds/issues/509
   bool got_none = false;
   for (size_t i = 0; number_of_events ? i < number_of_events : !got_none; ++i) {
     py::object msg_info = take_message(msg_type, raw);
@@ -391,23 +376,17 @@ void EventsExecutor::HandleSubscriptionReady(
   }
 }
 
-void EventsExecutor::HandleAddedTimer(py::handle timer)
-{
-  timers_manager_.AddTimer(timer);
-}
+void EventsExecutor::HandleAddedTimer(py::handle timer) {timers_manager_.AddTimer(timer);}
 
-void EventsExecutor::HandleRemovedTimer(py::handle timer)
-{
-  timers_manager_.RemoveTimer(timer);
-}
+void EventsExecutor::HandleRemovedTimer(py::handle timer) {timers_manager_.RemoveTimer(timer);}
 
 void EventsExecutor::HandleTimerReady(py::handle timer)
 {
   py::gil_scoped_acquire gil_acquire;
 
   try {
-    // Unlike most rclpy objects this doesn't document whether it's a Callable or might
-    // be a Coroutine. Let's hope it's the former. 🤞
+    // Unlike most rclpy objects this doesn't document whether it's a Callable or might be a
+    // Coroutine. Let's hope it's the former.
     timer.attr("callback")();
   } catch (const py::error_already_set & e) {
     HandleCallbackExceptionInNodeEntity(e, timer, "timers");
@@ -421,13 +400,14 @@ void EventsExecutor::HandleAddedClient(py::handle client)
   auto with = std::make_shared<ScopedWith>(handle);
   const rcl_client_t * rcl_ptr = py::cast<const Client &>(handle).rcl_ptr();
   const auto cb = std::bind(&EventsExecutor::HandleClientReady, this, client, pl::_1);
-  if (RCL_RET_OK != rcl_client_set_on_new_response_callback(
-      rcl_ptr, RclEventCallbackTrampoline,
-      rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with)))
+  if (
+    RCL_RET_OK !=
+    rcl_client_set_on_new_response_callback(
+      rcl_ptr, RclEventCallbackTrampoline, rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with)))
   {
     throw std::runtime_error(
-      std::string("Failed to set the on new response callback for client: ") +
-      rcl_get_error_string().str);
+      std::string("Failed to set the on new response callback for client: ") +
+      rcl_get_error_string().str);
   }
 }
 
@@ -435,12 +415,10 @@ void EventsExecutor::HandleRemovedClient(py::handle client)
 {
   py::handle handle = client.attr("handle");
   const rcl_client_t * rcl_ptr = py::cast<const Client &>(handle).rcl_ptr();
-  if (RCL_RET_OK !=
-    rcl_client_set_on_new_response_callback(rcl_ptr, nullptr, nullptr))
-  {
+  if (RCL_RET_OK != rcl_client_set_on_new_response_callback(rcl_ptr, nullptr, nullptr)) {
     throw std::runtime_error(
-      std::string("Failed to clear the on new response callback for client: ") +
-      rcl_get_error_string().str);
+      std::string("Failed to clear the on new response callback for client: ") +
+      rcl_get_error_string().str);
   }
   rcl_callback_manager_.RemoveCallback(rcl_ptr);
 }
 
@@ -490,13 +468,14 @@ void EventsExecutor::HandleAddedService(py::handle service)
   auto with = std::make_shared<ScopedWith>(handle);
   const rcl_service_t * rcl_ptr = py::cast<const Service &>(handle).rcl_ptr();
   const auto cb = std::bind(&EventsExecutor::HandleServiceReady, this, service, pl::_1);
-  if (RCL_RET_OK != rcl_service_set_on_new_request_callback(
-      rcl_ptr, RclEventCallbackTrampoline,
-      rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with)))
+  if (
+    RCL_RET_OK !=
+    rcl_service_set_on_new_request_callback(
+      rcl_ptr, RclEventCallbackTrampoline, rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with)))
   {
     throw std::runtime_error(
-      std::string("Failed to set the on new request callback for service: ") +
-      rcl_get_error_string().str);
+      std::string("Failed to set the on new request callback for service: ") +
+      rcl_get_error_string().str);
   }
 }
 
@@ -504,12 +483,10 @@ void EventsExecutor::HandleRemovedService(py::handle service)
 {
   py::handle handle = service.attr("handle");
   const rcl_service_t * rcl_ptr = py::cast<const Service &>(handle).rcl_ptr();
-  if (RCL_RET_OK !=
-    rcl_service_set_on_new_request_callback(rcl_ptr, nullptr, nullptr))
-  {
+  if (RCL_RET_OK != rcl_service_set_on_new_request_callback(rcl_ptr, nullptr, nullptr)) {
     throw std::runtime_error(
-      std::string("Failed to clear the on new request callback for service: ") +
-      rcl_get_error_string().str);
+      std::string("Failed to clear the on new request callback for service: ") +
+      rcl_get_error_string().str);
   }
   rcl_callback_manager_.RemoveCallback(rcl_ptr);
 }
 
@@ -550,13 +527,12 @@ void EventsExecutor::HandleServiceReady(py::handle service, size_t number_of_eve
 
 void EventsExecutor::HandleAddedWaitable(py::handle waitable)
 {
-  // The Waitable API is too abstract for us to work with directly; it only exposes APIs
-  // for dealing with wait sets, and all of the rcl callback API requires knowing
-  // exactly what kinds of rcl objects you're working with. We'll try to figure out
-  // what kind of stuff is hiding behind the abstraction by having the Waitable add
-  // itself to a wait set, then take stock of what all ended up there. We'll also have
-  // to hope that no Waitable implementations ever change their component entities over
-  // their lifetimes. 😬
+  // The Waitable API is too abstract for us to work with directly; it only exposes APIs for dealing
+  // with wait sets, and all of the rcl callback API requires knowing exactly what kinds of rcl
+  // objects you're working with. We'll try to figure out what kind of stuff is hiding behind the
+  // abstraction by having the Waitable add itself to a wait set, then take stock of what all ended
+  // up there. We'll also have to hope that no Waitable implementations ever change their component
+  // entities over their lifetimes.
   auto with_waitable = std::make_shared<ScopedWith>(waitable);
   const py::object num_entities = waitable.attr("get_num_entities")();
   if (py::cast<size_t>(num_entities.attr("num_guard_conditions")) != 0) {
@@ -572,11 +548,10 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable)
   auto with_waitset = std::make_shared<ScopedWith>(py::cast(wait_set));
   waitable.attr("add_to_wait_set")(wait_set);
   rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr();
-  // We null out each entry in the waitset as we set it up, so that the waitset itself
-  // can be reused when something becomes ready to signal to the Waitable what's ready
-  // and what's not. We also bind with_waitset into each callback we set up, to ensure
-  // that object doesn't get destroyed while any of these callbacks are still
-  // registered.
+  // We null out each entry in the waitset as we set it up, so that the waitset itself can be reused
+  // when something becomes ready to signal to the Waitable what's ready and what's not. We also
+  // bind with_waitset into each callback we set up, to ensure that object doesn't get destroyed
+  // while any of these callbacks are still registered.
WaitableSubEntities sub_entities; for (size_t i = 0; i < rcl_waitset->size_of_subscriptions; ++i) { const rcl_subscription_t * const rcl_sub = rcl_waitset->subscriptions[i]; @@ -585,26 +560,25 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) const auto cb = std::bind( &EventsExecutor::HandleWaitableSubReady, this, waitable, rcl_sub, wait_set, i, with_waitset, pl::_1); - if (RCL_RET_OK != - rcl_subscription_set_on_new_message_callback( - rcl_sub, RclEventCallbackTrampoline, - rcl_callback_manager_.MakeCallback(rcl_sub, cb, with_waitable))) + if ( + RCL_RET_OK != rcl_subscription_set_on_new_message_callback( + rcl_sub, RclEventCallbackTrampoline, + rcl_callback_manager_.MakeCallback(rcl_sub, cb, with_waitable))) { throw std::runtime_error( - std::string( - "Failed to set the on new message callback for Waitable subscription: ") + - rcl_get_error_string().str); + std::string("Failed to set the on new message callback for Waitable subscription: ") + + rcl_get_error_string().str); } } for (size_t i = 0; i < rcl_waitset->size_of_timers; ++i) { - // Unfortunately we do require a non-const pointer here, while the waitset structure - // contains a const pointer. + // Unfortunately we do require a non-const pointer here, while the waitset structure contains a + // const pointer. rcl_timer_t * const rcl_timer = const_cast(rcl_waitset->timers[i]); rcl_waitset->timers[i] = nullptr; sub_entities.timers.push_back(rcl_timer); - // Since this callback doesn't go through RclCallbackManager which would otherwise - // own an instance of `with_waitable` associated with this callback, we'll bind it - // directly into the callback instead. + // Since this callback doesn't go through RclCallbackManager which would otherwise own an + // instance of `with_waitable` associated with this callback, we'll bind it directly into the + // callback instead. 
const auto cb = std::bind( &EventsExecutor::HandleWaitableTimerReady, this, waitable, rcl_timer, wait_set, i, with_waitable, with_waitset); @@ -617,15 +591,14 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) const auto cb = std::bind( &EventsExecutor::HandleWaitableClientReady, this, waitable, rcl_client, wait_set, i, with_waitset, pl::_1); - if (RCL_RET_OK != - rcl_client_set_on_new_response_callback( - rcl_client, RclEventCallbackTrampoline, - rcl_callback_manager_.MakeCallback(rcl_client, cb, with_waitable))) + if ( + RCL_RET_OK != rcl_client_set_on_new_response_callback( + rcl_client, RclEventCallbackTrampoline, + rcl_callback_manager_.MakeCallback(rcl_client, cb, with_waitable))) { throw std::runtime_error( - std::string( - "Failed to set the on new response callback for Waitable client: ") + - rcl_get_error_string().str); + std::string("Failed to set the on new response callback for Waitable client: ") + + rcl_get_error_string().str); } } for (size_t i = 0; i < rcl_waitset->size_of_services; ++i) { @@ -635,15 +608,14 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) const auto cb = std::bind( &EventsExecutor::HandleWaitableServiceReady, this, waitable, rcl_service, wait_set, i, with_waitset, pl::_1); - if (RCL_RET_OK != - rcl_service_set_on_new_request_callback( - rcl_service, RclEventCallbackTrampoline, - rcl_callback_manager_.MakeCallback(rcl_service, cb, with_waitable))) + if ( + RCL_RET_OK != rcl_service_set_on_new_request_callback( + rcl_service, RclEventCallbackTrampoline, + rcl_callback_manager_.MakeCallback(rcl_service, cb, with_waitable))) { throw std::runtime_error( - std::string( - "Failed to set the on new request callback for Waitable service: ") + - rcl_get_error_string().str); + std::string("Failed to set the on new request callback for Waitable service: ") + + rcl_get_error_string().str); } } for (size_t i = 0; i < rcl_waitset->size_of_events; ++i) { @@ -653,19 +625,19 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) const auto cb = std::bind( &EventsExecutor::HandleWaitableEventReady, this, waitable, rcl_event, wait_set, i, with_waitset, pl::_1); - if (RCL_RET_OK != rcl_event_set_callback(rcl_event, RclEventCallbackTrampoline, - rcl_callback_manager_.MakeCallback( - rcl_event, cb, with_waitable))) + if ( + RCL_RET_OK != rcl_event_set_callback( + rcl_event, RclEventCallbackTrampoline, + rcl_callback_manager_.MakeCallback(rcl_event, cb, with_waitable))) { throw std::runtime_error( - std::string("Failed to set the callback for Waitable event: ") + - rcl_get_error_string().str); + std::string("Failed to set the callback for Waitable event: ") + + rcl_get_error_string().str); } } - // Save the set of discovered sub-entities for later use during tear-down since we - // can't repeat the wait set trick then, as the RCL context may already be destroyed - // at that point. + // Save the set of discovered sub-entities for later use during tear-down since we can't repeat + // the wait set trick then, as the RCL context may already be destroyed at that point. 
waitable_entities_[waitable] = std::move(sub_entities); } @@ -677,12 +649,11 @@ void EventsExecutor::HandleRemovedWaitable(py::handle waitable) } const WaitableSubEntities & sub_entities = nh.mapped(); for (const rcl_subscription_t * const rcl_sub : sub_entities.subscriptions) { - if (RCL_RET_OK != - rcl_subscription_set_on_new_message_callback(rcl_sub, nullptr, nullptr)) - { - throw std::runtime_error(std::string("Failed to clear the on new message " - "callback for Waitable subscription: ") + - rcl_get_error_string().str); + if (RCL_RET_OK != rcl_subscription_set_on_new_message_callback(rcl_sub, nullptr, nullptr)) { + throw std::runtime_error( + std::string("Failed to clear the on new message " + "callback for Waitable subscription: ") + + rcl_get_error_string().str); } rcl_callback_manager_.RemoveCallback(rcl_sub); } @@ -690,30 +661,28 @@ void EventsExecutor::HandleRemovedWaitable(py::handle waitable) timers_manager_.rcl_manager().RemoveTimer(rcl_timer); } for (const rcl_client_t * const rcl_client : sub_entities.clients) { - if (RCL_RET_OK != - rcl_client_set_on_new_response_callback(rcl_client, nullptr, nullptr)) - { - throw std::runtime_error(std::string("Failed to clear the on new response " - "callback for Waitable client: ") + - rcl_get_error_string().str); + if (RCL_RET_OK != rcl_client_set_on_new_response_callback(rcl_client, nullptr, nullptr)) { + throw std::runtime_error( + std::string("Failed to clear the on new response " + "callback for Waitable client: ") + + rcl_get_error_string().str); } rcl_callback_manager_.RemoveCallback(rcl_client); } for (const rcl_service_t * const rcl_service : sub_entities.services) { - if (RCL_RET_OK != - rcl_service_set_on_new_request_callback(rcl_service, nullptr, nullptr)) - { - throw std::runtime_error(std::string("Failed to clear the on new request " - "callback for Waitable service: ") + - rcl_get_error_string().str); + if (RCL_RET_OK != rcl_service_set_on_new_request_callback(rcl_service, nullptr, nullptr)) { + throw std::runtime_error( + std::string("Failed to clear the on new request " + "callback for Waitable service: ") + + rcl_get_error_string().str); } rcl_callback_manager_.RemoveCallback(rcl_service); } for (const rcl_event_t * const rcl_event : sub_entities.events) { if (RCL_RET_OK != rcl_event_set_callback(rcl_event, nullptr, nullptr)) { throw std::runtime_error( - std::string("Failed to clear the callback for Waitable event: ") + - rcl_get_error_string().str); + std::string("Failed to clear the callback for Waitable event: ") + + rcl_get_error_string().str); } rcl_callback_manager_.RemoveCallback(rcl_event); } @@ -725,8 +694,8 @@ void EventsExecutor::HandleWaitableSubReady( { py::gil_scoped_acquire gil_acquire; - // We need to set up the wait set to make it look like our subscription object is - // ready, and then poke the Waitable to do what it needs to do from there. + // We need to set up the wait set to make it look like our subscription object is ready, and then + // poke the Waitable to do what it needs to do from there. rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); rcl_waitset->subscriptions[wait_set_sub_index] = rcl_sub; HandleWaitableReady(waitable, wait_set, number_of_events); @@ -740,8 +709,8 @@ void EventsExecutor::HandleWaitableTimerReady( { py::gil_scoped_acquire gil_acquire; - // We need to set up the wait set to make it look like our timer object is ready, and - // then poke the Waitable to do what it needs to do from there. 
+ // We need to set up the wait set to make it look like our timer object is ready, and then poke + // the Waitable to do what it needs to do from there. rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); rcl_waitset->timers[wait_set_timer_index] = rcl_timer; HandleWaitableReady(waitable, wait_set, 1); @@ -755,8 +724,8 @@ void EventsExecutor::HandleWaitableClientReady( { py::gil_scoped_acquire gil_acquire; - // We need to set up the wait set to make it look like our client object is ready, and - // then poke the Waitable to do what it needs to do from there. + // We need to set up the wait set to make it look like our client object is ready, and then poke + // the Waitable to do what it needs to do from there. rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); rcl_waitset->clients[wait_set_client_index] = rcl_client; HandleWaitableReady(waitable, wait_set, number_of_events); @@ -770,8 +739,8 @@ void EventsExecutor::HandleWaitableServiceReady( { py::gil_scoped_acquire gil_acquire; - // We need to set up the wait set to make it look like our service object is ready, - // and then poke the Waitable to do what it needs to do from there. + // We need to set up the wait set to make it look like our service object is ready, and then poke + // the Waitable to do what it needs to do from there. rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); rcl_waitset->services[wait_set_service_index] = rcl_service; HandleWaitableReady(waitable, wait_set, number_of_events); @@ -785,8 +754,8 @@ void EventsExecutor::HandleWaitableEventReady( { py::gil_scoped_acquire gil_acquire; - // We need to set up the wait set to make it look like our event object is ready, and - // then poke the Waitable to do what it needs to do from there. + // We need to set up the wait set to make it look like our event object is ready, and then poke + // the Waitable to do what it needs to do from there. rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); rcl_waitset->events[wait_set_event_index] = rcl_event; HandleWaitableReady(waitable, wait_set, number_of_events); @@ -807,8 +776,8 @@ void EventsExecutor::HandleWaitableReady( future.attr("_set_executor")(py::cast(this)); } for (size_t i = 0; i < number_of_events; ++i) { - // This method can have side effects, so it needs to be called even though it looks - // like just an accessor. + // This method can have side effects, so it needs to be called even though it looks like just an + // accessor. if (!is_ready(wait_set)) { throw std::runtime_error("Failed to make Waitable ready"); } @@ -831,22 +800,20 @@ void EventsExecutor::IterateTask(py::handle task) task(); if (task.attr("done")()) { py::object ex = task.attr("exception")(); - // Drop reference with GIL held. This doesn't necessarily destroy the underlying - // Task, since the `create_task()` caller may have retained a reference to the - // returned value. + // Drop reference with GIL held. This doesn't necessarily destroy the underlying Task, since + // the `create_task()` caller may have retained a reference to the returned value. task.dec_ref(); if (!ex.is_none()) { - // It's not clear how to easily turn a Python exception into a C++ one, so let's - // just throw it again and let pybind translate it normally. + // It's not clear how to easily turn a Python exception into a C++ one, so let's just throw it + // again and let pybind translate it normally. 
py::dict scope; scope["ex"] = ex; try { py::exec("raise ex", scope); } catch (py::error_already_set & cpp_ex) { - // There's no good way to know what node this task came from. If we only have - // one node, we can use the logger from that, otherwise we'll have to leave it - // undefined. + // There's no good way to know what node this task came from. If we only have one node, we + // can use the logger from that, otherwise we'll have to leave it undefined. py::object logger = py::none(); if (nodes_.size() == 1) { logger = nodes_[0].attr("get_logger")(); @@ -857,23 +824,21 @@ void EventsExecutor::IterateTask(py::handle task) } } else { // Task needs more iteration. Post back to the asio loop again. - // TODO(bmartin427) Not sure this is correct; in particular, it's unclear how a task - // that needs to wait a while can avoid either blocking or spinning. Revisit when - // asyncio support is intentionally added. + // TODO(bmartin427) Not sure this is correct; in particular, it's unclear how a task that needs + // to wait a while can avoid either blocking or spinning. Revisit when asyncio support is + // intentionally added. asio::post(io_context_, std::bind(&EventsExecutor::IterateTask, this, task)); } } void EventsExecutor::HandleCallbackExceptionInNodeEntity( - const py::error_already_set & exc, py::handle entity, - const std::string & node_entity_attr) + const py::error_already_set & exc, py::handle entity, const std::string & node_entity_attr) { - // Try to identify the node associated with the entity that threw the exception, so we - // can log to it. + // Try to identify the node associated with the entity that threw the exception, so we can log to + // it. for (py::handle node : nodes_) { if (py::set(node.attr(node_entity_attr.c_str())).contains(entity)) { - return HandleCallbackExceptionWithLogger(exc, node.attr("get_logger")(), - node_entity_attr); + return HandleCallbackExceptionWithLogger(exc, node.attr("get_logger")(), node_entity_attr); } } @@ -882,31 +847,29 @@ void EventsExecutor::HandleCallbackExceptionInNodeEntity( } void EventsExecutor::HandleCallbackExceptionWithLogger( - const py::error_already_set & exc, - py::object logger, - const std::string & entity_type) + const py::error_already_set & exc, py::object logger, const std::string & entity_type) { if (logger.is_none()) { py::object logging = py::module_::import("rclpy.logging"); logger = logging.attr("get_logger")("UNKNOWN"); } - // The logger API won't let you call it with two different severities, from what it - // considers the same code location. Since it has no visibility into C++, all calls - // made from here will be attributed to the python that last called into here. - // Instead we will call out to python for logging. + // The logger API won't let you call it with two different severities, from what it considers the + // same code location. Since it has no visibility into C++, all calls made from here will be + // attributed to the python that last called into here. Instead we will call out to python for + // logging. 
py::dict scope; scope["logger"] = logger; scope["node_entity_attr"] = entity_type; scope["exc_value"] = exc.value(); scope["exc_trace"] = exc.trace(); py::exec( - R"( + R"( import traceback logger.fatal(f"Exception in '{node_entity_attr}' callback: {exc_value}") logger.warn("Error occurred at:\n" + "".join(traceback.format_tb(exc_trace))) )", - scope); + scope); } // pybind11 module bindings @@ -923,11 +886,12 @@ void define_events_executor(py::object module) .def("wake", &EventsExecutor::wake) .def("spin", [](EventsExecutor & exec) {exec.spin();}) .def("spin_once", &EventsExecutor::spin_once, py::arg("timeout_sec") = py::none()) - .def("spin_until_future_complete", &EventsExecutor::spin_until_future_complete, - py::arg("future"), py::arg("timeout_sec") = py::none()) - .def("spin_once_until_future_complete", - &EventsExecutor::spin_once_until_future_complete, py::arg("future"), - py::arg("timeout_sec") = py::none()); + .def( + "spin_until_future_complete", &EventsExecutor::spin_until_future_complete, py::arg("future"), + py::arg("timeout_sec") = py::none()) + .def( + "spin_once_until_future_complete", &EventsExecutor::spin_once_until_future_complete, + py::arg("future"), py::arg("timeout_sec") = py::none()); } } // namespace events_executor diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index 8812abd6c..c01d763bd 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -46,15 +46,15 @@ namespace events_executor /// Events executor implementation for rclpy /// -/// This executor implementation attempts to replicate the function of the rclcpp -/// EventsExecutor for the benefit of rclpy applications. It is implemented in C++ to -/// minimize the overhead of processing the event loop. +/// This executor implementation attempts to replicate the function of the rclcpp EventsExecutor for +/// the benefit of rclpy applications. It is implemented in C++ to minimize the overhead of +/// processing the event loop. /// -/// We assume all public methods could be invoked from any thread. Callbacks on the -/// executor loop will be issued on the thread that called one of the spin*() variants -/// (ignoring any parallelism that might be allowed by the callback group -/// configuration). -class EventsExecutor { +/// We assume all public methods could be invoked from any thread. Callbacks on the executor loop +/// will be issued on the thread that called one of the spin*() variants (ignoring any parallelism +/// that might be allowed by the callback group configuration). 
+class EventsExecutor +{ public: /// @param context the rclpy Context object to operate on explicit EventsExecutor(pybind11::object context); @@ -64,20 +64,16 @@ class EventsExecutor { // rclpy Executor API methods: pybind11::object get_context() const {return rclpy_context_;} pybind11::object create_task( - pybind11::object callback, pybind11::args args, - const pybind11::kwargs & kwargs); + pybind11::object callback, pybind11::args args, const pybind11::kwargs & kwargs); bool shutdown(std::optional timeout_sec = {}); bool add_node(pybind11::object node); void remove_node(pybind11::handle node); void wake(); void spin(std::optional timeout_sec = {}); void spin_once(std::optional timeout_sec = {}); - void spin_until_future_complete( - pybind11::handle future, - std::optional timeout_sec = {}); + void spin_until_future_complete(pybind11::handle future, std::optional timeout_sec = {}); void spin_once_until_future_complete( - pybind11::handle future, - std::optional timeout_sec = {}); + pybind11::handle future, std::optional timeout_sec = {}); private: // Structure to hold entities discovered underlying a Waitable object. @@ -90,14 +86,14 @@ class EventsExecutor { std::vector events; }; - /// Updates the sets of known entities based on the currently tracked nodes. This is - /// not thread safe, so it must be posted to the io_context if the executor is - /// currently spinning. Expects the GIL to be held before calling. If @p shutdown is - /// true, a purge of all known nodes and entities is forced. + /// Updates the sets of known entities based on the currently tracked nodes. This is not thread + /// safe, so it must be posted to the io_context if the executor is currently spinning. Expects + /// the GIL to be held before calling. If @p shutdown is true, a purge of all known nodes and + /// entities is forced. void UpdateEntitiesFromNodes(bool shutdown); - /// Given an existing set of entities and a set with the desired new state, updates - /// the existing set and invokes callbacks on each added or removed entity. + /// Given an existing set of entities and a set with the desired new state, updates the existing + /// set and invokes callbacks on each added or removed entity. void UpdateEntitySet( pybind11::set & entity_set, const pybind11::set & new_entity_set, std::function added_entity_callback, @@ -142,18 +138,15 @@ class EventsExecutor { void HandleWaitableReady( pybind11::handle waitable, std::shared_ptr wait_set, size_t number_of_events); - /// Helper for create_task(). @p task needs to have had one reference manually added - /// to it. See create_task() implementation for details. + /// Helper for create_task(). @p task needs to have had one reference manually added to it. See + /// create_task() implementation for details. void IterateTask(pybind11::handle task); void HandleCallbackExceptionInNodeEntity( - const pybind11::error_already_set &, - pybind11::handle entity, + const pybind11::error_already_set &, pybind11::handle entity, const std::string & node_entity_attr); void HandleCallbackExceptionWithLogger( - const pybind11::error_already_set &, - pybind11::object logger, - const std::string & entity_type); + const pybind11::error_already_set &, pybind11::object logger, const std::string & entity_type); const pybind11::object rclpy_context_; @@ -177,10 +170,9 @@ class EventsExecutor { pybind11::set services_; pybind11::set waitables_; - /// Cache for rcl pointers underlying each waitables_ entry, because those are harder - /// to retrieve than the other entity types. 
-  std::unordered_map<py::handle, WaitableSubEntities, PythonHasher>
-  waitable_entities_;
+  /// Cache for rcl pointers underlying each waitables_ entry, because those are harder to retrieve
+  /// than the other entity types.
+  std::unordered_map<py::handle, WaitableSubEntities, PythonHasher> waitable_entities_;
 
   RclCallbackManager rcl_callback_manager_;
   TimersManager timers_manager_;
diff --git a/rclpy/src/rclpy/events_executor/rcl_support.cpp b/rclpy/src/rclpy/events_executor/rcl_support.cpp
index ab1c01922..fa581132a 100644
--- a/rclpy/src/rclpy/events_executor/rcl_support.cpp
+++ b/rclpy/src/rclpy/events_executor/rcl_support.cpp
@@ -25,22 +25,21 @@ namespace rclpy
 namespace events_executor
 {
 
-extern "C" void RclEventCallbackTrampoline(
-  const void * user_data,
-  size_t number_of_events)
+extern "C" void RclEventCallbackTrampoline(const void * user_data, size_t number_of_events)
 {
   const auto cb = reinterpret_cast<const std::function<void(size_t)> *>(user_data);
   (*cb)(number_of_events);
 }
 
 RclCallbackManager::RclCallbackManager(const asio::any_io_executor & executor)
-: executor_(executor) {}
+: executor_(executor)
+{
+}
 
 RclCallbackManager::~RclCallbackManager()
 {
-  // Should not still have any callbacks registered when we exit, because otherwise RCL
-  // can call pointers that will no longer be valid. We can't throw an exception here,
-  // but we can explode.
+  // Should not still have any callbacks registered when we exit, because otherwise RCL can call
+  // pointers that will no longer be valid. We can't throw an exception here, but we can explode.
   if (!owned_cbs_.empty()) {
     py::gil_scoped_acquire gil_acquire;
     py::print("Destroying callback manager with callbacks remaining");
@@ -49,28 +48,25 @@ RclCallbackManager::~RclCallbackManager()
 }
 
 const void * RclCallbackManager::MakeCallback(
-  const void * key,
-  std::function<void(size_t)> callback,
-  std::shared_ptr<ScopedWith> with)
+  const void * key, std::function<void(size_t)> callback, std::shared_ptr<ScopedWith> with)
 {
-  // We don't support replacing an existing callback with a new one, because it gets
-  // tricky making sure we don't delete an old callback while the middleware still holds
-  // a pointer to it.
+  // We don't support replacing an existing callback with a new one, because it gets tricky making
+  // sure we don't delete an old callback while the middleware still holds a pointer to it.
   if (owned_cbs_.count(key) != 0) {
     throw py::key_error("Attempt to replace existing callback");
   }
   CbEntry new_entry;
-  new_entry.cb = std::make_unique<std::function<void(size_t)>>(
-    [this, callback, key](size_t number_of_events) {
-      asio::post(executor_, [this, callback, key, number_of_events]() {
-        if (!owned_cbs_.count(key)) {
-          // This callback has been removed, just drop it as the objects it may want
-          // to touch may no longer exist.
-          return;
-        }
-        callback(number_of_events);
-      });
+  new_entry.cb =
+    std::make_unique<std::function<void(size_t)>>([this, callback, key](size_t number_of_events) {
+      asio::post(executor_, [this, callback, key, number_of_events]() {
+        if (!owned_cbs_.count(key)) {
+          // This callback has been removed, just drop it as the objects it may want to touch may
+          // no longer exist.
+          return;
+        }
+        callback(number_of_events);
       });
+    });
   new_entry.with = with;
   const void * ret = new_entry.cb.get();
   owned_cbs_[key] = std::move(new_entry);
diff --git a/rclpy/src/rclpy/events_executor/rcl_support.hpp b/rclpy/src/rclpy/events_executor/rcl_support.hpp
index 91e1db42b..e8d94a48b 100644
--- a/rclpy/src/rclpy/events_executor/rcl_support.hpp
+++ b/rclpy/src/rclpy/events_executor/rcl_support.hpp
@@ -31,39 +31,35 @@ namespace events_executor
 /// Use this for all RCL event callbacks. Use the return value from
 /// RclCallbackManager::MakeCallback() as the user data arg.
 ///
-/// Note that RCL callbacks are invoked in some arbitrary thread originating from the
-/// middleware. Callbacks should process quickly to avoid blocking the middleware; i.e.
-/// all actual work should be posted to an asio loop in another thread.
-extern "C" void RclEventCallbackTrampoline(
-  const void * user_data,
-  size_t number_of_events);
+/// Note that RCL callbacks are invoked in some arbitrary thread originating from the middleware.
+/// Callbacks should process quickly to avoid blocking the middleware; i.e. all actual work should
+/// be posted to an asio loop in another thread.
+extern "C" void RclEventCallbackTrampoline(const void * user_data, size_t number_of_events);
 
 /// Creates and maintains callback wrappers used with the RCL C library.
-class RclCallbackManager {
+class RclCallbackManager
+{
 public:
-  /// All user callbacks will be posted on the @p executor given to the constructor.
-  /// These callbacks will be invoked without the Python Global Interpreter Lock held,
-  /// so if they need to access Python at all make sure to acquire that explicitly.
+  /// All user callbacks will be posted on the @p executor given to the constructor. These
+  /// callbacks will be invoked without the Python Global Interpreter Lock held, so if they need to
+  /// access Python at all make sure to acquire that explicitly.
   explicit RclCallbackManager(const asio::any_io_executor & executor);
   ~RclCallbackManager();
 
-  /// Creates a callback wrapper to be passed to RCL C functions. @p key should be a
-  /// pointer to the rcl object that will be associated with the callback. @p with
-  /// protects the _rclpy object handle owning the RCL object, for the duration the
-  /// callback is established.
+  /// Creates a callback wrapper to be passed to RCL C functions. @p key should be a pointer to the
+  /// rcl object that will be associated with the callback. @p with protects the _rclpy object
+  /// handle owning the RCL object, for the duration the callback is established.
   const void * MakeCallback(
-    const void * key, std::function<void(size_t)> callback,
-    std::shared_ptr<ScopedWith> with);
+    const void * key, std::function<void(size_t)> callback, std::shared_ptr<ScopedWith> with);
 
-  /// Discard a previously constructed callback. @p key should be the same value
-  /// provided to MakeCallback(). Caution: ensure that RCL is no longer using a
-  /// callback before invoking this.
+  /// Discard a previously constructed callback. @p key should be the same value provided to
+  /// MakeCallback(). Caution: ensure that RCL is no longer using a callback before invoking this.
   void RemoveCallback(const void * key);
 
 private:
-  /// The C RCL interface deals in raw pointers, so someone needs to own the C++
-  /// function objects we'll be calling into. We use unique pointers so the raw pointer
-  /// to the object remains stable while the map is manipulated.
+  /// The C RCL interface deals in raw pointers, so someone needs to own the C++ function objects
+  /// we'll be calling into. We use unique pointers so the raw pointer to the object remains stable
+  /// while the map is manipulated.
   struct CbEntry
   {
    std::unique_ptr<std::function<void(size_t)>> cb;
@@ -72,8 +68,8 @@ class RclCallbackManager {
 
   asio::any_io_executor executor_;
 
-  /// The map key is the raw pointer to the RCL entity object (subscription, etc)
-  /// associated with the callback.
+  /// The map key is the raw pointer to the RCL entity object (subscription, etc) associated with
+  /// the callback.
std::unordered_map owned_cbs_; }; diff --git a/rclpy/src/rclpy/events_executor/scoped_with.hpp b/rclpy/src/rclpy/events_executor/scoped_with.hpp index c24eb5361..46e3e226a 100644 --- a/rclpy/src/rclpy/events_executor/scoped_with.hpp +++ b/rclpy/src/rclpy/events_executor/scoped_with.hpp @@ -22,7 +22,8 @@ namespace events_executor { /// Enters a python context manager for the scope of this object instance. -class ScopedWith { +class ScopedWith +{ public: explicit ScopedWith(pybind11::handle object) : object_(pybind11::cast(object)) @@ -30,10 +31,7 @@ class ScopedWith { object_.attr("__enter__")(); } - ~ScopedWith() - { - object_.attr("__exit__")(pybind11::none(), pybind11::none(), pybind11::none()); - } + ~ScopedWith() {object_.attr("__exit__")(pybind11::none(), pybind11::none(), pybind11::none());} private: pybind11::object object_; diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp index b84bc018a..777122e77 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.cpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp @@ -38,34 +38,33 @@ namespace events_executor namespace { -// Implementation note: the original iRobot TimersManager associated with the rclcpp -// EventsExecutor maintained a heap with all outstanding timers sorted by their next -// expiration time. Here that approach will be skipped in favor of just looking at -// every timer every update for the following reasons: +// Implementation note: the original iRobot TimersManager associated with the rclcpp EventsExecutor +// maintained a heap with all outstanding timers sorted by their next expiration time. Here that +// approach will be skipped in favor of just looking at every timer every update for the following +// reasons: // // * This approach is simpler -// * In the applications this has been used in so far, no Node types exist that have a -// large number of timers outstanding at once. Assuming no more than a few timers -// exist in the whole process, the heap seems like overkill. +// * In the applications this has been used in so far, no Node types exist that have a large number +// of timers outstanding at once. Assuming no more than a few timers exist in the whole process, +// the heap seems like overkill. // * Every time a timer ticks or is reset, the heap needs to be resorted anyway. -// * The rcl timer interface doesn't expose any way to get timer expiration info in -// absolute terms; you can only find out 'time until next callback'. This means if -// you are trying to sort the list of timers, the 'value' of each entry in the heap -// changes depending on when you look at it during the process of sorting. +// * The rcl timer interface doesn't expose any way to get timer expiration info in absolute terms; +// you can only find out 'time until next callback'. This means if you are trying to sort the +// list of timers, the 'value' of each entry in the heap changes depending on when you look at it +// during the process of sorting. // -// We will however yell a bit if we ever see a large number of timers that disproves -// this assumption, so we can reassess this decision. +// We will however yell a bit if we ever see a large number of timers that disproves this +// assumption, so we can reassess this decision. 
constexpr size_t WARN_TIMERS_COUNT = 8; typedef std::function ClockJumpCallbackT; typedef std::function TimerResetCallbackT; extern "C" void RclClockJumpTrampoline( - const rcl_time_jump_t * time_jump, - bool before_jump, void * user_data) + const rcl_time_jump_t * time_jump, bool before_jump, void * user_data) { - // rcl calls this both before and after a clock change, and we never want the before - // callback, so let's just eliminate that case early. + // rcl calls this both before and after a clock change, and we never want the before callback, so + // let's just eliminate that case early. if (before_jump) { return; } @@ -81,18 +80,17 @@ extern "C" void RclTimerResetTrampoline(const void * user_data, size_t) } // namespace -/// Manages a single clock source, and all timers operating on that source. All methods -/// (including construction and destruction) are assumed to happen on the thread running -/// the provided asio executor. -class RclTimersManager::ClockManager { +/// Manages a single clock source, and all timers operating on that source. All methods (including +/// construction and destruction) are assumed to happen on the thread running the provided asio +/// executor. +class RclTimersManager::ClockManager +{ public: ClockManager(const asio::any_io_executor & executor, rcl_clock_t * clock) : executor_(executor), clock_(clock) { - // Need to establish a clock jump callback so we can tell when debug time is - // updated. - rcl_jump_threshold_t threshold{ - .on_clock_change = true, .min_forward = 1, .min_backward = -1}; + // Need to establish a clock jump callback so we can tell when debug time is updated. + rcl_jump_threshold_t threshold{.on_clock_change = true, .min_forward = 1, .min_backward = -1}; // Note, this callback could happen on any thread jump_cb_ = [this](const rcl_time_jump_t * time_jump) { bool on_debug{}; @@ -108,37 +106,33 @@ class RclTimersManager::ClockManager { } asio::post(executor_, std::bind(&ClockManager::HandleJump, this, on_debug)); }; - if (RCL_RET_OK != rcl_clock_add_jump_callback(clock_, threshold, - RclClockJumpTrampoline, &jump_cb_)) + if ( + RCL_RET_OK != + rcl_clock_add_jump_callback(clock_, threshold, RclClockJumpTrampoline, &jump_cb_)) { - throw std::runtime_error(std::string("Failed to set RCL clock jump callback: ") + - rcl_get_error_string().str); + throw std::runtime_error( + std::string("Failed to set RCL clock jump callback: ") + rcl_get_error_string().str); } - // This isn't necessary yet but every timer will eventually depend on it. Again, - // this could happen on any thread. - reset_cb_ = [this]() { - asio::post(executor_, std::bind(&ClockManager::UpdateTimers, this)); - }; + // This isn't necessary yet but every timer will eventually depend on it. Again, this could + // happen on any thread. 
+ reset_cb_ = [this]() {asio::post(executor_, std::bind(&ClockManager::UpdateTimers, this));}; // Initialize which timebase we're on if (clock_->type == RCL_ROS_TIME) { if (RCL_RET_OK != rcl_is_enabled_ros_time_override(clock_, &on_debug_time_)) { throw std::runtime_error( - std::string("Failed to get RCL clock override state: ") + - rcl_get_error_string().str); + std::string("Failed to get RCL clock override state: ") + rcl_get_error_string().str); } } } ~ClockManager() { - if (RCL_RET_OK != - rcl_clock_remove_jump_callback(clock_, RclClockJumpTrampoline, &jump_cb_)) - { + if (RCL_RET_OK != rcl_clock_remove_jump_callback(clock_, RclClockJumpTrampoline, &jump_cb_)) { py::gil_scoped_acquire gil_acquire; - py::print(std::string("Failed to remove RCL clock jump callback: ") + - rcl_get_error_string().str); + py::print( + std::string("Failed to remove RCL clock jump callback: ") + rcl_get_error_string().str); } while (!timers_.empty()) { RemoveTimer(timers_.begin()->first); @@ -150,11 +144,9 @@ class RclTimersManager::ClockManager { void AddTimer(rcl_timer_t * timer, std::function ready_callback) { // All timers have the same reset callback - if (RCL_RET_OK != - rcl_timer_set_on_reset_callback(timer, RclTimerResetTrampoline, &reset_cb_)) - { - throw std::runtime_error(std::string("Failed to set timer reset callback: ") + - rcl_get_error_string().str); + if (RCL_RET_OK != rcl_timer_set_on_reset_callback(timer, RclTimerResetTrampoline, &reset_cb_)) { + throw std::runtime_error( + std::string("Failed to set timer reset callback: ") + rcl_get_error_string().str); } timers_[timer] = ready_callback; if (timers_.size() == WARN_TIMERS_COUNT) { @@ -172,14 +164,13 @@ class RclTimersManager::ClockManager { } if (RCL_RET_OK != rcl_timer_set_on_reset_callback(timer, nullptr, nullptr)) { - throw std::runtime_error(std::string("Failed to clear timer reset callback: ") + - rcl_get_error_string().str); + throw std::runtime_error( + std::string("Failed to clear timer reset callback: ") + rcl_get_error_string().str); } timers_.erase(it); - // We could re-evaluate how long we need to block for now that a timer has been - // removed; but, there's no real harm in one extra wakeup that then decides it - // doesn't need to do anything, and this timer might not even be the next to fire, - // so we won't bother. + // We could re-evaluate how long we need to block for now that a timer has been removed; but, + // there's no real harm in one extra wakeup that then decides it doesn't need to do anything, + // and this timer might not even be the next to fire, so we won't bother. } private: @@ -191,30 +182,29 @@ class RclTimersManager::ClockManager { void UpdateTimers() { - // First, evaluate all of our timers and dispatch any that are ready now. While - // we're at it, keep track of the earliest next timer callback that is due. + // First, evaluate all of our timers and dispatch any that are ready now. While we're at it, + // keep track of the earliest next timer callback that is due. std::optional next_ready_ns; for (const auto & timer_cb_pair : timers_) { auto next_call_ns = GetNextCallNanoseconds(timer_cb_pair.first); if (next_call_ns <= 0) { - // This just notifies RCL that we're considering the timer triggered, for the - // purposes of updating the next trigger time. + // This just notifies RCL that we're considering the timer triggered, for the purposes of + // updating the next trigger time. 
const auto ret = rcl_timer_call(timer_cb_pair.first); switch (ret) { case RCL_RET_OK: break; case RCL_RET_TIMER_CANCELED: - // Someone apparently canceled the timer *after* we just queried the next - // call time? Nevermind, then... + // Someone apparently canceled the timer *after* we just queried the next call time? + // Nevermind, then... rcl_reset_error(); continue; default: - throw std::runtime_error(std::string("Failed to call RCL timer: ") + - rcl_get_error_string().str); + throw std::runtime_error( + std::string("Failed to call RCL timer: ") + rcl_get_error_string().str); } - // Post the user callback to be invoked later once timing-sensitive code is - // done. + // Post the user callback to be invoked later once timing-sensitive code is done. asio::post(executor_, timer_cb_pair.second); // Update time until *next* call. @@ -225,9 +215,8 @@ class RclTimersManager::ClockManager { } } - // If we're not on debug time, we should schedule another wakeup when we - // anticipate the next timer being ready. If we are, we'll just re-check - // everything at the next jump callback. + // If we're not on debug time, we should schedule another wakeup when we anticipate the next + // timer being ready. If we are, we'll just re-check everything at the next jump callback. if (!on_debug_time_ && next_ready_ns) { next_update_wait_.expires_from_now(std::chrono::nanoseconds(*next_ready_ns)); next_update_wait_.async_wait([this](const asio::error_code & ec) { @@ -242,22 +231,21 @@ class RclTimersManager::ClockManager { } } - /// Returns the number of nanoseconds until the next callback on the given timer is - /// due. Value may be negative or zero if callback time has already been reached. - /// Returns std::nullopt if the timer is canceled. + /// Returns the number of nanoseconds until the next callback on the given timer is due. Value + /// may be negative or zero if callback time has already been reached. Returns std::nullopt if + /// the timer is canceled. static std::optional GetNextCallNanoseconds(const rcl_timer_t * rcl_timer) { int64_t time_until_next_call{}; - const rcl_ret_t ret = - rcl_timer_get_time_until_next_call(rcl_timer, &time_until_next_call); + const rcl_ret_t ret = rcl_timer_get_time_until_next_call(rcl_timer, &time_until_next_call); switch (ret) { case RCL_RET_OK: return time_until_next_call; case RCL_RET_TIMER_CANCELED: return {}; default: - throw std::runtime_error(std::string("Failed to fetch timer ready time: ") + - rcl_get_error_string().str); + throw std::runtime_error( + std::string("Failed to fetch timer ready time: ") + rcl_get_error_string().str); } } @@ -282,24 +270,22 @@ rcl_clock_t * GetTimerClock(rcl_timer_t * timer) { rcl_clock_t * clock{}; if (RCL_RET_OK != rcl_timer_clock(timer, &clock)) { - throw std::runtime_error(std::string("Failed to determine clock for timer: ") + - rcl_get_error_string().str); + throw std::runtime_error( + std::string("Failed to determine clock for timer: ") + rcl_get_error_string().str); } return clock; } } // namespace -void RclTimersManager::AddTimer( - rcl_timer_t * timer, - std::function ready_callback) +void RclTimersManager::AddTimer(rcl_timer_t * timer, std::function ready_callback) { - // Figure out the clock this timer is using, make sure a manager exists for that - // clock, then forward the timer to that clock's manager. + // Figure out the clock this timer is using, make sure a manager exists for that clock, then + // forward the timer to that clock's manager. 
rcl_clock_t * clock = GetTimerClock(timer); auto it = clock_managers_.find(clock); if (it == clock_managers_.end()) { std::tie(it, std::ignore) = clock_managers_.insert( - std::make_pair(clock, std::make_unique(executor_, clock))); + std::make_pair(clock, std::make_unique(executor_, clock))); } it->second->AddTimer(timer, ready_callback); } @@ -318,9 +304,10 @@ void RclTimersManager::RemoveTimer(rcl_timer_t * timer) } TimersManager::TimersManager( - const asio::any_io_executor & executor, - std::function timer_ready_callback) -: rcl_manager_(executor), ready_callback_(timer_ready_callback) {} + const asio::any_io_executor & executor, std::function timer_ready_callback) +: rcl_manager_(executor), ready_callback_(timer_ready_callback) +{ +} TimersManager::~TimersManager() {} diff --git a/rclpy/src/rclpy/events_executor/timers_manager.hpp b/rclpy/src/rclpy/events_executor/timers_manager.hpp index d173d1cdb..bdcdda779 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.hpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.hpp @@ -34,7 +34,8 @@ namespace events_executor { /// This class manages low-level rcl timers in the system on behalf of EventsExecutor. -class RclTimersManager { +class RclTimersManager +{ public: explicit RclTimersManager(const asio::any_io_executor &); ~RclTimersManager(); @@ -51,13 +52,13 @@ class RclTimersManager { }; /// This class manages rclpy.Timer Python objects on behalf of EventsExecutor. -class TimersManager { +class TimersManager +{ public: - /// @p timer_ready_callback will be invoked with the timer handle whenever a managed - /// timer is ready for servicing. + /// @p timer_ready_callback will be invoked with the timer handle whenever a managed timer is + /// ready for servicing. TimersManager( - const asio::any_io_executor &, - std::function timer_ready_callback); + const asio::any_io_executor &, std::function timer_ready_callback); ~TimersManager(); /// Accessor for underlying rcl timer manager, for management of non-Python timers. @@ -70,8 +71,8 @@ class TimersManager { private: struct PyRclMapping { - /// Marks the corresponding Python object as in-use for as long as we're using the - /// rcl pointer derived from it. + /// Marks the corresponding Python object as in-use for as long as we're using the rcl pointer + /// derived from it. 
std::unique_ptr with; /// The underlying rcl object From 36315946ce106361d876ecfe125dae7f617de0b8 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Thu, 9 Jan 2025 21:53:50 +0000 Subject: [PATCH 08/31] Respect init signal handling options Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 46 ++++++++++++------- .../rclpy/events_executor/events_executor.hpp | 2 +- 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index 692e7df2d..da2c06f06 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -53,7 +53,7 @@ EventsExecutor::EventsExecutor(py::object context) : rclpy_context_(context), asyncio_run_(py::module_::import("asyncio").attr("run")), rclpy_task_(py::module_::import("rclpy.task").attr("Task")), - signals_(io_context_, SIGINT, SIGTERM), + signals_(io_context_), rcl_callback_manager_(io_context_.get_executor()), timers_manager_( io_context_.get_executor(), std::bind(&EventsExecutor::HandleTimerReady, this, pl::_1)) @@ -63,20 +63,32 @@ EventsExecutor::EventsExecutor(py::object context) // // Unfortunately it doesn't look like we can either support generic guard conditions or hook into // the existing rclpy signal handling in any other useful way. We'll just establish our own - // signal handling directly instead. This unfortunately bypasses the rclpy.init() options that - // allow a user to disable certain signal handlers, but it doesn't look like we can do any better. - signals_.async_wait([this](const asio::error_code & ec, int) { - if (!ec) { - py::gil_scoped_acquire gil_acquire; - // Don't call context.try_shutdown() here, because that can call back to us to request a - // blocking shutdown(), which doesn't make any sense because we have to be spinning to - // process the callback that's asked to wait for spinning to stop. We'll have to call that - // later outside of any spin loop. - // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/__init__.py#L105-L109 - sigint_pending_.store(true); - io_context_.stop(); - } - }); + // signal handling directly instead. We can at least hook into the rclpy.init() options that + // allow a user to disable certain signal handlers. + auto rclpy_signals = py::module_::import("rclpy.signals"); + const int signal_options = + py::cast(rclpy_signals.attr("get_current_signal_handlers_options")()); + const auto sig_ops_enum = rclpy_signals.attr("SignalHandlerOptions"); + if (signal_options & py::cast(sig_ops_enum.attr("SIGINT"))) { + signals_.add(SIGINT); + } + if (signal_options & py::cast(sig_ops_enum.attr("SIGTERM"))) { + signals_.add(SIGTERM); + } + if (signal_options != py::cast(sig_ops_enum.attr("NO"))) { + signals_.async_wait([this](const asio::error_code & ec, int) { + if (!ec) { + py::gil_scoped_acquire gil_acquire; + // Don't call context.try_shutdown() here, because that can call back to us to request + // a blocking shutdown(), which doesn't make any sense because we have to be spinning + // to process the callback that's asked to wait for spinning to stop. We'll have to + // call that later outside of any spin loop. 
+ // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/__init__.py#L105-L109 + signal_pending_.store(true); + io_context_.stop(); + } + }); + } } EventsExecutor::~EventsExecutor() {shutdown();} @@ -184,7 +196,7 @@ void EventsExecutor::spin(std::optional timeout_sec) io_context_.restart(); } - if (sigint_pending_.exchange(false)) { + if (signal_pending_.exchange(false)) { rclpy_context_.attr("try_shutdown")(); } } @@ -210,7 +222,7 @@ void EventsExecutor::spin_once(std::optional timeout_sec) io_context_.restart(); } - if (sigint_pending_.exchange(false)) { + if (signal_pending_.exchange(false)) { rclpy_context_.attr("try_shutdown")(); } } diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index c01d763bd..0ff977a89 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -161,7 +161,7 @@ class EventsExecutor pybind11::set nodes_; ///< The set of all nodes we're executing std::atomic wake_pending_{}; ///< An unhandled call to wake() has been made std::timed_mutex spinning_mutex_; ///< Held while a thread is spinning - std::atomic sigint_pending_{}; + std::atomic signal_pending_{}; // Collection of awaitable entities we're servicing pybind11::set subscriptions_; From f68f995ada1936eca7f2ccdd8bf22693b3c4faaa Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Fri, 10 Jan 2025 00:07:37 +0000 Subject: [PATCH 09/31] Reconcile signal handling differences with SingleThreadedExecutor Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 40 ++++++++++++++----- .../rclpy/events_executor/events_executor.hpp | 16 +++++--- 2 files changed, 40 insertions(+), 16 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index da2c06f06..e6814a2a0 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -76,7 +76,7 @@ EventsExecutor::EventsExecutor(py::object context) signals_.add(SIGTERM); } if (signal_options != py::cast(sig_ops_enum.attr("NO"))) { - signals_.async_wait([this](const asio::error_code & ec, int) { + signals_.async_wait([this](const asio::error_code & ec, int signum) { if (!ec) { py::gil_scoped_acquire gil_acquire; // Don't call context.try_shutdown() here, because that can call back to us to request @@ -84,7 +84,7 @@ EventsExecutor::EventsExecutor(py::object context) // to process the callback that's asked to wait for spinning to stop. We'll have to // call that later outside of any spin loop. 
// https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/__init__.py#L105-L109 - signal_pending_.store(true); + signal_pending_.store(signum); io_context_.stop(); } }); @@ -196,9 +196,7 @@ void EventsExecutor::spin(std::optional timeout_sec) io_context_.restart(); } - if (signal_pending_.exchange(false)) { - rclpy_context_.attr("try_shutdown")(); - } + CheckForSignals(); } void EventsExecutor::spin_once(std::optional timeout_sec) @@ -222,9 +220,7 @@ void EventsExecutor::spin_once(std::optional timeout_sec) io_context_.restart(); } - if (signal_pending_.exchange(false)) { - rclpy_context_.attr("try_shutdown")(); - } + CheckForSignals(); } void EventsExecutor::spin_until_future_complete( @@ -819,10 +815,8 @@ void EventsExecutor::IterateTask(py::handle task) if (!ex.is_none()) { // It's not clear how to easily turn a Python exception into a C++ one, so let's just throw it // again and let pybind translate it normally. - py::dict scope; - scope["ex"] = ex; try { - py::exec("raise ex", scope); + Raise(ex); } catch (py::error_already_set & cpp_ex) { // There's no good way to know what node this task came from. If we only have one node, we // can use the logger from that, otherwise we'll have to leave it undefined. @@ -884,6 +878,30 @@ logger.warn("Error occurred at:\n" + "".join(traceback.format_tb(exc_trace))) scope); } +void EventsExecutor::Raise(py::object ex) +{ + py::dict scope; + scope["ex"] = ex; + py::exec("raise ex", scope); +} + +void EventsExecutor::CheckForSignals() +{ + const int signum = signal_pending_.exchange(0); + if (signum) { + rclpy_context_.attr("try_shutdown")(); + if (signum == SIGINT) { + PyErr_SetInterrupt(); + return; + } + } + + const bool ok = py::cast(rclpy_context_.attr("ok")()); + if (!ok) { + Raise(py::module_::import("rclpy.executors").attr("ExternalShutdownException")()); + } +} + // pybind11 module bindings void define_events_executor(py::object module) diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index 0ff977a89..63b68b595 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -148,6 +148,12 @@ class EventsExecutor void HandleCallbackExceptionWithLogger( const pybind11::error_already_set &, pybind11::object logger, const std::string & entity_type); + /// Raises the given python object instance as a Python exception + void Raise(pybind11::object); + + /// Handles any pending signals; needs to be performed at the end of every spin*() method. 
+  void CheckForSignals();
+
   const pybind11::object rclpy_context_;
 
   // Imported python objects we depend on
@@ -157,11 +163,11 @@ class EventsExecutor
   asio::io_context io_context_;
   asio::signal_set signals_;
 
-  std::recursive_mutex nodes_mutex_;  ///< Protects the node set
-  pybind11::set nodes_;               ///< The set of all nodes we're executing
-  std::atomic<bool> wake_pending_{};  ///< An unhandled call to wake() has been made
-  std::timed_mutex spinning_mutex_;   ///< Held while a thread is spinning
-  std::atomic<bool> signal_pending_{};
+  std::recursive_mutex nodes_mutex_;   ///< Protects the node set
+  pybind11::set nodes_;                ///< The set of all nodes we're executing
+  std::atomic<bool> wake_pending_{};   ///< An unhandled call to wake() has been made
+  std::timed_mutex spinning_mutex_;    ///< Held while a thread is spinning
+  std::atomic<int> signal_pending_{};  ///< Signal number of caught signal, 0 if none
 
   // Collection of awaitable entities we're servicing
   pybind11::set subscriptions_;

From 447df5e66da5b6a30ba4f3ff8c38ff9185584e92 Mon Sep 17 00:00:00 2001
From: Brad Martin
Date: Mon, 13 Jan 2025 20:49:16 +0000
Subject: [PATCH 10/31] test_executor.py: Add coverage for EventsExecutor

Tests that currently work are enabled, and those that don't are annotated with
what needs to be done before they can be enabled

Signed-off-by: Brad Martin

---
 rclpy/test/test_executor.py | 223 +++++++++++++++++++++---------------
 1 file changed, 129 insertions(+), 94 deletions(-)

diff --git a/rclpy/test/test_executor.py b/rclpy/test/test_executor.py
index 7b7027486..6b03ee364 100644
--- a/rclpy/test/test_executor.py
+++ b/rclpy/test/test_executor.py
@@ -25,6 +25,7 @@
 from rclpy.executors import MultiThreadedExecutor
 from rclpy.executors import ShutdownException
 from rclpy.executors import SingleThreadedExecutor
+from rclpy.experimental import EventsExecutor
 from rclpy.task import Future
 from test_msgs.srv import Empty
@@ -61,6 +62,8 @@ def timer_callback() -> None:
 
     def test_single_threaded_executor_executes(self) -> None:
         self.assertIsNotNone(self.node.handle)
+        # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a
+        # user-visible callback, as opposed to an internal update callback.
executor = SingleThreadedExecutor(context=self.context) try: self.assertTrue(self.func_execution(executor)) @@ -69,6 +72,7 @@ def test_single_threaded_executor_executes(self) -> None: def test_executor_immediate_shutdown(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) EventsExecutor.shutdown() wedges here, not sure why yet executor = SingleThreadedExecutor(context=self.context) try: got_callback = False @@ -113,26 +117,27 @@ def test_shutdown_exception_from_callback_generator(self) -> None: def test_remove_node(self) -> None: self.assertIsNotNone(self.node.handle) - executor = SingleThreadedExecutor(context=self.context) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) - got_callback = False + got_callback = False - def timer_callback() -> None: - nonlocal got_callback - got_callback = True + def timer_callback() -> None: + nonlocal got_callback + got_callback = True - try: - tmr = self.node.create_timer(0.1, timer_callback) try: - executor.add_node(self.node) - executor.remove_node(self.node) - executor.spin_once(timeout_sec=0.2) + tmr = self.node.create_timer(0.1, timer_callback) + try: + executor.add_node(self.node) + executor.remove_node(self.node) + executor.spin_once(timeout_sec=0.2) + finally: + self.node.destroy_timer(tmr) finally: - self.node.destroy_timer(tmr) - finally: - executor.shutdown() + executor.shutdown() - assert not got_callback + assert not got_callback def test_multi_threaded_executor_num_threads(self) -> None: self.assertIsNotNone(self.node.handle) @@ -188,12 +193,15 @@ def get_threads(): def test_add_node_to_executor(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) EventsExecutor.get_nodes() method doesn't exist executor = SingleThreadedExecutor(context=self.context) executor.add_node(self.node) self.assertIn(self.node, executor.get_nodes()) def test_executor_spin_non_blocking(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a + # user-visible callback, as opposed to an internal update callback. executor = SingleThreadedExecutor(context=self.context) executor.add_node(self.node) start = time.monotonic() @@ -203,6 +211,8 @@ def test_executor_spin_non_blocking(self) -> None: def test_execute_coroutine_timer(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a + # user-visible callback, as opposed to an internal update callback. executor = SingleThreadedExecutor(context=self.context) executor.add_node(self.node) @@ -231,6 +241,7 @@ async def coroutine() -> None: def test_execute_coroutine_guard_condition(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) Does EventsExecutor need to support guard conditions? executor = SingleThreadedExecutor(context=self.context) executor.add_node(self.node) @@ -260,6 +271,8 @@ async def coroutine() -> None: def test_create_task_coroutine(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a + # user-visible callback, as opposed to an internal update callback. 
executor = SingleThreadedExecutor(context=self.context) executor.add_node(self.node) @@ -275,6 +288,8 @@ async def coroutine(): def test_create_task_normal_function(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a + # user-visible callback, as opposed to an internal update callback. executor = SingleThreadedExecutor(context=self.context) executor.add_node(self.node) @@ -290,6 +305,8 @@ def func(): def test_create_task_fifo_order(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a + # user-visible callback, as opposed to an internal update callback. executor = SingleThreadedExecutor(context=self.context) executor.add_node(self.node) @@ -316,6 +333,8 @@ async def coro2(): def test_create_task_dependent_coroutines(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a + # user-visible callback, as opposed to an internal update callback. executor = SingleThreadedExecutor(context=self.context) executor.add_node(self.node) @@ -346,6 +365,8 @@ async def coro2(): def test_create_task_during_spin(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a + # user-visible callback, as opposed to an internal update callback. executor = SingleThreadedExecutor(context=self.context) executor.add_node(self.node) @@ -395,6 +416,8 @@ def __await__(self): yield return + # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a + # user-visible callback, as opposed to an internal update callback. trigger = TriggerAwait() did_callback = False did_return = False @@ -418,94 +441,100 @@ async def timer_callback() -> None: def test_executor_add_node(self) -> None: self.assertIsNotNone(self.node.handle) - executor = SingleThreadedExecutor(context=self.context) - assert executor.add_node(self.node) - assert id(executor) == id(self.node.executor) - assert not executor.add_node(self.node) - assert id(executor) == id(self.node.executor) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + assert executor.add_node(self.node) + assert id(executor) == id(self.node.executor) + assert not executor.add_node(self.node) + assert id(executor) == id(self.node.executor) def test_executor_spin_until_future_complete_timeout(self) -> None: self.assertIsNotNone(self.node.handle) - executor = SingleThreadedExecutor(context=self.context) - executor.add_node(self.node) - - def timer_callback() -> None: - pass - timer = self.node.create_timer(0.003, timer_callback) - - # Timeout - future = Future() - self.assertFalse(future.done()) - start = time.monotonic() - executor.spin_until_future_complete(future=future, timeout_sec=0.1) - end = time.monotonic() - # Nothing is ever setting the future, so this should have waited - # at least 0.1 seconds. 
- self.assertGreaterEqual(end - start, 0.1) - self.assertFalse(future.done()) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + executor.add_node(self.node) - timer.cancel() + def timer_callback() -> None: + pass + timer = self.node.create_timer(0.003, timer_callback) + + # Timeout + future = Future[None]() + self.assertFalse(future.done()) + start = time.monotonic() + executor.spin_until_future_complete(future=future, timeout_sec=0.1) + end = time.monotonic() + # Nothing is ever setting the future, so this should have waited + # at least 0.1 seconds. + self.assertGreaterEqual(end - start, 0.1) + self.assertFalse(future.done()) + + timer.cancel() def test_executor_spin_until_future_complete_future_done(self) -> None: self.assertIsNotNone(self.node.handle) - executor = SingleThreadedExecutor(context=self.context) - executor.add_node(self.node) - - def timer_callback() -> None: - pass - timer = self.node.create_timer(0.003, timer_callback) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + executor.add_node(self.node) - def set_future_result(future): - future.set_result('finished') + def timer_callback() -> None: + pass + timer = self.node.create_timer(0.003, timer_callback) - # Future complete timeout_sec > 0 - future = Future() - self.assertFalse(future.done()) - t = threading.Thread(target=lambda: set_future_result(future)) - t.start() - executor.spin_until_future_complete(future=future, timeout_sec=0.2) - self.assertTrue(future.done()) - self.assertEqual(future.result(), 'finished') + def set_future_result(future): + future.set_result('finished') - # Future complete timeout_sec = None - future = Future() - self.assertFalse(future.done()) - t = threading.Thread(target=lambda: set_future_result(future)) - t.start() - executor.spin_until_future_complete(future=future, timeout_sec=None) - self.assertTrue(future.done()) - self.assertEqual(future.result(), 'finished') - - # Future complete timeout < 0 - future = Future() - self.assertFalse(future.done()) - t = threading.Thread(target=lambda: set_future_result(future)) - t.start() - executor.spin_until_future_complete(future=future, timeout_sec=-1) - self.assertTrue(future.done()) - self.assertEqual(future.result(), 'finished') + # Future complete timeout_sec > 0 + future = Future[str]() + self.assertFalse(future.done()) + t = threading.Thread(target=lambda: set_future_result(future)) + t.start() + executor.spin_until_future_complete(future=future, timeout_sec=0.2) + self.assertTrue(future.done()) + self.assertEqual(future.result(), 'finished') + + # Future complete timeout_sec = None + future = Future() + self.assertFalse(future.done()) + t = threading.Thread(target=lambda: set_future_result(future)) + t.start() + executor.spin_until_future_complete(future=future, timeout_sec=None) + self.assertTrue(future.done()) + self.assertEqual(future.result(), 'finished') + + # Future complete timeout < 0 + future = Future() + self.assertFalse(future.done()) + t = threading.Thread(target=lambda: set_future_result(future)) + t.start() + executor.spin_until_future_complete(future=future, timeout_sec=-1) + self.assertTrue(future.done()) + self.assertEqual(future.result(), 'finished') - timer.cancel() + timer.cancel() def test_executor_spin_until_future_complete_do_not_wait(self) -> None: self.assertIsNotNone(self.node.handle) - executor = SingleThreadedExecutor(context=self.context) - executor.add_node(self.node) + for cls in [SingleThreadedExecutor, EventsExecutor]: 
+ executor = cls(context=self.context) + executor.add_node(self.node) - def timer_callback() -> None: - pass - timer = self.node.create_timer(0.003, timer_callback) + def timer_callback() -> None: + pass + timer = self.node.create_timer(0.003, timer_callback) - # Do not wait timeout_sec = 0 - future = Future() - self.assertFalse(future.done()) - executor.spin_until_future_complete(future=future, timeout_sec=0) - self.assertFalse(future.done()) + # Do not wait timeout_sec = 0 + future = Future[None]() + self.assertFalse(future.done()) + executor.spin_until_future_complete(future=future, timeout_sec=0) + self.assertFalse(future.done()) - timer.cancel() + timer.cancel() def test_executor_add_node_wakes_executor(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a + # user-visible callback, as opposed to an internal update callback. got_callback = False def timer_callback() -> None: @@ -536,24 +565,27 @@ def shutdown_executor_from_callback(self) -> None: """https://github.com/ros2/rclpy/issues/944: allow for executor shutdown from callback.""" self.assertIsNotNone(self.node.handle) timer_period = 0.1 - executor = SingleThreadedExecutor(context=self.context) - shutdown_event = threading.Event() + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + shutdown_event = threading.Event() - def timer_callback() -> None: - nonlocal shutdown_event, executor - executor.shutdown() - shutdown_event.set() + def timer_callback() -> None: + nonlocal shutdown_event, executor + executor.shutdown() + shutdown_event.set() - tmr = self.node.create_timer(timer_period, timer_callback) - executor.add_node(self.node) - t = threading.Thread(target=executor.spin, daemon=True) - t.start() - self.assertTrue(shutdown_event.wait(120)) - self.node.destroy_timer(tmr) + tmr = self.node.create_timer(timer_period, timer_callback) + executor.add_node(self.node) + t = threading.Thread(target=executor.spin, daemon=True) + t.start() + self.assertTrue(shutdown_event.wait(120)) + self.node.destroy_timer(tmr) def test_context_manager(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) Context management for executors is new since Iron and not supported by + # EventsExecutor yet. executor: Executor = SingleThreadedExecutor(context=self.context) with executor as the_executor: @@ -569,6 +601,8 @@ def test_context_manager(self) -> None: def test_single_threaded_spin_once_until_future(self) -> None: self.assertIsNotNone(self.node.handle) + # TODO(bmartin427) EventsExecutor.spin_once*() isn't currently guaranteed to run a + # user-visible callback, as opposed to an internal update callback. 
        executor = SingleThreadedExecutor(context=self.context)
 
         future = Future(executor=executor)
 
@@ -625,6 +659,7 @@ def test_multi_threaded_spin_once_until_future(self) -> None:
 
     def test_not_lose_callback(self) -> None:
         self.assertIsNotNone(self.node.handle)
+        # TODO(bmartin427) EventsExecutor has some kind of explosion here I haven't figured out yet
         executor = SingleThreadedExecutor(context=self.context)
 
         callback_group = ReentrantCallbackGroup()
 
From 9e95b53843680a3e81df013f96af797de17be786 Mon Sep 17 00:00:00 2001
From: Brad Martin
Date: Mon, 13 Jan 2025 21:11:25 +0000
Subject: [PATCH 11/31] Make spin_once() only return after a user-visible
 callback

Signed-off-by: Brad Martin
---
 .../rclpy/events_executor/events_executor.cpp |  33 ++-
 .../rclpy/events_executor/events_executor.hpp |   4 +
 rclpy/test/test_executor.py                   | 255 +++++++++---------
 3 files changed, 154 insertions(+), 138 deletions(-)

diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp
index e6814a2a0..6c7f34685 100644
--- a/rclpy/src/rclpy/events_executor/events_executor.cpp
+++ b/rclpy/src/rclpy/events_executor/events_executor.cpp
@@ -188,8 +188,13 @@ void EventsExecutor::spin(std::optional<double> timeout_sec)
     // Don't let asio auto stop if there's nothing to do
     const auto work = asio::make_work_guard(io_context_);
     if (timeout_sec) {
-      io_context_.run_for(std::chrono::duration_cast<std::chrono::nanoseconds>(
-          std::chrono::duration<double>(*timeout_sec)));
+      const auto timeout_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
+        std::chrono::duration<double>(*timeout_sec));
+      const auto end = std::chrono::steady_clock::now() + timeout_ns;
+      // Dispatch anything that's immediately ready, even with zero timeout
+      io_context_.poll();
+      // Now possibly block until the end of the timeout period
+      io_context_.run_until(end);
     } else {
       io_context_.run();
     }
@@ -211,11 +216,23 @@ void EventsExecutor::spin_once(std::optional<double> timeout_sec)
     py::gil_scoped_release gil_release;
     // Don't let asio auto stop if there's nothing to do
     const auto work = asio::make_work_guard(io_context_);
+    // We can't just use asio run_one*() here, because 'one' asio callback might include some
+    // internal housekeeping, and not a user-visible callback that was intended when the Executor
+    // was asked to dispatch 'once'.
+    ran_user_ = false;
     if (timeout_sec) {
-      io_context_.run_one_for(std::chrono::duration_cast<std::chrono::nanoseconds>(
-          std::chrono::duration<double>(*timeout_sec)));
+      const auto timeout_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
+        std::chrono::duration<double>(*timeout_sec));
+      const auto end = std::chrono::steady_clock::now() + timeout_ns;
+      // Dispatch anything that's immediately ready, even with zero timeout
+      while (io_context_.poll_one() && !ran_user_) {
+      }
+      // Now possibly block until the end of the timeout period
+      while (!ran_user_ && io_context_.run_one_until(end)) {
+      }
     } else {
-      io_context_.run_one();
+      while (io_context_.run_one() && !ran_user_) {
+      }
     }
     io_context_.restart();
   }
@@ -344,6 +361,7 @@ void EventsExecutor::HandleRemovedSubscription(py::handle subscription)
 
 void EventsExecutor::HandleSubscriptionReady(py::handle subscription, size_t number_of_events)
 {
+  ran_user_ = true;
   py::gil_scoped_acquire gil_acquire;
   // Largely based on rclpy.Executor._take_subscription() and _execute_subcription().
@@ -390,6 +408,7 @@ void EventsExecutor::HandleRemovedTimer(py::handle timer) {timers_manager_.Remov
 
 void EventsExecutor::HandleTimerReady(py::handle timer)
 {
+  ran_user_ = true;
   py::gil_scoped_acquire gil_acquire;
 
   try {
@@ -433,6 +452,7 @@ void EventsExecutor::HandleRemovedClient(py::handle client)
 
 void EventsExecutor::HandleClientReady(py::handle client, size_t number_of_events)
 {
+  ran_user_ = true;
   py::gil_scoped_acquire gil_acquire;
   // Largely based on rclpy.Executor._take_client() and _execute_client().
@@ -501,6 +521,7 @@ void EventsExecutor::HandleRemovedService(py::handle service)
 
 void EventsExecutor::HandleServiceReady(py::handle service, size_t number_of_events)
 {
+  ran_user_ = true;
   py::gil_scoped_acquire gil_acquire;
   // Largely based on rclpy.Executor._take_service() and _execute_service().
@@ -774,6 +795,7 @@ void EventsExecutor::HandleWaitableEventReady(
 void EventsExecutor::HandleWaitableReady(
   py::handle waitable, std::shared_ptr<WaitSet> wait_set, size_t number_of_events)
 {
+  ran_user_ = true;
   // Largely based on rclpy.Executor._take_waitable()
   // https://github.com/ros2/rclpy/blob/a19180c238d4d97ed2b58868d8fb7fa3e3b621f2/rclpy/rclpy/executors.py#L447-L454
   py::object is_ready = waitable.attr("is_ready");
@@ -803,6 +825,7 @@ void EventsExecutor::HandleWaitableReady(
 
 void EventsExecutor::IterateTask(py::handle task)
 {
+  ran_user_ = true;
   py::gil_scoped_acquire gil_acquire;
   // Calling this won't throw, but it may set the exception property on the task object.
   task();
diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp
index 63b68b595..19cb81de1 100644
--- a/rclpy/src/rclpy/events_executor/events_executor.hpp
+++ b/rclpy/src/rclpy/events_executor/events_executor.hpp
@@ -169,6 +169,10 @@ class EventsExecutor
   std::timed_mutex spinning_mutex_;    ///< Held while a thread is spinning
   std::atomic<int> signal_pending_{};  ///< Signal number of caught signal, 0 if none
 
+  /// This flag is used by spin_once() to determine if a user-visible callback has been dispatched
+  /// during its operation.
+  bool ran_user_{};
+
   // Collection of awaitable entities we're servicing
   pybind11::set subscriptions_;
   pybind11::set timers_;
diff --git a/rclpy/test/test_executor.py b/rclpy/test/test_executor.py
index 6b03ee364..1a0059ecb 100644
--- a/rclpy/test/test_executor.py
+++ b/rclpy/test/test_executor.py
@@ -62,13 +62,12 @@ def timer_callback() -> None:
 
     def test_single_threaded_executor_executes(self) -> None:
         self.assertIsNotNone(self.node.handle)
-        # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a
-        # user-visible callback, as opposed to an internal update callback.
-        executor = SingleThreadedExecutor(context=self.context)
-        try:
-            self.assertTrue(self.func_execution(executor))
-        finally:
-            executor.shutdown()
+        for cls in [SingleThreadedExecutor, EventsExecutor]:
+            executor = cls(context=self.context)
+            try:
+                self.assertTrue(self.func_execution(executor))
+            finally:
+                executor.shutdown()
 
     def test_executor_immediate_shutdown(self) -> None:
         self.assertIsNotNone(self.node.handle)
@@ -200,19 +199,17 @@ def test_add_node_to_executor(self) -> None:
 
     def test_executor_spin_non_blocking(self) -> None:
         self.assertIsNotNone(self.node.handle)
-        # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a
-        # user-visible callback, as opposed to an internal update callback.
- executor = SingleThreadedExecutor(context=self.context) - executor.add_node(self.node) - start = time.monotonic() - executor.spin_once(timeout_sec=0) - end = time.monotonic() - self.assertLess(start - end, 0.001) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + executor.add_node(self.node) + start = time.monotonic() + executor.spin_once(timeout_sec=0) + end = time.monotonic() + self.assertLess(start - end, 0.001) def test_execute_coroutine_timer(self) -> None: self.assertIsNotNone(self.node.handle) - # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a - # user-visible callback, as opposed to an internal update callback. + # TODO(bmartin427) EventsExecutor doesn't yet properly handle coroutines executor = SingleThreadedExecutor(context=self.context) executor.add_node(self.node) @@ -271,70 +268,66 @@ async def coroutine() -> None: def test_create_task_coroutine(self) -> None: self.assertIsNotNone(self.node.handle) - # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a - # user-visible callback, as opposed to an internal update callback. - executor = SingleThreadedExecutor(context=self.context) - executor.add_node(self.node) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + executor.add_node(self.node) - async def coroutine(): - return 'Sentinel Result' + async def coroutine(): + return 'Sentinel Result' - future = executor.create_task(coroutine) - self.assertFalse(future.done()) + future = executor.create_task(coroutine) + self.assertFalse(future.done()) - executor.spin_once(timeout_sec=0) - self.assertTrue(future.done()) - self.assertEqual('Sentinel Result', future.result()) + executor.spin_once(timeout_sec=0) + self.assertTrue(future.done()) + self.assertEqual('Sentinel Result', future.result()) def test_create_task_normal_function(self) -> None: self.assertIsNotNone(self.node.handle) - # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a - # user-visible callback, as opposed to an internal update callback. - executor = SingleThreadedExecutor(context=self.context) - executor.add_node(self.node) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + executor.add_node(self.node) - def func(): - return 'Sentinel Result' + def func(): + return 'Sentinel Result' - future = executor.create_task(func) - self.assertFalse(future.done()) + future = executor.create_task(func) + self.assertFalse(future.done()) - executor.spin_once(timeout_sec=0) - self.assertTrue(future.done()) - self.assertEqual('Sentinel Result', future.result()) + executor.spin_once(timeout_sec=0) + self.assertTrue(future.done()) + self.assertEqual('Sentinel Result', future.result()) def test_create_task_fifo_order(self) -> None: self.assertIsNotNone(self.node.handle) - # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a - # user-visible callback, as opposed to an internal update callback. 
- executor = SingleThreadedExecutor(context=self.context) - executor.add_node(self.node) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + executor.add_node(self.node) - async def coro1(): - return 'Sentinel Result 1' + async def coro1(): + return 'Sentinel Result 1' - future1 = executor.create_task(coro1) + future1 = executor.create_task(coro1) - async def coro2(): - return 'Sentinel Result 2' + async def coro2(): + return 'Sentinel Result 2' - future2 = executor.create_task(coro2) + future2 = executor.create_task(coro2) - # Coro1 is the 1st task, so it gets executed in this spin - executor.spin_once(timeout_sec=0) - self.assertTrue(future1.done()) - self.assertEqual('Sentinel Result 1', future1.result()) - self.assertFalse(future2.done()) + # Coro1 is the 1st task, so it gets executed in this spin + executor.spin_once(timeout_sec=0) + self.assertTrue(future1.done()) + self.assertEqual('Sentinel Result 1', future1.result()) + self.assertFalse(future2.done()) - # Coro2 is the next in the queue, so it gets executed in this spin - executor.spin_once(timeout_sec=0) - self.assertTrue(future2.done()) - self.assertEqual('Sentinel Result 2', future2.result()) + # Coro2 is the next in the queue, so it gets executed in this spin + executor.spin_once(timeout_sec=0) + self.assertTrue(future2.done()) + self.assertEqual('Sentinel Result 2', future2.result()) def test_create_task_dependent_coroutines(self) -> None: self.assertIsNotNone(self.node.handle) - # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a - # user-visible callback, as opposed to an internal update callback. + # TODO(bmartin427) EventsExecutor doesn't yet properly handle coroutines executor = SingleThreadedExecutor(context=self.context) executor.add_node(self.node) @@ -365,43 +358,42 @@ async def coro2(): def test_create_task_during_spin(self) -> None: self.assertIsNotNone(self.node.handle) - # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a - # user-visible callback, as opposed to an internal update callback. 
- executor = SingleThreadedExecutor(context=self.context) - executor.add_node(self.node) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + executor.add_node(self.node) - future = None + future = None - def spin_until_task_done(executor): - nonlocal future - while future is None or not future.done(): - try: - executor.spin_once() - finally: - executor.shutdown() - break + def spin_until_task_done(executor): + nonlocal future + while future is None or not future.done(): + try: + executor.spin_once() + finally: + executor.shutdown() + break - # Start spinning in a separate thread - thr = threading.Thread(target=spin_until_task_done, args=(executor, ), daemon=True) - thr.start() + # Start spinning in a separate thread + thr = threading.Thread(target=spin_until_task_done, args=(executor, ), daemon=True) + thr.start() - # Sleep in this thread to give the executor a chance to reach the loop in - # '_wait_for_ready_callbacks()' - time.sleep(1) + # Sleep in this thread to give the executor a chance to reach the loop in + # '_wait_for_ready_callbacks()' + time.sleep(1) - def func(): - return 'Sentinel Result' + def func(): + return 'Sentinel Result' - # Create a task - future = executor.create_task(func) + # Create a task + future = executor.create_task(func) - thr.join(timeout=0.5) - # If the join timed out, remove the node to cause the spin thread to stop - if thr.is_alive(): - executor.remove_node(self.node) + thr.join(timeout=0.5) + # If the join timed out, remove the node to cause the spin thread to stop + if thr.is_alive(): + executor.remove_node(self.node) - self.assertTrue(future.done()) - self.assertEqual('Sentinel Result', future.result()) + self.assertTrue(future.done()) + self.assertEqual('Sentinel Result', future.result()) def test_global_executor_completes_async_task(self) -> None: self.assertIsNotNone(self.node.handle) @@ -416,8 +408,7 @@ def __await__(self): yield return - # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a - # user-visible callback, as opposed to an internal update callback. + # TODO(bmartin427) EventsExecutor doesn't yet properly handle async callbacks trigger = TriggerAwait() did_callback = False did_return = False @@ -533,33 +524,32 @@ def timer_callback() -> None: def test_executor_add_node_wakes_executor(self) -> None: self.assertIsNotNone(self.node.handle) - # TODO(bmartin427) EventsExecutor.spin_once() isn't currently guaranteed to run a - # user-visible callback, as opposed to an internal update callback. 
- got_callback = False + for cls in [SingleThreadedExecutor, EventsExecutor]: + got_callback = False - def timer_callback() -> None: - nonlocal got_callback - got_callback = True + def timer_callback() -> None: + nonlocal got_callback + got_callback = True - timer_period = 0.1 - tmr = self.node.create_timer(timer_period, timer_callback) + timer_period = 0.1 + tmr = self.node.create_timer(timer_period, timer_callback) - executor = SingleThreadedExecutor(context=self.context) - try: - # spin in background - t = threading.Thread(target=executor.spin_once, daemon=True) - t.start() - # sleep to make sure executor is blocked in rcl_wait - time.sleep(0.5) + executor = cls(context=self.context) + try: + # spin in background + t = threading.Thread(target=executor.spin_once, daemon=True) + t.start() + # sleep to make sure executor is blocked in rcl_wait + time.sleep(0.5) - self.assertTrue(executor.add_node(self.node)) - # Make sure timer has time to trigger - time.sleep(timer_period) + self.assertTrue(executor.add_node(self.node)) + # Make sure timer has time to trigger + time.sleep(timer_period) - self.assertTrue(got_callback) - finally: - executor.shutdown() - self.node.destroy_timer(tmr) + self.assertTrue(got_callback) + finally: + executor.shutdown() + self.node.destroy_timer(tmr) def shutdown_executor_from_callback(self) -> None: """https://github.com/ros2/rclpy/issues/944: allow for executor shutdown from callback.""" @@ -601,33 +591,32 @@ def test_context_manager(self) -> None: def test_single_threaded_spin_once_until_future(self) -> None: self.assertIsNotNone(self.node.handle) - # TODO(bmartin427) EventsExecutor.spin_once*() isn't currently guaranteed to run a - # user-visible callback, as opposed to an internal update callback. - executor = SingleThreadedExecutor(context=self.context) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) - future = Future(executor=executor) + future = Future[bool](executor=executor) - # Setup a thread to spin_once_until_future_complete, which will spin - # for a maximum of 10 seconds. - start = time.time() - thread = threading.Thread(target=executor.spin_once_until_future_complete, - args=(future, 10)) - thread.start() + # Setup a thread to spin_once_until_future_complete, which will spin + # for a maximum of 10 seconds. + start = time.time() + thread = threading.Thread(target=executor.spin_once_until_future_complete, + args=(future, 10)) + thread.start() - # Mark the future as complete immediately - future.set_result(True) + # Mark the future as complete immediately + future.set_result(True) - thread.join() - end = time.time() + thread.join() + end = time.time() - time_spent = end - start + time_spent = end - start - # Since we marked the future as complete immediately, the amount of - # time we spent should be *substantially* less than the 10 second - # timeout we set on the spin. - assert time_spent < 10 + # Since we marked the future as complete immediately, the amount of + # time we spent should be *substantially* less than the 10 second + # timeout we set on the spin. 
+            assert time_spent < 10
 
-        executor.shutdown()
+            executor.shutdown()
 
     def test_multi_threaded_spin_once_until_future(self) -> None:
         self.assertIsNotNone(self.node.handle)

From 4a263132d69a80ce6c97af56d42e65c0ab3f5ce8 Mon Sep 17 00:00:00 2001
From: Brad Martin
Date: Mon, 13 Jan 2025 22:20:44 +0000
Subject: [PATCH 12/31] Add get_nodes() method

Signed-off-by: Brad Martin
---
 rclpy/src/rclpy/events_executor/events_executor.cpp | 3 +++
 rclpy/src/rclpy/events_executor/events_executor.hpp | 1 +
 rclpy/test/test_executor.py                         | 8 ++++----
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp
index 6c7f34685..b3dafcdaa 100644
--- a/rclpy/src/rclpy/events_executor/events_executor.cpp
+++ b/rclpy/src/rclpy/events_executor/events_executor.cpp
@@ -170,6 +170,8 @@ void EventsExecutor::wake()
   }
 }
 
+py::list EventsExecutor::get_nodes() const {return nodes_;}
+
 // NOTE: The timeouts on the below two methods are always realtime even if we're running in debug
 // time.  This is true of other executors too, because debug time is always associated with a
 // specific node and more than one node may be connected to an executor instance.
@@ -937,6 +939,7 @@ void define_events_executor(py::object module)
     .def("add_node", &EventsExecutor::add_node, py::arg("node"))
     .def("remove_node", &EventsExecutor::remove_node, py::arg("node"))
     .def("wake", &EventsExecutor::wake)
+    .def("get_nodes", &EventsExecutor::get_nodes)
     .def("spin", [](EventsExecutor & exec) {exec.spin();})
     .def("spin_once", &EventsExecutor::spin_once, py::arg("timeout_sec") = py::none())
     .def(
diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp
index 19cb81de1..ba5a7091b 100644
--- a/rclpy/src/rclpy/events_executor/events_executor.hpp
+++ b/rclpy/src/rclpy/events_executor/events_executor.hpp
@@ -69,6 +69,7 @@ class EventsExecutor
   bool add_node(pybind11::object node);
   void remove_node(pybind11::handle node);
   void wake();
+  pybind11::list get_nodes() const;
   void spin(std::optional<double> timeout_sec = {});
   void spin_once(std::optional<double> timeout_sec = {});
   void spin_until_future_complete(pybind11::handle future, std::optional<double> timeout_sec = {});
   void spin_once_until_future_complete(
     pybind11::handle future, std::optional<double> timeout_sec = {});
diff --git a/rclpy/test/test_executor.py b/rclpy/test/test_executor.py
index 1a0059ecb..46ef7f39f 100644
--- a/rclpy/test/test_executor.py
+++ b/rclpy/test/test_executor.py
@@ -192,10 +192,10 @@ def get_threads():
 
     def test_add_node_to_executor(self) -> None:
         self.assertIsNotNone(self.node.handle)
-        # TODO(bmartin427) EventsExecutor.get_nodes() method doesn't exist
-        executor = SingleThreadedExecutor(context=self.context)
-        executor.add_node(self.node)
-        self.assertIn(self.node, executor.get_nodes())
+        for cls in [SingleThreadedExecutor, EventsExecutor]:
+            executor = cls(context=self.context)
+            executor.add_node(self.node)
+            self.assertIn(self.node, executor.get_nodes())

From 99f6f93d0953e6abebcb3294c61721c73ab3df10 Mon Sep 17 00:00:00 2001
From: Brad Martin
Date: Mon, 13 Jan 2025 17:41:52 -0500
Subject: [PATCH 13/31] Add context management support

Signed-off-by: Brad Martin
---
 rclpy/src/rclpy/events_executor/events_executor.cpp | 7 ++++++-
 rclpy/src/rclpy/events_executor/events_executor.hpp | 2 ++
 rclpy/test/test_executor.py                         | 5 +++--
 3 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp
index b3dafcdaa..f41548423 100644
--- a/rclpy/src/rclpy/events_executor/events_executor.cpp
+++ b/rclpy/src/rclpy/events_executor/events_executor.cpp
@@ -256,6 +256,9 @@ void EventsExecutor::spin_once_until_future_complete(
   spin_once(timeout_sec);
 }
 
+EventsExecutor * EventsExecutor::enter() {return this;}
+void EventsExecutor::exit(py::object, py::object, py::object) {shutdown();}
+
 void EventsExecutor::UpdateEntitiesFromNodes(bool shutdown)
 {
   // Clear pending flag as early as possible, so we error on the side of retriggering a few harmless
@@ -947,7 +950,9 @@ void define_events_executor(py::object module)
       py::arg("timeout_sec") = py::none())
     .def(
       "spin_once_until_future_complete", &EventsExecutor::spin_once_until_future_complete,
-      py::arg("future"), py::arg("timeout_sec") = py::none());
+      py::arg("future"), py::arg("timeout_sec") = py::none())
+    .def("__enter__", &EventsExecutor::enter)
+    .def("__exit__", &EventsExecutor::exit);
 }
 
 }  // namespace events_executor
diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp
index ba5a7091b..fdbe28095 100644
--- a/rclpy/src/rclpy/events_executor/events_executor.hpp
+++ b/rclpy/src/rclpy/events_executor/events_executor.hpp
@@ -75,6 +75,8 @@ class EventsExecutor
   void spin_until_future_complete(pybind11::handle future, std::optional<double> timeout_sec = {});
   void spin_once_until_future_complete(
     pybind11::handle future, std::optional<double> timeout_sec = {});
+  EventsExecutor * enter();
+  void exit(pybind11::object, pybind11::object, pybind11::object);
 
 private:
   // Structure to hold entities discovered underlying a Waitable object.
diff --git a/rclpy/test/test_executor.py b/rclpy/test/test_executor.py
index 46ef7f39f..6cf085274 100644
--- a/rclpy/test/test_executor.py
+++ b/rclpy/test/test_executor.py
@@ -99,6 +99,7 @@ def timer_callback() -> None:
 
     def test_shutdown_executor_before_waiting_for_callbacks(self) -> None:
         self.assertIsNotNone(self.node.handle)
+        # EventsExecutor does not support the wait_for_ready_callbacks() API
         for cls in [SingleThreadedExecutor, MultiThreadedExecutor]:
             executor = cls(context=self.context)
             executor.shutdown()
@@ -107,6 +108,7 @@ def test_shutdown_executor_before_waiting_for_callbacks(self) -> None:
 
     def test_shutdown_exception_from_callback_generator(self) -> None:
         self.assertIsNotNone(self.node.handle)
+        # This test touches the Executor private API and is not compatible with EventsExecutor
         for cls in [SingleThreadedExecutor, MultiThreadedExecutor]:
             executor = cls(context=self.context)
             cb_generator = executor._wait_for_ready_callbacks()
@@ -574,8 +576,7 @@ def timer_callback() -> None:
 
     def test_context_manager(self) -> None:
         self.assertIsNotNone(self.node.handle)
-        # TODO(bmartin427) Context management for executors is new since Iron and not supported by
-        # EventsExecutor yet.
+        # This test touches the Executor private API and is not compatible with EventsExecutor
         executor: Executor = SingleThreadedExecutor(context=self.context)
 
         with executor as the_executor:

From dde4442982ee053131d5a7458a3e8dd03780f3e9 Mon Sep 17 00:00:00 2001
From: Brad Martin
Date: Thu, 16 Jan 2025 21:09:05 +0000
Subject: [PATCH 14/31] Remove mutex protection on nodes_ member access

It was being used only inconsistently, and on reflection it wasn't adding
any protection beyond what the GIL already provides.
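For illustration only (not part of this patch): every code path that touches
nodes_ already holds the GIL, and under CPython a single set operation is
atomic while the GIL is held. A minimal pure-Python sketch of that same
guarantee, where `nodes` is just a stand-in for the executor's nodes_ set:

    import threading

    nodes = set()

    def add_nodes(base: int) -> None:
        for i in range(10_000):
            # set.add() executes under the GIL, so concurrent adds never
            # corrupt the set; that is all the removed mutex was guarding.
            nodes.add((base, i))

    threads = [threading.Thread(target=add_nodes, args=(b,)) for b in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert len(nodes) == 40_000
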
Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 35 ++++++++----------- .../rclpy/events_executor/events_executor.hpp | 1 - 2 files changed, 15 insertions(+), 21 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index f41548423..675a80431 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -136,7 +136,6 @@ bool EventsExecutor::shutdown(std::optional timeout) bool EventsExecutor::add_node(py::object node) { - std::lock_guard lock(nodes_mutex_); if (nodes_.contains(node)) { return false; } @@ -149,7 +148,6 @@ bool EventsExecutor::add_node(py::object node) void EventsExecutor::remove_node(py::handle node) { - std::lock_guard lock(nodes_mutex_); if (!nodes_.contains(node)) { return; } @@ -271,26 +269,23 @@ void EventsExecutor::UpdateEntitiesFromNodes(bool shutdown) py::set clients; py::set services; py::set waitables; - { - std::lock_guard lock(nodes_mutex_); - if (!shutdown) { - for (py::handle node : nodes_) { - subscriptions.attr("update")(py::set(node.attr("subscriptions"))); - timers.attr("update")(py::set(node.attr("timers"))); - clients.attr("update")(py::set(node.attr("clients"))); - services.attr("update")(py::set(node.attr("services"))); - waitables.attr("update")(py::set(node.attr("waitables"))); - - // It doesn't seem to be possible to support guard conditions with a callback-based (as - // opposed to waitset-based) API. Fortunately we don't seem to need to. - if (!py::set(node.attr("guards")).empty()) { - throw std::runtime_error("Guard conditions not supported"); - } + if (!shutdown) { + for (py::handle node : nodes_) { + subscriptions.attr("update")(py::set(node.attr("subscriptions"))); + timers.attr("update")(py::set(node.attr("timers"))); + clients.attr("update")(py::set(node.attr("clients"))); + services.attr("update")(py::set(node.attr("services"))); + waitables.attr("update")(py::set(node.attr("waitables"))); + + // It doesn't seem to be possible to support guard conditions with a callback-based (as + // opposed to waitset-based) API. Fortunately we don't seem to need to. + if (!py::set(node.attr("guards")).empty()) { + throw std::runtime_error("Guard conditions not supported"); } - } else { - // Remove all tracked entities and nodes. - nodes_.clear(); } + } else { + // Remove all tracked entities and nodes. + nodes_.clear(); } // Perform updates for added and removed entities diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index fdbe28095..f4734e04f 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -166,7 +166,6 @@ class EventsExecutor asio::io_context io_context_; asio::signal_set signals_; - std::recursive_mutex nodes_mutex_; ///< Protects the node set pybind11::set nodes_; ///< The set of all nodes we're executing std::atomic wake_pending_{}; ///< An unhandled call to wake() has been made std::timed_mutex spinning_mutex_; ///< Held while a thread is spinning From aabfc1826f6edfc66e4195c366bffa7fab03161e Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Thu, 16 Jan 2025 21:26:44 +0000 Subject: [PATCH 15/31] Fix deadlock during shutdown() Needed to release the GIL while blocking on another lock. 
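For illustration only (not part of this patch), a pure-Python sketch of the
lock-order inversion being fixed; the two Lock objects are stand-ins for the
CPython GIL and spinning_mutex_:

    import threading
    import time

    gil = threading.Lock()       # stand-in for the CPython GIL
    spinning = threading.Lock()  # stand-in for spinning_mutex_

    def spin() -> None:
        with spinning:           # spin() holds spinning_mutex_ while running
            time.sleep(0.2)
            with gil:            # ...and re-takes the GIL to run callbacks
                pass

    t = threading.Thread(target=spin)
    t.start()
    time.sleep(0.05)

    gil.acquire()                # shutdown() is entered with the GIL held
    # Broken ordering: blocking on `spinning` here would deadlock, because
    # spin() can never re-take `gil` to finish and release `spinning`:
    #     spinning.acquire()
    gil.release()                # the fix: drop the GIL first...
    spinning.acquire()           # ...then block until spinning finishes
    spinning.release()
    t.join()
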
Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 18 ++++---- rclpy/test/test_executor.py | 42 +++++++++---------- 2 files changed, 32 insertions(+), 28 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index 675a80431..22f1ba7ec 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -117,15 +117,19 @@ bool EventsExecutor::shutdown(std::optional timeout) io_context_.stop(); - // Block until spinning is done, or timeout. - std::unique_lock spin_lock(spinning_mutex_, std::defer_lock); - if (timeout) { - if (!spin_lock.try_lock_for(std::chrono::duration(*timeout))) { - return false; + // Block until spinning is done, or timeout. Release the GIL while we block though. + { + py::gil_scoped_release gil_release; + std::unique_lock spin_lock(spinning_mutex_, std::defer_lock); + if (timeout) { + if (!spin_lock.try_lock_for(std::chrono::duration(*timeout))) { + return false; + } + } else { + spin_lock.lock(); } - } else { - spin_lock.lock(); } + // Tear down any callbacks we still have registered. for (py::handle node : py::list(nodes_)) { remove_node(node); diff --git a/rclpy/test/test_executor.py b/rclpy/test/test_executor.py index 6cf085274..6d56f2f54 100644 --- a/rclpy/test/test_executor.py +++ b/rclpy/test/test_executor.py @@ -71,31 +71,31 @@ def test_single_threaded_executor_executes(self) -> None: def test_executor_immediate_shutdown(self) -> None: self.assertIsNotNone(self.node.handle) - # TODO(bmartin427) EventsExecutor.shutdown() wedges here, not sure why yet - executor = SingleThreadedExecutor(context=self.context) - try: - got_callback = False + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + try: + got_callback = False - def timer_callback() -> None: - nonlocal got_callback - got_callback = True + def timer_callback() -> None: + nonlocal got_callback + got_callback = True - timer_period = 1 - tmr = self.node.create_timer(timer_period, timer_callback) + timer_period = 1 + tmr = self.node.create_timer(timer_period, timer_callback) - self.assertTrue(executor.add_node(self.node)) - t = threading.Thread(target=executor.spin, daemon=True) - start_time = time.monotonic() - t.start() - executor.shutdown() - t.join() - end_time = time.monotonic() + self.assertTrue(executor.add_node(self.node)) + t = threading.Thread(target=executor.spin, daemon=True) + start_time = time.monotonic() + t.start() + executor.shutdown() + t.join() + end_time = time.monotonic() - self.node.destroy_timer(tmr) - self.assertLess(end_time - start_time, timer_period / 2) - self.assertFalse(got_callback) - finally: - executor.shutdown() + self.node.destroy_timer(tmr) + self.assertLess(end_time - start_time, timer_period / 2) + self.assertFalse(got_callback) + finally: + executor.shutdown() def test_shutdown_executor_before_waiting_for_callbacks(self) -> None: self.assertIsNotNone(self.node.handle) From 4a181d84abbf8721dd0d14d3c1b4d9adfa5c6717 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Mon, 20 Jan 2025 22:35:18 +0000 Subject: [PATCH 16/31] A little further on using C++ _rclpy types instead of Python interface Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 60 +++++++++---------- .../rclpy/events_executor/timers_manager.cpp | 2 +- 2 files changed, 28 insertions(+), 34 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp 
b/rclpy/src/rclpy/events_executor/events_executor.cpp index 22f1ba7ec..2e2aa6f9d 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -338,7 +338,7 @@ void EventsExecutor::HandleAddedSubscription(py::handle subscription) { py::handle handle = subscription.attr("handle"); auto with = std::make_shared(handle); - const rcl_subscription_t * rcl_ptr = py::cast(handle).rcl_ptr(); + const rcl_subscription_t * rcl_ptr = py::cast(handle).rcl_ptr(); const auto cb = std::bind(&EventsExecutor::HandleSubscriptionReady, this, subscription, pl::_1); if ( RCL_RET_OK != @@ -354,7 +354,7 @@ void EventsExecutor::HandleAddedSubscription(py::handle subscription) void EventsExecutor::HandleRemovedSubscription(py::handle subscription) { py::handle handle = subscription.attr("handle"); - const rcl_subscription_t * rcl_ptr = py::cast(handle).rcl_ptr(); + const rcl_subscription_t * rcl_ptr = py::cast(handle).rcl_ptr(); if (RCL_RET_OK != rcl_subscription_set_on_new_message_callback(rcl_ptr, nullptr, nullptr)) { throw std::runtime_error( std::string("Failed to clear the on new message callback for subscription: ") + @@ -373,10 +373,9 @@ void EventsExecutor::HandleSubscriptionReady(py::handle subscription, size_t num // // NOTE: Simple object attributes we can count on to be owned by the parent object, but bound // method calls and function return values need to be owned by us. - const py::handle handle = subscription.attr("handle"); - const py::object take_message = handle.attr("take_message"); - const py::handle msg_type = subscription.attr("msg_type"); - const py::handle raw = subscription.attr("raw"); + Subscription & _rclpy_sub = py::cast(subscription.attr("handle")); + const py::object msg_type = subscription.attr("msg_type"); + const bool raw = py::cast(subscription.attr("raw")); const int callback_type = py::cast(subscription.attr("_callback_type").attr("value")); const int message_only = py::cast(subscription.attr("CallbackType").attr("MessageOnly").attr("value")); @@ -388,7 +387,7 @@ void EventsExecutor::HandleSubscriptionReady(py::handle subscription, size_t num // getting None in that case. https://github.com/ros2/rmw_cyclonedds/issues/509 bool got_none = false; for (size_t i = 0; number_of_events ? 
i < number_of_events : !got_none; ++i) { - py::object msg_info = take_message(msg_type, raw); + py::object msg_info = _rclpy_sub.take_message(msg_type, raw); if (!msg_info.is_none()) { try { if (callback_type == message_only) { @@ -429,7 +428,7 @@ void EventsExecutor::HandleAddedClient(py::handle client) { py::handle handle = client.attr("handle"); auto with = std::make_shared(handle); - const rcl_client_t * rcl_ptr = py::cast(handle).rcl_ptr(); + const rcl_client_t * rcl_ptr = py::cast(handle).rcl_ptr(); const auto cb = std::bind(&EventsExecutor::HandleClientReady, this, client, pl::_1); if ( RCL_RET_OK != @@ -445,7 +444,7 @@ void EventsExecutor::HandleAddedClient(py::handle client) void EventsExecutor::HandleRemovedClient(py::handle client) { py::handle handle = client.attr("handle"); - const rcl_client_t * rcl_ptr = py::cast(handle).rcl_ptr(); + const rcl_client_t * rcl_ptr = py::cast(handle).rcl_ptr(); if (RCL_RET_OK != rcl_client_set_on_new_response_callback(rcl_ptr, nullptr, nullptr)) { throw std::runtime_error( std::string("Failed to clear the on new response callback for client: ") + @@ -461,14 +460,13 @@ void EventsExecutor::HandleClientReady(py::handle client, size_t number_of_event // Largely based on rclpy.Executor._take_client() and _execute_client(). // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/executors.py#L369-L384 - const py::handle handle = client.attr("handle"); - const py::object take_response = handle.attr("take_response"); + Client & _rclpy_client = py::cast(client.attr("handle")); const py::handle srv_type = client.attr("srv_type"); - const py::handle res_type = srv_type.attr("Response"); + const py::object res_type = srv_type.attr("Response"); const py::object get_pending_request = client.attr("get_pending_request"); for (size_t i = 0; i < number_of_events; ++i) { - py::tuple seq_and_response = take_response(res_type); + py::tuple seq_and_response = _rclpy_client.take_response(res_type); py::handle header = seq_and_response[0]; py::handle response = seq_and_response[1]; if (!header.is_none()) { @@ -498,7 +496,7 @@ void EventsExecutor::HandleAddedService(py::handle service) { py::handle handle = service.attr("handle"); auto with = std::make_shared(handle); - const rcl_service_t * rcl_ptr = py::cast(handle).rcl_ptr(); + const rcl_service_t * rcl_ptr = py::cast(handle).rcl_ptr(); const auto cb = std::bind(&EventsExecutor::HandleServiceReady, this, service, pl::_1); if ( RCL_RET_OK != @@ -514,7 +512,7 @@ void EventsExecutor::HandleAddedService(py::handle service) void EventsExecutor::HandleRemovedService(py::handle service) { py::handle handle = service.attr("handle"); - const rcl_service_t * rcl_ptr = py::cast(handle).rcl_ptr(); + const rcl_service_t * rcl_ptr = py::cast(handle).rcl_ptr(); if (RCL_RET_OK != rcl_service_set_on_new_request_callback(rcl_ptr, nullptr, nullptr)) { throw std::runtime_error( std::string("Failed to clear the on new request callback for service: ") + @@ -530,30 +528,26 @@ void EventsExecutor::HandleServiceReady(py::handle service, size_t number_of_eve // Largely based on rclpy.Executor._take_service() and _execute_service(). 
// https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/executors.py#L386-L397 - const py::handle handle = service.attr("handle"); - const py::object service_take_request = handle.attr("service_take_request"); + Service & _rclpy_service = py::cast(service.attr("handle")); const py::handle srv_type = service.attr("srv_type"); - const py::handle req_type = srv_type.attr("Request"); + const py::object req_type = srv_type.attr("Request"); const py::handle res_type = srv_type.attr("Response"); const py::handle callback = service.attr("callback"); const py::object send_response = service.attr("send_response"); for (size_t i = 0; i < number_of_events; ++i) { - py::object maybe_request_and_header = service_take_request(req_type); - if (!maybe_request_and_header.is_none()) { - py::tuple request_and_header = py::cast(maybe_request_and_header); - py::handle request = request_and_header[0]; - py::handle header = request_and_header[1]; - if (!request.is_none()) { - py::object response; - try { - response = callback(request, res_type()); - } catch (const py::error_already_set & e) { - HandleCallbackExceptionInNodeEntity(e, service, "services"); - throw; - } - send_response(response, header); + py::tuple request_and_header = _rclpy_service.service_take_request(req_type); + py::handle request = request_and_header[0]; + py::handle header = request_and_header[1]; + if (!request.is_none()) { + py::object response; + try { + response = callback(request, res_type()); + } catch (const py::error_already_set & e) { + HandleCallbackExceptionInNodeEntity(e, service, "services"); + throw; } + send_response(response, header); } } } @@ -577,7 +571,7 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) py::cast(num_entities.attr("num_clients")), py::cast(num_entities.attr("num_services")), py::cast(num_entities.attr("num_events")), - *py::cast(rclpy_context_.attr("handle"))); + py::cast(rclpy_context_.attr("handle"))); auto with_waitset = std::make_shared(py::cast(wait_set)); waitable.attr("add_to_wait_set")(wait_set); rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp index 777122e77..7c79c0e4f 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.cpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp @@ -316,7 +316,7 @@ void TimersManager::AddTimer(py::handle timer) PyRclMapping mapping; py::handle handle = timer.attr("handle"); mapping.with = std::make_unique(handle); - mapping.rcl_ptr = py::cast(handle).rcl_ptr(); + mapping.rcl_ptr = py::cast(handle).rcl_ptr(); rcl_manager_.AddTimer(mapping.rcl_ptr, std::bind(ready_callback_, timer)); timer_mappings_[timer] = std::move(mapping); } From 7cc7f5fb137e04e840031db868518f86665a5020 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Mon, 20 Jan 2025 23:12:13 +0000 Subject: [PATCH 17/31] Fix issue with iterating through incomplete Tasks Never bool-test a py::object::attr() return value without an explicit py::cast. 
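For illustration only (not part of this patch): pybind11's boolean test on a
py::object checks only that the wrapped handle is non-null, so even a Python
`False` return value tests as true in C++. The closest pure-Python analogue
of the bug is testing the bound method itself instead of its call result:

    class Task:
        def done(self) -> bool:
            return False  # not finished yet

    task = Task()
    # Bug shape: the object is always truthy, just like the un-cast
    # py::object returned by task.attr("done")() was in C++.
    assert task.done
    # Fix shape: test the actual boolean value, as py::cast<bool> now does.
    assert not task.done()
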
Signed-off-by: Brad Martin
---
 .../rclpy/events_executor/events_executor.cpp |  2 +-
 rclpy/test/test_executor.py                   | 44 +++++++++----------
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp
index 2e2aa6f9d..954e08c94 100644
--- a/rclpy/src/rclpy/events_executor/events_executor.cpp
+++ b/rclpy/src/rclpy/events_executor/events_executor.cpp
@@ -827,7 +827,7 @@ void EventsExecutor::IterateTask(py::handle task)
   py::gil_scoped_acquire gil_acquire;
   // Calling this won't throw, but it may set the exception property on the task object.
   task();
-  if (task.attr("done")()) {
+  if (py::cast<bool>(task.attr("done")())) {
     py::object ex = task.attr("exception")();
     // Drop reference with GIL held.  This doesn't necessarily destroy the underlying Task, since
     // the `create_task()` caller may have retained a reference to the returned value.
diff --git a/rclpy/test/test_executor.py b/rclpy/test/test_executor.py
index 6d56f2f54..6f5d878cf 100644
--- a/rclpy/test/test_executor.py
+++ b/rclpy/test/test_executor.py
@@ -329,34 +329,34 @@ async def coro2():
 
     def test_create_task_dependent_coroutines(self) -> None:
         self.assertIsNotNone(self.node.handle)
-        # TODO(bmartin427) EventsExecutor doesn't yet properly handle coroutines
-        executor = SingleThreadedExecutor(context=self.context)
-        executor.add_node(self.node)
+        for cls in [SingleThreadedExecutor, EventsExecutor]:
+            executor = cls(context=self.context)
+            executor.add_node(self.node)
 
-        async def coro1():
-            nonlocal future2
-            await future2
-            return 'Sentinel Result 1'
+            async def coro1():
+                nonlocal future2
+                await future2
+                return 'Sentinel Result 1'
 
-        future1 = executor.create_task(coro1)
+            future1 = executor.create_task(coro1)
 
-        async def coro2():
-            return 'Sentinel Result 2'
+            async def coro2():
+                return 'Sentinel Result 2'
 
-        future2 = executor.create_task(coro2)
+            future2 = executor.create_task(coro2)
 
-        # Coro1 is the 1st task, so it gets to await future2 in this spin
-        executor.spin_once(timeout_sec=0)
-        # Coro2 execs in this spin
-        executor.spin_once(timeout_sec=0)
-        self.assertFalse(future1.done())
-        self.assertTrue(future2.done())
-        self.assertEqual('Sentinel Result 2', future2.result())
+            # Coro1 is the 1st task, so it gets to await future2 in this spin
+            executor.spin_once(timeout_sec=0)
+            # Coro2 execs in this spin
+            executor.spin_once(timeout_sec=0)
+            self.assertFalse(future1.done())
+            self.assertTrue(future2.done())
+            self.assertEqual('Sentinel Result 2', future2.result())
 
-        # Coro1 passes the await step here (timeout change forces new generator)
-        executor.spin_once(timeout_sec=1)
-        self.assertTrue(future1.done())
-        self.assertEqual('Sentinel Result 1', future1.result())
+            # Coro1 passes the await step here (timeout change forces new generator)
+            executor.spin_once(timeout_sec=1)
+            self.assertTrue(future1.done())
+            self.assertEqual('Sentinel Result 1', future1.result())
 
     def test_create_task_during_spin(self) -> None:
         self.assertIsNotNone(self.node.handle)

From 3502872efcbf2deab4b0f8cf4c45770ea2f7902f Mon Sep 17 00:00:00 2001
From: Brad Martin
Date: Tue, 21 Jan 2025 01:08:02 +0000
Subject: [PATCH 18/31] Add support for coroutines to timer handling

Signed-off-by: Brad Martin
---
 .../rclpy/events_executor/events_executor.cpp | 25 +++---
 .../rclpy/events_executor/events_executor.hpp |  4 +-
 rclpy/test/test_executor.py                   | 84 ++++++++++---------
 3 files changed, 57 insertions(+), 56 deletions(-)

diff --git
a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index 954e08c94..6383274d8 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -51,7 +51,7 @@ namespace events_executor EventsExecutor::EventsExecutor(py::object context) : rclpy_context_(context), - asyncio_run_(py::module_::import("asyncio").attr("run")), + inspect_iscoroutine_(py::module_::import("inspect").attr("iscoroutine")), rclpy_task_(py::module_::import("rclpy.task").attr("Task")), signals_(io_context_), rcl_callback_manager_(io_context_.get_executor()), @@ -411,13 +411,18 @@ void EventsExecutor::HandleRemovedTimer(py::handle timer) {timers_manager_.Remov void EventsExecutor::HandleTimerReady(py::handle timer) { - ran_user_ = true; py::gil_scoped_acquire gil_acquire; try { - // Unlike most rclpy objects this doesn't document whether it's a Callable or might be a - // Coroutine. Let's hope it's the former. - timer.attr("callback")(); + // The type markup claims this can't be a coroutine, but this seems to be a lie because the unit + // test does exactly that. + py::object result = timer.attr("callback")(); + if (py::cast(inspect_iscoroutine_(result))) { + // Create a Task to manage iteration of this coroutine later. + create_task(result); + } else { + ran_user_ = true; + } } catch (const py::error_already_set & e) { HandleCallbackExceptionInNodeEntity(e, timer, "timers"); throw; @@ -810,14 +815,8 @@ void EventsExecutor::HandleWaitableReady( throw std::runtime_error("Failed to make Waitable ready"); } py::object data = take_data(); - try { - // execute() is an async method, we need to use asyncio to run it - // TODO(bmartin427) Don't run all of this immediately, blocking everything else - asyncio_run_(execute(data)); - } catch (const py::error_already_set & e) { - HandleCallbackExceptionInNodeEntity(e, waitable, "waitables"); - throw; - } + // execute() is an async method, we need a Task to run it + create_task(execute(data)); } } diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index f4734e04f..40cbb3603 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -64,7 +64,7 @@ class EventsExecutor // rclpy Executor API methods: pybind11::object get_context() const {return rclpy_context_;} pybind11::object create_task( - pybind11::object callback, pybind11::args args, const pybind11::kwargs & kwargs); + pybind11::object callback, pybind11::args args = {}, const pybind11::kwargs & kwargs = {}); bool shutdown(std::optional timeout_sec = {}); bool add_node(pybind11::object node); void remove_node(pybind11::handle node); @@ -160,7 +160,7 @@ class EventsExecutor const pybind11::object rclpy_context_; // Imported python objects we depend on - const pybind11::object asyncio_run_; + const pybind11::object inspect_iscoroutine_; const pybind11::object rclpy_task_; asio::io_context io_context_; diff --git a/rclpy/test/test_executor.py b/rclpy/test/test_executor.py index 6f5d878cf..d4949bae3 100644 --- a/rclpy/test/test_executor.py +++ b/rclpy/test/test_executor.py @@ -211,32 +211,34 @@ def test_executor_spin_non_blocking(self) -> None: def test_execute_coroutine_timer(self) -> None: self.assertIsNotNone(self.node.handle) - # TODO(bmartin427) EventsExecutor doesn't yet properly handle coroutines - executor = SingleThreadedExecutor(context=self.context) - 
executor.add_node(self.node) - - called1 = False - called2 = False - - async def coroutine() -> None: - nonlocal called1 - nonlocal called2 - called1 = True - await asyncio.sleep(0) - called2 = True - - tmr = self.node.create_timer(0.1, coroutine) - try: - executor.spin_once(timeout_sec=1.23) - self.assertTrue(called1) - self.assertFalse(called2) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + executor.add_node(self.node) called1 = False - executor.spin_once(timeout_sec=0) - self.assertFalse(called1) - self.assertTrue(called2) - finally: - self.node.destroy_timer(tmr) + called2 = False + + async def coroutine() -> None: + nonlocal called1 + nonlocal called2 + called1 = True + await asyncio.sleep(0) + called2 = True + + # TODO(bmartin427) The type markup on Node.create_timer() says you can't pass a + # coroutine here. + tmr = self.node.create_timer(0.1, coroutine) + try: + executor.spin_once(timeout_sec=1.23) + self.assertTrue(called1) + self.assertFalse(called2) + + called1 = False + executor.spin_once(timeout_sec=0) + self.assertFalse(called1) + self.assertTrue(called2) + finally: + self.node.destroy_timer(tmr) def test_execute_coroutine_guard_condition(self) -> None: self.assertIsNotNone(self.node.handle) @@ -410,27 +412,27 @@ def __await__(self): yield return - # TODO(bmartin427) EventsExecutor doesn't yet properly handle async callbacks - trigger = TriggerAwait() - did_callback = False - did_return = False + for cls in [SingleThreadedExecutor, EventsExecutor]: + trigger = TriggerAwait() + did_callback = False + did_return = False - async def timer_callback() -> None: - nonlocal trigger, did_callback, did_return - did_callback = True - await trigger - did_return = True + async def timer_callback() -> None: + nonlocal trigger, did_callback, did_return + did_callback = True + await trigger + did_return = True - timer = self.node.create_timer(0.1, timer_callback) + timer = self.node.create_timer(0.1, timer_callback) - executor = SingleThreadedExecutor(context=self.context) - rclpy.spin_once(self.node, timeout_sec=0.5, executor=executor) - self.assertTrue(did_callback) + executor = cls(context=self.context) + rclpy.spin_once(self.node, timeout_sec=0.5, executor=executor) + self.assertTrue(did_callback) - timer.cancel() - trigger.do_yield = False - rclpy.spin_once(self.node, timeout_sec=0, executor=executor) - self.assertTrue(did_return) + timer.cancel() + trigger.do_yield = False + rclpy.spin_once(self.node, timeout_sec=0, executor=executor) + self.assertTrue(did_return) def test_executor_add_node(self) -> None: self.assertIsNotNone(self.node.handle) From 4dd05c658ea6674a47bb9b0ca3eebed8a04490ca Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Tue, 21 Jan 2025 02:51:37 +0000 Subject: [PATCH 19/31] Fix test_not_lose_callback() test to destroy entities properly EventsExecutor was exploding because the test was leaving destroyed entities in the node by using an incorrect API to destroy them. 
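For reference, a minimal sketch of the API distinction at issue (using the same entities the test below creates, not a new API):

    # Before: entity-level destroy() tears down the rcl handle, but the dead
    # entity can still be left registered with the node, which EventsExecutor
    # then trips over on its next entity scan.
    timer1.destroy()
    cli.destroy()

    # After: the node-level destroy methods tear the entity down *and*
    # unregister it from the node's collections.
    self.node.destroy_timer(timer1)
    self.node.destroy_client(cli)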
Signed-off-by: Brad Martin --- rclpy/test/test_executor.py | 44 ++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/rclpy/test/test_executor.py b/rclpy/test/test_executor.py index d4949bae3..49ee1f41b 100644 --- a/rclpy/test/test_executor.py +++ b/rclpy/test/test_executor.py @@ -651,37 +651,37 @@ def test_multi_threaded_spin_once_until_future(self) -> None: def test_not_lose_callback(self) -> None: self.assertIsNotNone(self.node.handle) - # TODO(bmartin427) EventsExecutor has some kind of explosion here I haven't figured out yet - executor = SingleThreadedExecutor(context=self.context) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) - callback_group = ReentrantCallbackGroup() + callback_group = ReentrantCallbackGroup() - cli = self.node.create_client( - srv_type=Empty, srv_name='test_service', callback_group=callback_group) + cli = self.node.create_client( + srv_type=Empty, srv_name='test_service', callback_group=callback_group) - async def timer1_callback() -> None: - timer1.cancel() - await cli.call_async(Empty.Request()) + async def timer1_callback() -> None: + timer1.cancel() + await cli.call_async(Empty.Request()) - timer1 = self.node.create_timer(0.5, timer1_callback, callback_group) + timer1 = self.node.create_timer(0.5, timer1_callback, callback_group) - count = 0 + count = 0 - def timer2_callback() -> None: - nonlocal count - count += 1 - timer2 = self.node.create_timer(1.5, timer2_callback, callback_group) + def timer2_callback() -> None: + nonlocal count + count += 1 + timer2 = self.node.create_timer(1.5, timer2_callback, callback_group) - executor.add_node(self.node) - future = Future(executor=executor) - executor.spin_until_future_complete(future, 4) + executor.add_node(self.node) + future = Future[None](executor=executor) + executor.spin_until_future_complete(future, 4) - assert count == 2 + assert count == 2 - executor.shutdown() - timer2.destroy() - timer1.destroy() - cli.destroy() + executor.shutdown() + self.node.destroy_timer(timer2) + self.node.destroy_timer(timer1) + self.node.destroy_client(cli) if __name__ == '__main__': From 776a1a326ecb60c25b30ddea666cf7bb31a0e52a Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Wed, 22 Jan 2025 16:34:34 +0000 Subject: [PATCH 20/31] Correct test that wasn't running at all, and remove EventsExecutor from it * Test methods need to be prefixed with 'test_' in order to function. This had been entirely dead code, and when I enabled it EventsExecutor deadlocked. * On reflection, it seems like a deadlock is exactly what should be expected from what this test is doing. Remove EventsExecutor from the test and document the problem. 
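Condensed, the pattern the test exercises looks like this (a hypothetical minimal repro, same shape as the test body below):

    def timer_callback():
        # shutdown() with no timeout is documented to block until all
        # callbacks complete -- but this *is* a callback, so an executor
        # that honors that contract can never return from here.
        executor.shutdown()

    node.create_timer(0.1, timer_callback)
    executor.add_node(node)
    threading.Thread(target=executor.spin, daemon=True).start()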
Signed-off-by: Brad Martin --- rclpy/test/test_executor.py | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/rclpy/test/test_executor.py b/rclpy/test/test_executor.py index 49ee1f41b..8c04b165b 100644 --- a/rclpy/test/test_executor.py +++ b/rclpy/test/test_executor.py @@ -555,25 +555,28 @@ def timer_callback() -> None: executor.shutdown() self.node.destroy_timer(tmr) - def shutdown_executor_from_callback(self) -> None: + def test_shutdown_executor_from_callback(self) -> None: """https://github.com/ros2/rclpy/issues/944: allow for executor shutdown from callback.""" self.assertIsNotNone(self.node.handle) timer_period = 0.1 - for cls in [SingleThreadedExecutor, EventsExecutor]: - executor = cls(context=self.context) - shutdown_event = threading.Event() + # TODO(bmartin427) This seems like an invalid test to me? executor.shutdown() is + # documented as blocking until all callbacks are complete, unless you pass a non-negative + # timeout value which this doesn't. I'm not sure how that's supposed to *not* deadlock if + # you block on all callbacks from within a callback. + executor = SingleThreadedExecutor(context=self.context) + shutdown_event = threading.Event() - def timer_callback() -> None: - nonlocal shutdown_event, executor - executor.shutdown() - shutdown_event.set() + def timer_callback() -> None: + nonlocal shutdown_event, executor + executor.shutdown() + shutdown_event.set() - tmr = self.node.create_timer(timer_period, timer_callback) - executor.add_node(self.node) - t = threading.Thread(target=executor.spin, daemon=True) - t.start() - self.assertTrue(shutdown_event.wait(120)) - self.node.destroy_timer(tmr) + tmr = self.node.create_timer(timer_period, timer_callback) + executor.add_node(self.node) + t = threading.Thread(target=executor.spin, daemon=True) + t.start() + self.assertTrue(shutdown_event.wait(120)) + self.node.destroy_timer(tmr) def test_context_manager(self) -> None: self.assertIsNotNone(self.node.handle) From 8bfbfbdd9c3b85c2b8d81337212fb16621fa9456 Mon Sep 17 00:00:00 2001 From: Brad Martin <52003535+bmartin427@users.noreply.github.com> Date: Wed, 22 Jan 2025 16:46:24 -0500 Subject: [PATCH 21/31] Warn on every timer added over the threshold, not just the first Co-authored-by: Janosch Machowinski Signed-off-by: Brad Martin <52003535+bmartin427@users.noreply.github.com> --- rclpy/src/rclpy/events_executor/timers_manager.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp index 7c79c0e4f..b67dbc9ad 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.cpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp @@ -149,7 +149,7 @@ class RclTimersManager::ClockManager std::string("Failed to set timer reset callback: ") + rcl_get_error_string().str); } timers_[timer] = ready_callback; - if (timers_.size() == WARN_TIMERS_COUNT) { + if (timers_.size() >= WARN_TIMERS_COUNT) { py::print("Warning, the number of timers associated with this clock is large."); py::print("Management of this number of timers may be inefficient."); } From 126c75421dbbe13f9aae92fddde68dccaba47896 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Fri, 24 Jan 2025 19:56:53 +0000 Subject: [PATCH 22/31] Keep rcl_timer_call() tightly coupled with user callback dispatch This both prevents an issue where user callbacks could potentially queue up if they didn't dispatch fast enough, and ensures that a timer that has passed its expiration 
without being dispatched yet can still be canceled without the user callback being delivered later. This commit also makes use of the new rcl API for querying the absolute timer expiration time, instead of the relative number of nanoseconds remaining until it expires. This should both make things more accurate, and potentially reduce overhead as we don't have to re-query the current time for each outstanding timer. Signed-off-by: Brad Martin --- .../rclpy/events_executor/timers_manager.cpp | 106 +++++++++------- rclpy/test/test_events_executor.py | 114 ++++++++---------- 2 files changed, 114 insertions(+), 106 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp index b67dbc9ad..7b5f34c69 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.cpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp @@ -21,6 +21,7 @@ #include #include #include +#include <unordered_set> #include #include @@ -48,10 +49,6 @@ namespace // of timers outstanding at once. Assuming no more than a few timers exist in the whole process, // the heap seems like overkill. // * Every time a timer ticks or is reset, the heap needs to be resorted anyway. -// * The rcl timer interface doesn't expose any way to get timer expiration info in absolute terms; -// you can only find out 'time until next callback'. This means if you are trying to sort the -// list of timers, the 'value' of each entry in the heap changes depending on when you look at it -// during the process of sorting. // // We will however yell a bit if we ever see a large number of timers that disproves this // assumption, so we can reassess this decision. @@ -144,7 +141,9 @@ class RclTimersManager::ClockManager void AddTimer(rcl_timer_t * timer, std::function<void ()> ready_callback) { // All timers have the same reset callback - if (RCL_RET_OK != rcl_timer_set_on_reset_callback(timer, RclTimerResetTrampoline, &reset_cb_)) { + if ( + RCL_RET_OK != rcl_timer_set_on_reset_callback(timer, RclTimerResetTrampoline, &reset_cb_)) + { throw std::runtime_error( std::string("Failed to set timer reset callback: ") + rcl_get_error_string().str); } @@ -184,41 +183,31 @@ class RclTimersManager::ClockManager { // First, evaluate all of our timers and dispatch any that are ready now. While we're at it, // keep track of the earliest next timer callback that is due. - std::optional<int64_t> next_ready_ns; + int64_t now{}; + if (RCL_RET_OK != rcl_clock_get_now(clock_, &now)) { + throw std::runtime_error( + std::string("Failed to read RCL clock: ") + rcl_get_error_string().str); + } + std::optional<int64_t> next_ready_time_ns; for (const auto & timer_cb_pair : timers_) { - auto next_call_ns = GetNextCallNanoseconds(timer_cb_pair.first); - if (next_call_ns <= 0) { - // This just notifies RCL that we're considering the timer triggered, for the purposes of - // updating the next trigger time. - const auto ret = rcl_timer_call(timer_cb_pair.first); - switch (ret) { - case RCL_RET_OK: - break; - case RCL_RET_TIMER_CANCELED: - // Someone apparently canceled the timer *after* we just queried the next call time? - // Nevermind, then...
- rcl_reset_error(); - continue; - default: - throw std::runtime_error( - std::string("Failed to call RCL timer: ") + rcl_get_error_string().str); + auto this_next_time_ns = GetNextCallTimeNanoseconds(timer_cb_pair.first); + if (this_next_time_ns) { + if (*this_next_time_ns <= now) { + ready_timers_.insert(timer_cb_pair.first); + asio::post( + executor_, std::bind(&ClockManager::DispatchTimer, this, timer_cb_pair.first)); + } else if (!next_ready_time_ns || (*this_next_time_ns < *next_ready_time_ns)) { + next_ready_time_ns = this_next_time_ns; } - - // Post the user callback to be invoked later once timing-sensitive code is done. - asio::post(executor_, timer_cb_pair.second); - - // Update time until *next* call. - next_call_ns = GetNextCallNanoseconds(timer_cb_pair.first); - } - if (!next_ready_ns || (next_call_ns < *next_ready_ns)) { - next_ready_ns = next_call_ns; } } - // If we're not on debug time, we should schedule another wakeup when we anticipate the next - // timer being ready. If we are, we'll just re-check everything at the next jump callback. - if (!on_debug_time_ && next_ready_ns) { - next_update_wait_.expires_from_now(std::chrono::nanoseconds(*next_ready_ns)); + // If we posted any timers for dispatch, then we'll re-evaluate things immediately after those + // complete. Otherwise, if we're on debug time, we'll re-check everything at the next jump + // callback. If neither of those things are true, then we need to schedule a wakeup for when + // we anticipate the next timer being ready. + if (ready_timers_.empty() && !on_debug_time_ && next_ready_time_ns) { + next_update_wait_.expires_from_now(std::chrono::nanoseconds(*next_ready_time_ns - now)); next_update_wait_.async_wait([this](const asio::error_code & ec) { if (!ec) { UpdateTimers(); @@ -231,16 +220,48 @@ class RclTimersManager::ClockManager } } - /// Returns the number of nanoseconds until the next callback on the given timer is due. Value - /// may be negative or zero if callback time has already been reached. Returns std::nullopt if - /// the timer is canceled. - static std::optional GetNextCallNanoseconds(const rcl_timer_t * rcl_timer) + void DispatchTimer(rcl_timer_t * rcl_timer) + { + ready_timers_.erase(rcl_timer); + // If we've dispatched all ready timers, then trigger another update to see when the next + // timers will be ready. + if (ready_timers_.empty()) { + asio::post(executor_, std::bind(&ClockManager::UpdateTimers, this)); + } + + const auto map_it = timers_.find(rcl_timer); + if (map_it == timers_.end()) { + // Perhaps the timer was removed before a pending callback could be dispatched? + return; + } + + // This notifies RCL that we're considering the timer triggered, for the purposes of updating + // the next trigger time. + const auto ret = rcl_timer_call(rcl_timer); + switch (ret) { + case RCL_RET_OK: + // Dispatch the actual user callback. + map_it->second(); + break; + case RCL_RET_TIMER_CANCELED: + // Someone canceled the timer after we queried the call time. Nevermind, then... + rcl_reset_error(); + break; + default: + throw std::runtime_error( + std::string("Failed to call RCL timer: ") + rcl_get_error_string().str); + } + } + + /// Returns the absolute time in nanoseconds when the next callback on the given timer is due. + /// Returns std::nullopt if the timer is canceled. 
+ static std::optional GetNextCallTimeNanoseconds(const rcl_timer_t * rcl_timer) { - int64_t time_until_next_call{}; - const rcl_ret_t ret = rcl_timer_get_time_until_next_call(rcl_timer, &time_until_next_call); + int64_t next_call_time{}; + const rcl_ret_t ret = rcl_timer_get_next_call_time(rcl_timer, &next_call_time); switch (ret) { case RCL_RET_OK: - return time_until_next_call; + return next_call_time; case RCL_RET_TIMER_CANCELED: return {}; default: @@ -256,6 +277,7 @@ class RclTimersManager::ClockManager bool on_debug_time_{}; std::unordered_map> timers_; + std::unordered_set ready_timers_; asio::steady_timer next_update_wait_{executor_}; }; diff --git a/rclpy/test/test_events_executor.py b/rclpy/test/test_events_executor.py index dcd2fae3c..ecdf29684 100644 --- a/rclpy/test/test_events_executor.py +++ b/rclpy/test/test_events_executor.py @@ -47,9 +47,9 @@ class SubTestNode(rclpy.node.Node): def __init__(self, *, transient_local: bool = False): super().__init__('test_sub_node') - self._new_pub_future: rclpy.Future[ - rclpy.event_handler.QoSSubscriptionMatchedInfo - ] | None = None + self._new_pub_future: ( + rclpy.Future[rclpy.event_handler.QoSSubscriptionMatchedInfo] | None + ) = None self._received_future: rclpy.Future[test_msgs.msg.BasicTypes] | None = None self.create_subscription( test_msgs.msg.BasicTypes, @@ -79,9 +79,7 @@ def _handle_sub(self, msg: test_msgs.msg.BasicTypes) -> None: self._received_future = None future.set_result(msg) - def _handle_matched_sub( - self, info: rclpy.event_handler.QoSSubscriptionMatchedInfo - ) -> None: + def _handle_matched_sub(self, info: rclpy.event_handler.QoSSubscriptionMatchedInfo) -> None: """Handle a new publisher being matched to our subscription.""" if self._new_pub_future is not None: self._new_pub_future.set_result(info) @@ -93,9 +91,9 @@ class PubTestNode(rclpy.node.Node): def __init__(self, *, transient_local: bool = False): super().__init__('test_pub_node') - self._new_sub_future: rclpy.Future[ - rclpy.event_handler.QoSPublisherMatchedInfo - ] | None = None + self._new_sub_future: rclpy.Future[rclpy.event_handler.QoSPublisherMatchedInfo] | None = ( + None + ) self._pub = self.create_publisher( test_msgs.msg.BasicTypes, 'test_topic' + ('_transient_local' if transient_local else ''), @@ -114,9 +112,7 @@ def expect_sub_info( def publish(self, value: float) -> None: self._pub.publish(test_msgs.msg.BasicTypes(float32_value=value)) - def _handle_matched_pub( - self, info: rclpy.event_handler.QoSPublisherMatchedInfo - ) -> None: + def _handle_matched_pub(self, info: rclpy.event_handler.QoSPublisherMatchedInfo) -> None: """Handle a new subscriber being matched to our publication.""" if self._new_sub_future is not None: self._new_sub_future.set_result(info) @@ -128,13 +124,9 @@ class ServiceServerTestNode(rclpy.node.Node): def __init__(self): super().__init__('test_service_server_node') - self._got_request_future: rclpy.Future[ - test_msgs.srv.BasicTypes.Request - ] | None = None + self._got_request_future: rclpy.Future[test_msgs.srv.BasicTypes.Request] | None = None self._pending_response: test_msgs.srv.BasicTypes.Response | None = None - self.create_service( - test_msgs.srv.BasicTypes, 'test_service', self._handle_request - ) + self.create_service(test_msgs.srv.BasicTypes, 'test_service', self._handle_request) def expect_request( self, success: bool, error_msg: str @@ -171,9 +163,7 @@ def __init__(self): super().__init__('test_service_client_node') self._client = self.create_client(test_msgs.srv.BasicTypes, 'test_service') - def 
issue_request( - self, value: float - ) -> rclpy.Future[test_msgs.srv.BasicTypes.Response]: + def issue_request(self, value: float) -> rclpy.Future[test_msgs.srv.BasicTypes.Response]: req = test_msgs.srv.BasicTypes.Request(float32_value=value) return self._client.call_async(req) @@ -181,9 +171,7 @@ def issue_request( class TimerTestNode(rclpy.node.Node): """Node to test timer operation.""" - def __init__( - self, index: int = 0, parameter_overrides: list[rclpy.Parameter] | None = None - ): + def __init__(self, index: int = 0, parameter_overrides: list[rclpy.Parameter] | None = None): super().__init__(f'test_timer{index}', parameter_overrides=parameter_overrides) self._timer_events = 0 self._tick_future: rclpy.Future[None] | None = None @@ -197,6 +185,9 @@ def expect_tick(self) -> rclpy.Future[None]: self._tick_future = rclpy.Future() return self._tick_future + def cancel(self) -> None: + self._timer.cancel() + def _handle_timer(self) -> None: self._timer_events += 1 if self._tick_future is not None: @@ -213,9 +204,7 @@ def __init__(self): self._pub = self.create_publisher( rosgraph_msgs.msg.Clock, '/clock', - rclpy.qos.QoSProfile( - depth=1, reliability=rclpy.qos.ReliabilityPolicy.BEST_EFFORT - ), + rclpy.qos.QoSProfile(depth=1, reliability=rclpy.qos.ReliabilityPolicy.BEST_EFFORT), ) def advance_time(self, millisec: int) -> None: @@ -231,9 +220,7 @@ def __init__(self): 'test_action_server_node', parameter_overrides=[rclpy.Parameter('use_sim_time', value=True)], ) - self._got_goal_future: rclpy.Future[ - test_msgs.action.Fibonacci.Goal - ] | None = None + self._got_goal_future: rclpy.Future[test_msgs.action.Fibonacci.Goal] | None = None self._srv = rclpy.action.ActionServer( self, test_msgs.action.Fibonacci, @@ -250,9 +237,7 @@ def expect_goal(self) -> rclpy.Future[test_msgs.action.Fibonacci.Goal]: self._got_goal_future = rclpy.Future() return self._got_goal_future - def _handle_accepted( - self, goal_handle: rclpy.action.server.ServerGoalHandle - ) -> None: + def _handle_accepted(self, goal_handle: rclpy.action.server.ServerGoalHandle) -> None: self._goal_handle = goal_handle self._sequence = [0, 1] if self._got_goal_future is not None: @@ -309,19 +294,11 @@ class ActionClientTestNode(rclpy.node.Node): def __init__(self): super().__init__('test_action_client_node') - self._client = rclpy.action.ActionClient( - self, test_msgs.action.Fibonacci, 'test_action' - ) - self._feedback_future: rclpy.Future[ - test_msgs.action.Fibonacci.Feedback - ] | None = None - self._result_future: rclpy.Future[ - test_msgs.action.Fibonacci.Result - ] | None = None - - def send_goal( - self, order: int - ) -> rclpy.Future[rclpy.action.client.ClientGoalHandle]: + self._client = rclpy.action.ActionClient(self, test_msgs.action.Fibonacci, 'test_action') + self._feedback_future: rclpy.Future[test_msgs.action.Fibonacci.Feedback] | None = None + self._result_future: rclpy.Future[test_msgs.action.Fibonacci.Result] | None = None + + def send_goal(self, order: int) -> rclpy.Future[rclpy.action.client.ClientGoalHandle]: """ Send a new goal. 
@@ -337,9 +314,7 @@ def send_goal( goal_ack_future.add_done_callback(self._handle_goal_ack) return goal_ack_future - def _handle_goal_ack( - self, future: rclpy.Future[rclpy.action.client.ClientGoalHandle] - ) -> None: + def _handle_goal_ack(self, future: rclpy.Future[rclpy.action.client.ClientGoalHandle]) -> None: handle = future.result() assert handle is not None result_future = handle.get_result_async() @@ -526,9 +501,7 @@ def handle_message(future: rclpy.Future[test_msgs.msg.BasicTypes]) -> None: self._expect_future_done(received_future) self.assertEqual(len(received_messages), 5) for i in range(5): - self.assertAlmostEqual( - received_messages[i].float32_value, 0.1 * i, places=5 - ) + self.assertAlmostEqual(received_messages[i].float32_value, 0.1 * i, places=5) self._expect_future_not_done(received_future) pub_node.publish(0.5) @@ -546,12 +519,8 @@ def test_service(self) -> None: for i in range(300): got_response_future = client_node.issue_request(7.1) self._check_service_request_future(got_request_future, 7.1) - got_request_future = server_node.expect_request( - True, f'test response {i + 1}' - ) - self._check_service_response_future( - got_response_future, True, f'test response {i}' - ) + got_request_future = server_node.expect_request(True, f'test response {i + 1}') + self._check_service_response_future(got_response_future, True, f'test response {i}') # Destroy server node and retry issuing a request self.executor.remove_node(server_node) @@ -594,6 +563,27 @@ def test_timers(self) -> None: # Ensure the realtime timer ticked much less than the rostime one. self.assertLess(realtime_node.timer_events, rostime_node.timer_events) + # Create two timers with the same interval, both set to cancel the other from the callback. + # Only one of the callbacks should be delivered, though we can't necessarily predict which + # one. 
+ def handler(): + nonlocal count, timer1, timer2 + count += 1 + timer1.cancel() + timer2.cancel() + + count = 0 + timer1 = rostime_node.create_timer(0.01, handler) + timer2 = rostime_node.create_timer(0.01, handler) + self._spin_for(0.0) + self.assertEqual(count, 0) + clock_node.advance_time(10) + self._spin_for(0.0) + self.assertEqual(count, 1) + clock_node.advance_time(10) + self._spin_for(0.0) + self.assertEqual(count, 1) + def test_action(self) -> None: clock_node = ClockPublisherNode() self.executor.add_node(clock_node) @@ -639,14 +629,10 @@ def test_action(self) -> None: got_goal_future = server_node.expect_goal() # Test completed goal expiration by time - self.assertEqual( - last_handle.status, action_msgs.msg.GoalStatus.STATUS_SUCCEEDED - ) + self.assertEqual(last_handle.status, action_msgs.msg.GoalStatus.STATUS_SUCCEEDED) clock_node.advance_time(9999) self._spin_for(0.2) - self.assertEqual( - last_handle.status, action_msgs.msg.GoalStatus.STATUS_SUCCEEDED - ) + self.assertEqual(last_handle.status, action_msgs.msg.GoalStatus.STATUS_SUCCEEDED) clock_node.advance_time(2) self._spin_for(0.2) self.assertEqual(last_handle.status, action_msgs.msg.GoalStatus.STATUS_UNKNOWN) From ee147116f1bd356fab32bcab29c92728ab02996c Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Fri, 24 Jan 2025 20:37:18 +0000 Subject: [PATCH 23/31] Protect against deferred method calls happening against a deleted ClockManager Signed-off-by: Brad Martin --- .../rclpy/events_executor/timers_manager.cpp | 27 ++++++++++++++----- .../rclpy/events_executor/timers_manager.hpp | 2 +- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp index 7b5f34c69..b96d3c4fa 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.cpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp @@ -80,7 +80,7 @@ extern "C" void RclTimerResetTrampoline(const void * user_data, size_t) /// Manages a single clock source, and all timers operating on that source. All methods (including /// construction and destruction) are assumed to happen on the thread running the provided asio /// executor. -class RclTimersManager::ClockManager +class RclTimersManager::ClockManager : public std::enable_shared_from_this<ClockManager> { public: ClockManager(const asio::any_io_executor & executor, rcl_clock_t * clock) @@ -101,7 +101,7 @@ class RclTimersManager::ClockManager on_debug = false; break; } - asio::post(executor_, std::bind(&ClockManager::HandleJump, this, on_debug)); + asio::post(executor_, CallIfAlive(&ClockManager::HandleJump, on_debug)); }; if ( RCL_RET_OK != @@ -113,7 +113,7 @@ class RclTimersManager::ClockManager // This isn't necessary yet but every timer will eventually depend on it. Again, this could // happen on any thread. - reset_cb_ = [this]() {asio::post(executor_, std::bind(&ClockManager::UpdateTimers, this));}; + reset_cb_ = [this]() {asio::post(executor_, CallIfAlive(&ClockManager::UpdateTimers));}; // Initialize which timebase we're on if (clock_->type == RCL_ROS_TIME) { @@ -173,6 +173,20 @@ class RclTimersManager::ClockManager } private: + /// Returns a function suitable for being invoked later, which would invoke the given method on + /// `this` with the given args, provided that `this` still exists at that time. + template <typename ... Args> + std::function<void ()> CallIfAlive(void (ClockManager::*method)(Args...), Args... args) + { + std::weak_ptr<ClockManager> weak_this(shared_from_this()); + return [ = ]() { + auto locked = weak_this.lock(); + if (locked) { + (locked.get()->*method)(args ...); + } + }; + } + void HandleJump(bool on_debug_time) { on_debug_time_ = on_debug_time; @@ -194,8 +208,7 @@ class RclTimersManager::ClockManager if (*this_next_time_ns <= now) { ready_timers_.insert(timer_cb_pair.first); - asio::post( - executor_, std::bind(&ClockManager::DispatchTimer, this, timer_cb_pair.first)); + asio::post(executor_, CallIfAlive(&ClockManager::DispatchTimer, timer_cb_pair.first)); } else if (!next_ready_time_ns || (*this_next_time_ns < *next_ready_time_ns)) { next_ready_time_ns = this_next_time_ns; } @@ -226,7 +239,7 @@ class RclTimersManager::ClockManager // If we've dispatched all ready timers, then trigger another update to see when the next // timers will be ready. if (ready_timers_.empty()) { - asio::post(executor_, std::bind(&ClockManager::UpdateTimers, this)); + asio::post(executor_, CallIfAlive(&ClockManager::UpdateTimers)); } const auto map_it = timers_.find(rcl_timer); @@ -307,7 +320,7 @@ void RclTimersManager::AddTimer(rcl_timer_t * timer, std::function<void ()> ready auto it = clock_managers_.find(clock); if (it == clock_managers_.end()) { std::tie(it, std::ignore) = clock_managers_.insert( - std::make_pair(clock, std::make_unique<ClockManager>(executor_, clock))); + std::make_pair(clock, std::make_shared<ClockManager>(executor_, clock))); } it->second->AddTimer(timer, ready_callback); } diff --git a/rclpy/src/rclpy/events_executor/timers_manager.hpp b/rclpy/src/rclpy/events_executor/timers_manager.hpp index bdcdda779..1d7085205 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.hpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.hpp @@ -48,7 +48,7 @@ class RclTimersManager class ClockManager; /// Handlers for each distinct clock source in the system. - std::unordered_map<rcl_clock_t *, std::unique_ptr<ClockManager>> clock_managers_; + std::unordered_map<rcl_clock_t *, std::shared_ptr<ClockManager>> clock_managers_; }; /// This class manages rclpy.Timer Python objects on behalf of EventsExecutor.
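Note: CallIfAlive() above is the standard enable_shared_from_this / weak_ptr guard for work posted to an executor that may outlive the object it targets. A condensed, self-contained sketch of the same idea, with a hypothetical Worker standing in for ClockManager:

    #include <functional>
    #include <iostream>
    #include <memory>

    class Worker : public std::enable_shared_from_this<Worker>
    {
    public:
      // Bind a member function call such that, when the returned closure runs
      // later, it is a no-op if the Worker has already been destroyed.
      template <typename ... Args>
      std::function<void ()> CallIfAlive(void (Worker::*method)(Args...), Args... args)
      {
        std::weak_ptr<Worker> weak_this(shared_from_this());
        return [ = ]() {
            auto locked = weak_this.lock();
            if (locked) {
              (locked.get()->*method)(args ...);
            }
          };
      }

      void Greet() {std::cout << "still alive" << std::endl;}
    };

    int main()
    {
      // shared_from_this() requires shared_ptr ownership; this is why the
      // commit also changes make_unique to make_shared for ClockManager.
      auto worker = std::make_shared<Worker>();
      auto deferred = worker->CallIfAlive(&Worker::Greet);
      deferred();      // prints "still alive"
      worker.reset();  // Worker destroyed
      deferred();      // safely does nothing
    }

This mirrors the commit's CallIfAlive() itself; only the asio::post() plumbing around it is omitted.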
From 35f4aea7924876da52a8846442bc672379354188 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Fri, 24 Jan 2025 21:13:04 +0000 Subject: [PATCH 24/31] Add support for new TimerInfo data passed to timer handlers Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 86 ++++++++++++------- .../rclpy/events_executor/events_executor.hpp | 16 ++-- .../rclpy/events_executor/timers_manager.cpp | 19 ++-- .../rclpy/events_executor/timers_manager.hpp | 11 +-- rclpy/test/test_events_executor.py | 28 ++++-- 5 files changed, 101 insertions(+), 59 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index 6383274d8..570eeefb1 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -52,11 +52,13 @@ namespace events_executor EventsExecutor::EventsExecutor(py::object context) : rclpy_context_(context), inspect_iscoroutine_(py::module_::import("inspect").attr("iscoroutine")), + inspect_signature_(py::module_::import("inspect").attr("signature")), rclpy_task_(py::module_::import("rclpy.task").attr("Task")), + rclpy_timer_timer_info_(py::module_::import("rclpy.timer").attr("TimerInfo")), signals_(io_context_), rcl_callback_manager_(io_context_.get_executor()), timers_manager_( - io_context_.get_executor(), std::bind(&EventsExecutor::HandleTimerReady, this, pl::_1)) + io_context_.get_executor(), std::bind(&EventsExecutor::HandleTimerReady, this, pl::_1, pl::_2)) { // rclpy.Executor creates a sigint handling guard condition here. This is necessary because a // sleeping event loop won't notice Ctrl-C unless some other event wakes it up otherwise. @@ -79,10 +81,10 @@ EventsExecutor::EventsExecutor(py::object context) signals_.async_wait([this](const asio::error_code & ec, int signum) { if (!ec) { py::gil_scoped_acquire gil_acquire; - // Don't call context.try_shutdown() here, because that can call back to us to request - // a blocking shutdown(), which doesn't make any sense because we have to be spinning - // to process the callback that's asked to wait for spinning to stop. We'll have to - // call that later outside of any spin loop. + // Don't call context.try_shutdown() here, because that can call back to us to + // request a blocking shutdown(), which doesn't make any sense because we have to be + // spinning to process the callback that's asked to wait for spinning to stop. We'll + // have to call that later outside of any spin loop. // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/__init__.py#L105-L109 signal_pending_.store(signum); io_context_.stop(); @@ -111,8 +113,8 @@ pybind11::object EventsExecutor::create_task( bool EventsExecutor::shutdown(std::optional timeout) { - // NOTE: The rclpy context can invoke this with a lock on the context held. Therefore we must not - // try to go access that context during this method or we can deadlock. + // NOTE: The rclpy context can invoke this with a lock on the context held. Therefore we must + // not try to go access that context during this method or we can deadlock. // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/context.py#L101-L103 io_context_.stop(); @@ -144,7 +146,8 @@ bool EventsExecutor::add_node(py::object node) return false; } nodes_.add(node); - // Caution, the Node executor setter method calls executor.add_node() again making this reentrant. 
+ // Caution, the Node executor setter method calls executor.add_node() again making this + // reentrant. node.attr("executor") = py::cast(this); wake(); return true; @@ -263,8 +266,8 @@ void EventsExecutor::exit(py::object, py::object, py::object) {shutdown();} void EventsExecutor::UpdateEntitiesFromNodes(bool shutdown) { - // Clear pending flag as early as possible, so we error on the side of retriggering a few harmless - // updates rather than potentially missing important additions. + // Clear pending flag as early as possible, so we error on the side of retriggering a few + // harmless updates rather than potentially missing important additions. wake_pending_.store(false); // Collect all entities currently associated with our nodes @@ -409,24 +412,41 @@ void EventsExecutor::HandleAddedTimer(py::handle timer) {timers_manager_.AddTime void EventsExecutor::HandleRemovedTimer(py::handle timer) {timers_manager_.RemoveTimer(timer);} -void EventsExecutor::HandleTimerReady(py::handle timer) +void EventsExecutor::HandleTimerReady(py::handle timer, const rcl_timer_call_info_t & info) { py::gil_scoped_acquire gil_acquire; - + py::object callback = timer.attr("callback"); + // We need to distinguish callbacks that want a TimerInfo object from those that don't. + // Executor._take_timer() actually checks if an argument has type markup expecting a TimerInfo + // object. This seems like overkill, vs just checking if it wants an argument at all? + py::object py_info; + if (py::len(inspect_signature_(callback).attr("parameters").attr("values")()) > 0) { + using py::literals::operator""_a; + py_info = rclpy_timer_timer_info_( + "expected_call_time"_a = info.expected_call_time, + "actual_call_time"_a = info.actual_call_time, + "clock_type"_a = timer.attr("clock").attr("clock_type")); + } + py::object result; try { - // The type markup claims this can't be a coroutine, but this seems to be a lie because the unit - // test does exactly that. - py::object result = timer.attr("callback")(); - if (py::cast<bool>(inspect_iscoroutine_(result))) { - // Create a Task to manage iteration of this coroutine later. - create_task(result); + if (py_info) { + result = callback(py_info); } else { - ran_user_ = true; + result = callback(); } } catch (const py::error_already_set & e) { HandleCallbackExceptionInNodeEntity(e, timer, "timers"); throw; } + + // The type markup claims the callback can't be a coroutine, but this seems to be a lie because + // the unit test does exactly that. + if (py::cast<bool>(inspect_iscoroutine_(result))) { + // Create a Task to manage iteration of this coroutine later. + create_task(result); + } else { + ran_user_ = true; + } } void EventsExecutor::HandleAddedClient(py::handle client) @@ -559,12 +579,12 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) { - // The Waitable API is too abstract for us to work with directly; it only exposes APIs for dealing - // with wait sets, and all of the rcl callback API requires knowing exactly what kinds of rcl - // objects you're working with. We'll try to figure out what kind of stuff is hiding behind the - // abstraction by having the Waitable add itself to a wait set, then take stock of what all ended - // up there. We'll also have to hope that no Waitable implementations ever change their component - // entities over their lifetimes.
+ // The Waitable API is too abstract for us to work with directly; it only exposes APIs for + // dealing with wait sets, and all of the rcl callback API requires knowing exactly what kinds of + // rcl objects you're working with. We'll try to figure out what kind of stuff is hiding behind + // the abstraction by having the Waitable add itself to a wait set, then take stock of what all + // ended up there. We'll also have to hope that no Waitable implementations ever change their + // component entities over their lifetimes. auto with_waitable = std::make_shared(waitable); const py::object num_entities = waitable.attr("get_num_entities")(); if (py::cast(num_entities.attr("num_guard_conditions")) != 0) { @@ -580,10 +600,10 @@ void EventsExecutor::HandleAddedWaitable(py::handle waitable) auto with_waitset = std::make_shared(py::cast(wait_set)); waitable.attr("add_to_wait_set")(wait_set); rcl_wait_set_t * const rcl_waitset = wait_set->rcl_ptr(); - // We null out each entry in the waitset as we set it up, so that the waitset itself can be reused - // when something becomes ready to signal to the Waitable what's ready and what's not. We also - // bind with_waitset into each callback we set up, to ensure that object doesn't get destroyed - // while any of these callbacks are still registered. + // We null out each entry in the waitset as we set it up, so that the waitset itself can be + // reused when something becomes ready to signal to the Waitable what's ready and what's not. We + // also bind with_waitset into each callback we set up, to ensure that object doesn't get + // destroyed while any of these callbacks are still registered. WaitableSubEntities sub_entities; for (size_t i = 0; i < rcl_waitset->size_of_subscriptions; ++i) { const rcl_subscription_t * const rcl_sub = rcl_waitset->subscriptions[i]; @@ -809,8 +829,8 @@ void EventsExecutor::HandleWaitableReady( future.attr("_set_executor")(py::cast(this)); } for (size_t i = 0; i < number_of_events; ++i) { - // This method can have side effects, so it needs to be called even though it looks like just an - // accessor. + // This method can have side effects, so it needs to be called even though it looks like just + // an accessor. if (!is_ready(wait_set)) { throw std::runtime_error("Failed to make Waitable ready"); } @@ -833,8 +853,8 @@ void EventsExecutor::IterateTask(py::handle task) task.dec_ref(); if (!ex.is_none()) { - // It's not clear how to easily turn a Python exception into a C++ one, so let's just throw it - // again and let pybind translate it normally. + // It's not clear how to easily turn a Python exception into a C++ one, so let's just throw + // it again and let pybind translate it normally. try { Raise(ex); } catch (py::error_already_set & cpp_ex) { diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index 40cbb3603..901335545 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -46,8 +46,8 @@ namespace events_executor /// Events executor implementation for rclpy /// -/// This executor implementation attempts to replicate the function of the rclcpp EventsExecutor for -/// the benefit of rclpy applications. It is implemented in C++ to minimize the overhead of +/// This executor implementation attempts to replicate the function of the rclcpp EventsExecutor +/// for the benefit of rclpy applications. It is implemented in C++ to minimize the overhead of /// processing the event loop. 
/// /// We assume all public methods could be invoked from any thread. Callbacks on the executor loop @@ -108,7 +108,7 @@ class EventsExecutor void HandleAddedTimer(pybind11::handle); void HandleRemovedTimer(pybind11::handle); - void HandleTimerReady(pybind11::handle); + void HandleTimerReady(pybind11::handle, const rcl_timer_call_info_t &); void HandleAddedClient(pybind11::handle); void HandleRemovedClient(pybind11::handle); @@ -121,8 +121,9 @@ class EventsExecutor void HandleAddedWaitable(pybind11::handle); void HandleRemovedWaitable(pybind11::handle); void HandleWaitableSubReady( - pybind11::handle waitable, const rcl_subscription_t *, std::shared_ptr wait_set, - size_t wait_set_sub_index, std::shared_ptr with_waitset, size_t number_of_events); + pybind11::handle waitable, const rcl_subscription_t *, + std::shared_ptr wait_set, size_t wait_set_sub_index, + std::shared_ptr with_waitset, size_t number_of_events); void HandleWaitableTimerReady( pybind11::handle waitable, const rcl_timer_t *, std::shared_ptr wait_set, size_t wait_set_timer_index, std::shared_ptr with_waitable, @@ -137,7 +138,8 @@ class EventsExecutor size_t number_of_events); void HandleWaitableEventReady( pybind11::handle waitable, const rcl_event_t *, std::shared_ptr wait_set, - size_t wait_set_event_index, std::shared_ptr with_waitset, size_t number_of_events); + size_t wait_set_event_index, std::shared_ptr with_waitset, + size_t number_of_events); void HandleWaitableReady( pybind11::handle waitable, std::shared_ptr wait_set, size_t number_of_events); @@ -161,7 +163,9 @@ class EventsExecutor // Imported python objects we depend on const pybind11::object inspect_iscoroutine_; + const pybind11::object inspect_signature_; const pybind11::object rclpy_task_; + const pybind11::object rclpy_timer_timer_info_; asio::io_context io_context_; asio::signal_set signals_; diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp index b96d3c4fa..7551e75dc 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.cpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp @@ -29,6 +29,7 @@ #include "timer.hpp" +namespace pl = std::placeholders; namespace py = pybind11; namespace rclpy @@ -138,7 +139,8 @@ class RclTimersManager::ClockManager : public std::enable_shared_from_this ready_callback) + void AddTimer( + rcl_timer_t * timer, std::function ready_callback) { // All timers have the same reset callback if ( @@ -250,11 +252,12 @@ class RclTimersManager::ClockManager : public std::enable_shared_from_thissecond(); + map_it->second(info); break; case RCL_RET_TIMER_CANCELED: // Someone canceled the timer after we queried the call time. Nevermind, then... @@ -289,7 +292,7 @@ class RclTimersManager::ClockManager : public std::enable_shared_from_this> timers_; + std::unordered_map> timers_; std::unordered_set ready_timers_; asio::steady_timer next_update_wait_{executor_}; }; @@ -312,7 +315,8 @@ rcl_clock_t * GetTimerClock(rcl_timer_t * timer) } } // namespace -void RclTimersManager::AddTimer(rcl_timer_t * timer, std::function ready_callback) +void RclTimersManager::AddTimer( + rcl_timer_t * timer, std::function ready_callback) { // Figure out the clock this timer is using, make sure a manager exists for that clock, then // forward the timer to that clock's manager. 
@@ -339,7 +343,8 @@ void RclTimersManager::RemoveTimer(rcl_timer_t * timer) } TimersManager::TimersManager( - const asio::any_io_executor & executor, std::function timer_ready_callback) + const asio::any_io_executor & executor, + std::function timer_ready_callback) : rcl_manager_(executor), ready_callback_(timer_ready_callback) { } @@ -352,7 +357,7 @@ void TimersManager::AddTimer(py::handle timer) py::handle handle = timer.attr("handle"); mapping.with = std::make_unique(handle); mapping.rcl_ptr = py::cast(handle).rcl_ptr(); - rcl_manager_.AddTimer(mapping.rcl_ptr, std::bind(ready_callback_, timer)); + rcl_manager_.AddTimer(mapping.rcl_ptr, std::bind(ready_callback_, timer, pl::_1)); timer_mappings_[timer] = std::move(mapping); } diff --git a/rclpy/src/rclpy/events_executor/timers_manager.hpp b/rclpy/src/rclpy/events_executor/timers_manager.hpp index 1d7085205..6ef1d0e1f 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.hpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.hpp @@ -40,7 +40,7 @@ class RclTimersManager explicit RclTimersManager(const asio::any_io_executor &); ~RclTimersManager(); - void AddTimer(rcl_timer_t *, std::function ready_callback); + void AddTimer(rcl_timer_t *, std::function ready_callback); void RemoveTimer(rcl_timer_t *); private: @@ -55,10 +55,11 @@ class RclTimersManager class TimersManager { public: - /// @p timer_ready_callback will be invoked with the timer handle whenever a managed timer is - /// ready for servicing. + /// @p timer_ready_callback will be invoked with the timer handle and info whenever a managed + /// timer is ready for servicing. TimersManager( - const asio::any_io_executor &, std::function timer_ready_callback); + const asio::any_io_executor &, + std::function timer_ready_callback); ~TimersManager(); /// Accessor for underlying rcl timer manager, for management of non-Python timers. 
@@ -80,7 +81,7 @@ class TimersManager }; RclTimersManager rcl_manager_; - const std::function ready_callback_; + const std::function ready_callback_; std::unordered_map timer_mappings_; }; diff --git a/rclpy/test/test_events_executor.py b/rclpy/test/test_events_executor.py index ecdf29684..2f56e2a92 100644 --- a/rclpy/test/test_events_executor.py +++ b/rclpy/test/test_events_executor.py @@ -18,6 +18,7 @@ import action_msgs.msg import rclpy.action +import rclpy.clock_type import rclpy.duration import rclpy.event_handler import rclpy.executors @@ -25,6 +26,7 @@ import rclpy.node import rclpy.qos import rclpy.time +import rclpy.timer import rosgraph_msgs.msg import test_msgs.action import test_msgs.msg @@ -174,24 +176,22 @@ class TimerTestNode(rclpy.node.Node): def __init__(self, index: int = 0, parameter_overrides: list[rclpy.Parameter] | None = None): super().__init__(f'test_timer{index}', parameter_overrides=parameter_overrides) self._timer_events = 0 - self._tick_future: rclpy.Future[None] | None = None + self._tick_future: rclpy.Future[rclpy.timer.TimerInfo] | None = None self._timer = self.create_timer(0.1, self._handle_timer) @property def timer_events(self) -> int: return self._timer_events - def expect_tick(self) -> rclpy.Future[None]: + def expect_tick(self) -> rclpy.Future[rclpy.timer.TimerInfo]: + """Get future on TimerInfo for an anticipated timer tick.""" self._tick_future = rclpy.Future() return self._tick_future - def cancel(self) -> None: - self._timer.cancel() - - def _handle_timer(self) -> None: + def _handle_timer(self, info: rclpy.timer.TimerInfo) -> None: self._timer_events += 1 if self._tick_future is not None: - self._tick_future.set_result(None) + self._tick_future.set_result(info) self._tick_future = None @@ -200,7 +200,7 @@ class ClockPublisherNode(rclpy.node.Node): def __init__(self): super().__init__('clock_node') - self._now = rclpy.time.Time() + self._now = rclpy.time.Time(clock_type=rclpy.clock_type.ClockType.ROS_TIME) self._pub = self.create_publisher( rosgraph_msgs.msg.Clock, '/clock', @@ -211,6 +211,10 @@ def advance_time(self, millisec: int) -> None: self._now += rclpy.duration.Duration(nanoseconds=millisec * 1000000) self._pub.publish(rosgraph_msgs.msg.Clock(clock=self._now.to_msg())) + @property + def now(self) -> rclpy.time.Time: + return self._now + class ActionServerTestNode(rclpy.node.Node): """Node to test action server-side operation.""" @@ -541,11 +545,15 @@ def test_timers(self) -> None: # Wait a bit, and make sure the realtime timer ticks, and the rostime one does # not. Since this is based on wall time, be very flexible on tolerances here. + realtime_tick_future = realtime_node.expect_tick() self._spin_for(1.0) realtime_ticks = realtime_node.timer_events self.assertGreater(realtime_ticks, 1) self.assertLess(realtime_ticks, 50) self.assertEqual(rostime_node.timer_events, 0) + info = realtime_tick_future.result() + assert info is not None + self.assertGreaterEqual(info.actual_call_time, info.expected_call_time) # Manually tick the rostime timer by less than a full interval. rostime_tick_future = rostime_node.expect_tick() @@ -554,6 +562,10 @@ def test_timers(self) -> None: self._expect_future_not_done(rostime_tick_future) clock_node.advance_time(1) self._expect_future_done(rostime_tick_future) + info = rostime_tick_future.result() + assert info is not None + self.assertEqual(info.actual_call_time, info.expected_call_time) + self.assertEqual(info.actual_call_time, clock_node.now) # Now tick by a bunch of full intervals. 
for _ in range(300): rostime_tick_future = rostime_node.expect_tick() From df3dbb70131d791261ed31ce4f629b3948dc85fe Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Fri, 24 Jan 2025 22:25:30 +0000 Subject: [PATCH 25/31] Simplify spin_once() implementation This both reduces duplicate code now, and simplifies the asio interface used which would need replacing. Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 134 +++++++----------- .../rclpy/events_executor/events_executor.hpp | 15 +- 2 files changed, 61 insertions(+), 88 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index 570eeefb1..47898f122 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -182,13 +182,14 @@ py::list EventsExecutor::get_nodes() const {return nodes_;} // specific node and more than one node may be connected to an executor instance. // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/executors.py#L184-L185 -void EventsExecutor::spin(std::optional timeout_sec) +void EventsExecutor::spin(std::optional timeout_sec, bool stop_after_user_callback) { { std::unique_lock spin_lock(spinning_mutex_, std::try_to_lock); if (!spin_lock) { throw std::runtime_error("Attempt to spin an already-spinning Executor"); } + stop_after_user_callback_ = stop_after_user_callback; // Release the GIL while we block. Any callbacks on the io_context that want to touch Python // will need to reacquire it though. py::gil_scoped_release gil_release; @@ -208,57 +209,26 @@ void EventsExecutor::spin(std::optional timeout_sec) io_context_.restart(); } - CheckForSignals(); -} - -void EventsExecutor::spin_once(std::optional timeout_sec) -{ - { - std::unique_lock spin_lock(spinning_mutex_, std::try_to_lock); - if (!spin_lock) { - throw std::runtime_error("Attempt to spin an already-spinning Executor"); - } - // Release the GIL while we block. Any callbacks on the io_context that want to touch Python - // will need to reacquire it though. - py::gil_scoped_release gil_release; - // Don't let asio auto stop if there's nothing to do - const auto work = asio::make_work_guard(io_context_); - // We can't just use asio run_one*() here, because 'one' asio callback might include some - // internal housekeeping, and not a user-visible callback that was intended when the Executor - // was asked to dispatch 'once'. 
- ran_user_ = false; - if (timeout_sec) { - const auto timeout_ns = std::chrono::duration_cast( - std::chrono::duration(*timeout_sec)); - const auto end = std::chrono::steady_clock::now() + timeout_ns; - // Dispatch anything that's immediately ready, even with zero timeout - while (io_context_.poll_one() && !ran_user_) { - } - // Now possibly block until the end of the timeout period - while (!ran_user_ && io_context_.run_one_until(end)) { - } - } else { - while (io_context_.run_one() && !ran_user_) { - } + const int signum = signal_pending_.exchange(0); + if (signum) { + rclpy_context_.attr("try_shutdown")(); + if (signum == SIGINT) { + PyErr_SetInterrupt(); + return; } - io_context_.restart(); } - CheckForSignals(); + const bool ok = py::cast(rclpy_context_.attr("ok")()); + if (!ok) { + Raise(py::module_::import("rclpy.executors").attr("ExternalShutdownException")()); + } } void EventsExecutor::spin_until_future_complete( - py::handle future, std::optional timeout_sec) + py::handle future, std::optional timeout_sec, bool stop_after_user_callback) { future.attr("add_done_callback")(py::cpp_function([this](py::handle) {io_context_.stop();})); - spin(timeout_sec); -} - -void EventsExecutor::spin_once_until_future_complete( - py::handle future, std::optional timeout_sec) -{ - future.attr("add_done_callback")(py::cpp_function([this](py::handle) {io_context_.stop();})); - spin_once(timeout_sec); + spin(timeout_sec, stop_after_user_callback); } EventsExecutor * EventsExecutor::enter() {return this;} @@ -344,9 +314,9 @@ void EventsExecutor::HandleAddedSubscription(py::handle subscription) const rcl_subscription_t * rcl_ptr = py::cast(handle).rcl_ptr(); const auto cb = std::bind(&EventsExecutor::HandleSubscriptionReady, this, subscription, pl::_1); if ( - RCL_RET_OK != - rcl_subscription_set_on_new_message_callback( - rcl_ptr, RclEventCallbackTrampoline, rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) + RCL_RET_OK != rcl_subscription_set_on_new_message_callback( + rcl_ptr, RclEventCallbackTrampoline, + rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) { throw std::runtime_error( std::string("Failed to set the on new message callback for subscription: ") + @@ -368,7 +338,9 @@ void EventsExecutor::HandleRemovedSubscription(py::handle subscription) void EventsExecutor::HandleSubscriptionReady(py::handle subscription, size_t number_of_events) { - ran_user_ = true; + if (stop_after_user_callback_) { + io_context_.stop(); + } py::gil_scoped_acquire gil_acquire; // Largely based on rclpy.Executor._take_subscription() and _execute_subcription(). @@ -444,8 +416,8 @@ void EventsExecutor::HandleTimerReady(py::handle timer, const rcl_timer_call_inf if (py::cast(inspect_iscoroutine_(result))) { // Create a Task to manage iteration of this coroutine later. 
create_task(result); - } else { - ran_user_ = true; + } else if (stop_after_user_callback_) { + io_context_.stop(); } } @@ -456,9 +428,9 @@ void EventsExecutor::HandleAddedClient(py::handle client) const rcl_client_t * rcl_ptr = py::cast(handle).rcl_ptr(); const auto cb = std::bind(&EventsExecutor::HandleClientReady, this, client, pl::_1); if ( - RCL_RET_OK != - rcl_client_set_on_new_response_callback( - rcl_ptr, RclEventCallbackTrampoline, rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) + RCL_RET_OK != rcl_client_set_on_new_response_callback( + rcl_ptr, RclEventCallbackTrampoline, + rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) { throw std::runtime_error( std::string("Failed to set the on new response callback for client: ") + @@ -480,7 +452,9 @@ void EventsExecutor::HandleRemovedClient(py::handle client) void EventsExecutor::HandleClientReady(py::handle client, size_t number_of_events) { - ran_user_ = true; + if (stop_after_user_callback_) { + io_context_.stop(); + } py::gil_scoped_acquire gil_acquire; // Largely based on rclpy.Executor._take_client() and _execute_client(). @@ -524,9 +498,9 @@ void EventsExecutor::HandleAddedService(py::handle service) const rcl_service_t * rcl_ptr = py::cast(handle).rcl_ptr(); const auto cb = std::bind(&EventsExecutor::HandleServiceReady, this, service, pl::_1); if ( - RCL_RET_OK != - rcl_service_set_on_new_request_callback( - rcl_ptr, RclEventCallbackTrampoline, rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) + RCL_RET_OK != rcl_service_set_on_new_request_callback( + rcl_ptr, RclEventCallbackTrampoline, + rcl_callback_manager_.MakeCallback(rcl_ptr, cb, with))) { throw std::runtime_error( std::string("Failed to set the on new request callback for service: ") + @@ -548,7 +522,9 @@ void EventsExecutor::HandleRemovedService(py::handle service) void EventsExecutor::HandleServiceReady(py::handle service, size_t number_of_events) { - ran_user_ = true; + if (stop_after_user_callback_) { + io_context_.stop(); + } py::gil_scoped_acquire gil_acquire; // Largely based on rclpy.Executor._take_service() and _execute_service(). @@ -818,7 +794,9 @@ void EventsExecutor::HandleWaitableEventReady( void EventsExecutor::HandleWaitableReady( py::handle waitable, std::shared_ptr wait_set, size_t number_of_events) { - ran_user_ = true; + if (stop_after_user_callback_) { + io_context_.stop(); + } // Largely based on rclpy.Executor._take_waitable() // https://github.com/ros2/rclpy/blob/a19180c238d4d97ed2b58868d8fb7fa3e3b621f2/rclpy/rclpy/executors.py#L447-L454 py::object is_ready = waitable.attr("is_ready"); @@ -842,7 +820,9 @@ void EventsExecutor::HandleWaitableReady( void EventsExecutor::IterateTask(py::handle task) { - ran_user_ = true; + if (stop_after_user_callback_) { + io_context_.stop(); + } py::gil_scoped_acquire gil_acquire; // Calling this won't throw, but it may set the exception property on the task object. 
task(); @@ -925,23 +905,6 @@ void EventsExecutor::Raise(py::object ex) py::exec("raise ex", scope); } -void EventsExecutor::CheckForSignals() -{ - const int signum = signal_pending_.exchange(0); - if (signum) { - rclpy_context_.attr("try_shutdown")(); - if (signum == SIGINT) { - PyErr_SetInterrupt(); - return; - } - } - - const bool ok = py::cast(rclpy_context_.attr("ok")()); - if (!ok) { - Raise(py::module_::import("rclpy.executors").attr("ExternalShutdownException")()); - } -} - // pybind11 module bindings void define_events_executor(py::object module) @@ -956,12 +919,23 @@ void define_events_executor(py::object module) .def("wake", &EventsExecutor::wake) .def("get_nodes", &EventsExecutor::get_nodes) .def("spin", [](EventsExecutor & exec) {exec.spin();}) - .def("spin_once", &EventsExecutor::spin_once, py::arg("timeout_sec") = py::none()) .def( - "spin_until_future_complete", &EventsExecutor::spin_until_future_complete, py::arg("future"), + "spin_once", + [](EventsExecutor & exec, std::optional timeout_sec) { + exec.spin(timeout_sec, true); + }, py::arg("timeout_sec") = py::none()) .def( - "spin_once_until_future_complete", &EventsExecutor::spin_once_until_future_complete, + "spin_until_future_complete", + [](EventsExecutor & exec, py::handle future, std::optional timeout_sec) { + exec.spin_until_future_complete(future, timeout_sec); + }, + py::arg("future"), py::arg("timeout_sec") = py::none()) + .def( + "spin_once_until_future_complete", + [](EventsExecutor & exec, py::handle future, std::optional timeout_sec) { + exec.spin_until_future_complete(future, timeout_sec, true); + }, py::arg("future"), py::arg("timeout_sec") = py::none()) .def("__enter__", &EventsExecutor::enter) .def("__exit__", &EventsExecutor::exit); diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index 901335545..4f4f25f3b 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -70,11 +70,10 @@ class EventsExecutor void remove_node(pybind11::handle node); void wake(); pybind11::list get_nodes() const; - void spin(std::optional timeout_sec = {}); - void spin_once(std::optional timeout_sec = {}); - void spin_until_future_complete(pybind11::handle future, std::optional timeout_sec = {}); - void spin_once_until_future_complete( - pybind11::handle future, std::optional timeout_sec = {}); + void spin(std::optional timeout_sec = {}, bool stop_after_user_callback = false); + void spin_until_future_complete( + pybind11::handle future, std::optional timeout_sec = {}, + bool stop_after_user_callback = false); EventsExecutor * enter(); void exit(pybind11::object, pybind11::object, pybind11::object); @@ -175,9 +174,9 @@ class EventsExecutor std::timed_mutex spinning_mutex_; ///< Held while a thread is spinning std::atomic signal_pending_{}; ///< Signal number of caught signal, 0 if none - /// This flag is used by spin_once() to determine if a user-visible callback has been dispatched - /// during its operation. - bool ran_user_{}; + /// This flag is used by spin_once() to signal that the io_context_ should be stopped after a + /// single user-visible callback has been dispatched. 
+  bool stop_after_user_callback_{};
 
   // Collection of awaitable entities we're servicing
   pybind11::set subscriptions_;

From 168c5cc66ce9d0286871a72dea462f7fe7317353 Mon Sep 17 00:00:00 2001
From: Brad Martin
Date: Fri, 24 Jan 2025 22:51:10 +0000
Subject: [PATCH 26/31] Fix stale Future done callbacks with
 spin_until_future_complete()

This method can't be allowed to leave its Future done callback outstanding when
it returns for a reason other than the Future being done.

Signed-off-by: Brad Martin
---
 rclpy/rclpy/task.py                               | 14 ++++++++++++++
 .../src/rclpy/events_executor/events_executor.cpp |  9 ++++++++-
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/rclpy/rclpy/task.py b/rclpy/rclpy/task.py
index 81a56ab5b..e59d2fd61 100644
--- a/rclpy/rclpy/task.py
+++ b/rclpy/rclpy/task.py
@@ -195,6 +195,20 @@ def add_done_callback(self, callback: Callable[['Future[T]'], None]) -> None:
         if invoke:
             callback(self)
 
+    def remove_done_callback(self, callback: Callable[['Future[T]'], None]) -> bool:
+        """
+        Remove a previously-added done callback.
+
+        Return True if the given callback was found and removed. This always fails if the
+        Future is already complete.
+        """
+        with self._lock:
+            try:
+                self._callbacks.remove(callback)
+            except ValueError:
+                return False
+        return True
+
 
 class Task(Future[T]):
     """
diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp
index 47898f122..7c9d31239 100644
--- a/rclpy/src/rclpy/events_executor/events_executor.cpp
+++ b/rclpy/src/rclpy/events_executor/events_executor.cpp
@@ -227,8 +227,15 @@ void EventsExecutor::spin(std::optional<double> timeout_sec, bool stop_after_use
 void EventsExecutor::spin_until_future_complete(
   py::handle future, std::optional<double> timeout_sec, bool stop_after_user_callback)
 {
-  future.attr("add_done_callback")(py::cpp_function([this](py::handle) {io_context_.stop();}));
+  py::cpp_function cb([this](py::handle) {io_context_.stop();});
+  future.attr("add_done_callback")(cb);
   spin(timeout_sec, stop_after_user_callback);
+  // If the future didn't complete (because we hit the timeout, or dispatched a different user
+  // callback after being asked to run only one), we need to clean up our callback.  Otherwise
+  // it could fire later, when the executor is no longer valid or is no longer being asked to
+  // wait on this future; and if this method gets invoked in a loop, we would keep accumulating
+  // more copies of the same callback.
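+  // For example, a Python caller polling in a loop (illustrative):
+  //   while not future.done():
+  //       executor.spin_until_future_complete(future, timeout_sec=0.1)
+  // would otherwise register one additional stale done callback on every iteration.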
+ future.attr("remove_done_callback")(cb); } EventsExecutor * EventsExecutor::enter() {return this;} From b9e5240621371620d4e314165103c34525663fa8 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Mon, 27 Jan 2025 23:09:32 +0000 Subject: [PATCH 27/31] Use existing rclpy signal handling instead of asio Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 45 +--------------- .../rclpy/events_executor/events_executor.hpp | 10 ++-- rclpy/src/rclpy/signal_handler.cpp | 53 +++++++++++++++++++ rclpy/src/rclpy/signal_handler.hpp | 18 +++++++ 4 files changed, 77 insertions(+), 49 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index 7c9d31239..d04d2b8af 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -55,42 +55,12 @@ EventsExecutor::EventsExecutor(py::object context) inspect_signature_(py::module_::import("inspect").attr("signature")), rclpy_task_(py::module_::import("rclpy.task").attr("Task")), rclpy_timer_timer_info_(py::module_::import("rclpy.timer").attr("TimerInfo")), - signals_(io_context_), + signal_callback_([this]() {io_context_.stop();}), + work_(asio::make_work_guard(io_context_.get_executor())), rcl_callback_manager_(io_context_.get_executor()), timers_manager_( io_context_.get_executor(), std::bind(&EventsExecutor::HandleTimerReady, this, pl::_1, pl::_2)) { - // rclpy.Executor creates a sigint handling guard condition here. This is necessary because a - // sleeping event loop won't notice Ctrl-C unless some other event wakes it up otherwise. - // - // Unfortunately it doesn't look like we can either support generic guard conditions or hook into - // the existing rclpy signal handling in any other useful way. We'll just establish our own - // signal handling directly instead. We can at least hook into the rclpy.init() options that - // allow a user to disable certain signal handlers. - auto rclpy_signals = py::module_::import("rclpy.signals"); - const int signal_options = - py::cast(rclpy_signals.attr("get_current_signal_handlers_options")()); - const auto sig_ops_enum = rclpy_signals.attr("SignalHandlerOptions"); - if (signal_options & py::cast(sig_ops_enum.attr("SIGINT"))) { - signals_.add(SIGINT); - } - if (signal_options & py::cast(sig_ops_enum.attr("SIGTERM"))) { - signals_.add(SIGTERM); - } - if (signal_options != py::cast(sig_ops_enum.attr("NO"))) { - signals_.async_wait([this](const asio::error_code & ec, int signum) { - if (!ec) { - py::gil_scoped_acquire gil_acquire; - // Don't call context.try_shutdown() here, because that can call back to us to - // request a blocking shutdown(), which doesn't make any sense because we have to be - // spinning to process the callback that's asked to wait for spinning to stop. We'll - // have to call that later outside of any spin loop. - // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/__init__.py#L105-L109 - signal_pending_.store(signum); - io_context_.stop(); - } - }); - } } EventsExecutor::~EventsExecutor() {shutdown();} @@ -193,8 +163,6 @@ void EventsExecutor::spin(std::optional timeout_sec, bool stop_after_use // Release the GIL while we block. Any callbacks on the io_context that want to touch Python // will need to reacquire it though. 
py::gil_scoped_release gil_release; - // Don't let asio auto stop if there's nothing to do - const auto work = asio::make_work_guard(io_context_); if (timeout_sec) { const auto timeout_ns = std::chrono::duration_cast( std::chrono::duration(*timeout_sec)); @@ -209,15 +177,6 @@ void EventsExecutor::spin(std::optional timeout_sec, bool stop_after_use io_context_.restart(); } - const int signum = signal_pending_.exchange(0); - if (signum) { - rclpy_context_.attr("try_shutdown")(); - if (signum == SIGINT) { - PyErr_SetInterrupt(); - return; - } - } - const bool ok = py::cast(rclpy_context_.attr("ok")()); if (!ok) { Raise(py::module_::import("rclpy.executors").attr("ExternalShutdownException")()); diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index 4f4f25f3b..2e9ddc819 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -32,11 +32,11 @@ #include #include -#include #include "events_executor/rcl_support.hpp" #include "events_executor/scoped_with.hpp" #include "events_executor/timers_manager.hpp" +#include "signal_handler.hpp" #include "wait_set.hpp" namespace rclpy @@ -155,9 +155,6 @@ class EventsExecutor /// Raises the given python object instance as a Python exception void Raise(pybind11::object); - /// Handles any pending signals; needs to be performed at the end of every spin*() method. - void CheckForSignals(); - const pybind11::object rclpy_context_; // Imported python objects we depend on @@ -167,12 +164,13 @@ class EventsExecutor const pybind11::object rclpy_timer_timer_info_; asio::io_context io_context_; - asio::signal_set signals_; + ScopedSignalCallback signal_callback_; + // Never let asio auto stop if there's nothing to do + asio::executor_work_guard work_; pybind11::set nodes_; ///< The set of all nodes we're executing std::atomic wake_pending_{}; ///< An unhandled call to wake() has been made std::timed_mutex spinning_mutex_; ///< Held while a thread is spinning - std::atomic signal_pending_{}; ///< Signal number of caught signal, 0 if none /// This flag is used by spin_once() to signal that the io_context_ should be stopped after a /// single user-visible callback has been dispatched. diff --git a/rclpy/src/rclpy/signal_handler.cpp b/rclpy/src/rclpy/signal_handler.cpp index 369060f50..bb7276312 100644 --- a/rclpy/src/rclpy/signal_handler.cpp +++ b/rclpy/src/rclpy/signal_handler.cpp @@ -23,6 +23,8 @@ #include #include +#include +#include #include #include #include @@ -43,6 +45,7 @@ namespace py = pybind11; static bool trigger_guard_conditions(); +static void invoke_callbacks(); namespace { @@ -142,6 +145,11 @@ setup_deferred_signal_handler() if (g_signal_handler_installed.load()) { trigger_guard_conditions(); rclpy::shutdown_contexts(); + // TODO(bmartin427) It feels like I want to do this *after* the contexts are shut down, + // to ensure the callbacks see the shut down state, and we don't end up going back to + // spin again. But I'm not sure why the same argument doesn't hold for the guard + // conditions above. + invoke_callbacks(); } } }); @@ -610,6 +618,51 @@ uninstall_signal_handlers() teardown_deferred_signal_handler(); } +// Storage for signal callbacks. +// TODO(bmartin427) The management of g_guard_conditions seems overly complex. 
That variable is +// never touched directly from a signal handler as it has a separate thread on which the guard +// conditions are triggered; however, there's an awful lot of code jumping through hoops that seem +// unnecessary with that in mind. I won't model the same behavior for the registered callbacks. +std::list> g_callbacks; +std::mutex g_callback_mutex; +class ScopedSignalCallback::Impl +{ +public: + explicit Impl(std::function callback) + { + std::lock_guard lock(g_callback_mutex); + iterator_ = g_callbacks.insert(g_callbacks.end(), callback); + } + + ~Impl() + { + std::lock_guard lock(g_callback_mutex); + g_callbacks.erase(iterator_); + } + +private: + std::list>::iterator iterator_; +}; + +ScopedSignalCallback::ScopedSignalCallback(std::function cb) +: impl_(std::make_shared(cb)) +{ +} + +ScopedSignalCallback::~ScopedSignalCallback() {} + +} // namespace rclpy + +static void invoke_callbacks() +{ + std::lock_guard lock(rclpy::g_callback_mutex); + for (auto & cb : rclpy::g_callbacks) { + cb(); + } +} + +namespace rclpy +{ void define_signal_handler_api(py::module m) { diff --git a/rclpy/src/rclpy/signal_handler.hpp b/rclpy/src/rclpy/signal_handler.hpp index e336c2a4c..e5250b8af 100644 --- a/rclpy/src/rclpy/signal_handler.hpp +++ b/rclpy/src/rclpy/signal_handler.hpp @@ -17,15 +17,33 @@ #include +#include +#include + namespace py = pybind11; namespace rclpy { + +/// Register a C++ callback to be invoked when a signal is caught. These callbacks will be invoked +/// on an arbitrary thread. The callback will be automatically unregistered if the object goes out +/// of scope. +class ScopedSignalCallback { +public: + explicit ScopedSignalCallback(std::function); + ~ScopedSignalCallback(); + +private: + class Impl; + std::shared_ptr impl_; +}; + /// Define methods on a module for working with signal handlers /** * \param[in] module a pybind11 module to add the definition to */ void define_signal_handler_api(py::module module); + } // namespace rclpy #endif // RCLPY__SIGNAL_HANDLER_HPP_ From 095965817574cf784544e892f96cd9e08a3d2619 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Tue, 28 Jan 2025 04:55:41 +0000 Subject: [PATCH 28/31] Replace asio timers with a dedicated timer wait thread This is dumb on its own, but it helps me move towards eliminating asio. 
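As an illustrative sketch, the timers manager's scheduling side now looks
roughly like this (EnqueueAt() and Cancel() are the real API of the new class;
next_ready_time stands in for the computed expiration time):

    // Post UpdateTimers() back to the executor when the next timer is due,
    // replacing any previously scheduled wakeup.
    next_update_wait_.EnqueueAt(next_ready_time, CallIfAlive(&ClockManager::UpdateTimers));
    // Or, if no timers remain outstanding, drop any pending wakeup.
    next_update_wait_.Cancel();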
Signed-off-by: Brad Martin --- rclpy/CMakeLists.txt | 1 + .../events_executor/delayed_event_thread.cpp | 74 +++++++++++++++++++ .../events_executor/delayed_event_thread.hpp | 60 +++++++++++++++ .../rclpy/events_executor/timers_manager.cpp | 31 ++++---- 4 files changed, 150 insertions(+), 16 deletions(-) create mode 100644 rclpy/src/rclpy/events_executor/delayed_event_thread.cpp create mode 100644 rclpy/src/rclpy/events_executor/delayed_event_thread.hpp diff --git a/rclpy/CMakeLists.txt b/rclpy/CMakeLists.txt index cd6905035..3542ab993 100644 --- a/rclpy/CMakeLists.txt +++ b/rclpy/CMakeLists.txt @@ -92,6 +92,7 @@ pybind11_add_module(_rclpy_pybind11 SHARED src/rclpy/destroyable.cpp src/rclpy/duration.cpp src/rclpy/clock_event.cpp + src/rclpy/events_executor/delayed_event_thread.cpp src/rclpy/events_executor/events_executor.cpp src/rclpy/events_executor/rcl_support.cpp src/rclpy/events_executor/timers_manager.cpp diff --git a/rclpy/src/rclpy/events_executor/delayed_event_thread.cpp b/rclpy/src/rclpy/events_executor/delayed_event_thread.cpp new file mode 100644 index 000000000..bbfc120cf --- /dev/null +++ b/rclpy/src/rclpy/events_executor/delayed_event_thread.cpp @@ -0,0 +1,74 @@ +// Copyright 2025 Brad Martin +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include "events_executor/delayed_event_thread.hpp" + +#include + +#include + +namespace rclpy +{ +namespace events_executor +{ + +DelayedEventThread::DelayedEventThread(const asio::any_io_executor & executor) +: executor_(executor), thread_([this]() {RunThread();}) +{ +} + +DelayedEventThread::~DelayedEventThread() +{ + { + std::unique_lock lock(mutex_); + done_ = true; + } + cv_.notify_one(); + thread_.join(); +} + +void DelayedEventThread::EnqueueAt( + std::chrono::steady_clock::time_point when, std::function handler) +{ + { + std::unique_lock lock(mutex_); + when_ = when; + handler_ = handler; + } + cv_.notify_one(); +} + +void DelayedEventThread::RunThread() +{ + std::unique_lock lock(mutex_); + while (!done_) { + if (handler_) { + // Make sure we don't dispatch a stale wait if it changes while we're waiting. + const auto latched_when = when_; + if ( + (std::cv_status::timeout == cv_.wait_until(lock, latched_when)) && handler_ && + (when_ <= latched_when)) + { + auto handler = std::move(handler_); + handler_ = {}; + asio::post(executor_, std::move(handler)); + } + } else { + // Wait indefinitely until we get signaled that there's something worth looking at. + cv_.wait(lock); + } + } +} + +} // namespace events_executor +} // namespace rclpy diff --git a/rclpy/src/rclpy/events_executor/delayed_event_thread.hpp b/rclpy/src/rclpy/events_executor/delayed_event_thread.hpp new file mode 100644 index 000000000..0e1b4c93c --- /dev/null +++ b/rclpy/src/rclpy/events_executor/delayed_event_thread.hpp @@ -0,0 +1,60 @@ +// Copyright 2025 Brad Martin +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace rclpy +{ +namespace events_executor +{ + +/// This object manages posting an event handler to an asio context after a specified delay. The +/// current delay may be changed or canceled at any time. This is done by way of a self-contained +/// child thread to perform the wait. +// TODO(bmartin427) At the moment, this object really doesn't make any sense, vs just using an asio +// timer; however, this is an intermediate step towards migrating away from asio altogether. +class DelayedEventThread +{ +public: + explicit DelayedEventThread(const asio::any_io_executor &); + ~DelayedEventThread(); + + /// Schedules an event handler to be enqueued at the specified time point. Replaces any previous + /// wait and handler, which will never be dispatched if it has not been already. + void EnqueueAt(std::chrono::steady_clock::time_point when, std::function handler); + + /// Cancels any previously-scheduled handler. + void Cancel() {EnqueueAt({}, {});} + +private: + void RunThread(); + + asio::any_io_executor executor_; + std::mutex mutex_; + bool done_{}; + std::condition_variable cv_; + std::chrono::steady_clock::time_point when_; + std::function handler_; + std::thread thread_; +}; + +} // namespace events_executor +} // namespace rclpy diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp index 7551e75dc..f7b17df8f 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.cpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp @@ -25,8 +25,8 @@ #include #include -#include +#include "events_executor/delayed_event_thread.hpp" #include "timer.hpp" namespace pl = std::placeholders; @@ -197,18 +197,22 @@ class RclTimersManager::ClockManager : public std::enable_shared_from_this next_ready_time_ns; for (const auto & timer_cb_pair : timers_) { auto this_next_time_ns = GetNextCallTimeNanoseconds(timer_cb_pair.first); if (this_next_time_ns) { - if (*this_next_time_ns <= now) { + if (*this_next_time_ns <= rcl_now) { ready_timers_.insert(timer_cb_pair.first); asio::post(executor_, CallIfAlive(&ClockManager::DispatchTimer, timer_cb_pair.first)); } else if (!next_ready_time_ns || (*this_next_time_ns < *next_ready_time_ns)) { @@ -222,16 +226,11 @@ class RclTimersManager::ClockManager : public std::enable_shared_from_this> timers_; std::unordered_set ready_timers_; - asio::steady_timer next_update_wait_{executor_}; + DelayedEventThread next_update_wait_{executor_}; }; RclTimersManager::RclTimersManager(const asio::any_io_executor & executor) From 8f764df10186c9a23d27b69f58beb6736944bdf1 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Wed, 29 Jan 2025 03:48:02 +0000 Subject: [PATCH 29/31] Correct busy-looping in async callback handling This isn't ideal because there are some ways async callbacks could become unblocked which wouldn't get noticed right away (if at all); however this seems to match the behavior of SingleThreadedExecutor. 
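In outline, the change is (abbreviated from the diff below):

    // Before: immediately re-post a still-blocked task, which busy-loops:
    //   asio::post(io_context_, std::bind(&EventsExecutor::IterateTask, this, task));
    // After: park the task, and revisit it only after the next event is handled:
    blocked_tasks_.push_back(task);  // at the end of IterateTask()
    PostOutstandingTasks();          // at the end of each Handle*Ready() method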
Signed-off-by: Brad Martin --- .../rclpy/events_executor/events_executor.cpp | 32 ++++++++++++++++--- .../rclpy/events_executor/events_executor.hpp | 8 +++++ 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index d04d2b8af..7223e8eb8 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -160,6 +160,8 @@ void EventsExecutor::spin(std::optional timeout_sec, bool stop_after_use throw std::runtime_error("Attempt to spin an already-spinning Executor"); } stop_after_user_callback_ = stop_after_user_callback; + // Any blocked tasks may have become unblocked while we weren't looking. + PostOutstandingTasks(); // Release the GIL while we block. Any callbacks on the io_context that want to touch Python // will need to reacquire it though. py::gil_scoped_release gil_release; @@ -344,6 +346,8 @@ void EventsExecutor::HandleSubscriptionReady(py::handle subscription, size_t num got_none = true; } } + + PostOutstandingTasks(); } void EventsExecutor::HandleAddedTimer(py::handle timer) {timers_manager_.AddTimer(timer);} @@ -385,6 +389,7 @@ void EventsExecutor::HandleTimerReady(py::handle timer, const rcl_timer_call_inf } else if (stop_after_user_callback_) { io_context_.stop(); } + PostOutstandingTasks(); } void EventsExecutor::HandleAddedClient(py::handle client) @@ -455,6 +460,8 @@ void EventsExecutor::HandleClientReady(py::handle client, size_t number_of_event } } } + + PostOutstandingTasks(); } void EventsExecutor::HandleAddedService(py::handle service) @@ -517,6 +524,8 @@ void EventsExecutor::HandleServiceReady(py::handle service, size_t number_of_eve send_response(response, header); } } + + PostOutstandingTasks(); } void EventsExecutor::HandleAddedWaitable(py::handle waitable) @@ -782,6 +791,8 @@ void EventsExecutor::HandleWaitableReady( // execute() is an async method, we need a Task to run it create_task(execute(data)); } + + PostOutstandingTasks(); } void EventsExecutor::IterateTask(py::handle task) @@ -815,12 +826,25 @@ void EventsExecutor::IterateTask(py::handle task) } } } else { - // Task needs more iteration. Post back to the asio loop again. - // TODO(bmartin427) Not sure this is correct; in particular, it's unclear how a task that needs - // to wait a while can avoid either blocking or spinning. Revisit when asyncio support is - // intentionally added. + // Task needs more iteration. Store the handle and revisit it later after the next ready + // entity which may unblock it. + // TODO(bmartin427) This matches the behavior of SingleThreadedExecutor and avoids busy + // looping, but I don't love it because if the task is waiting on something other than an rcl + // entity (e.g. an asyncio sleep, or a Future triggered from another thread, or even another + // Task), there can be arbitrarily long latency before some rcl activity causes us to go + // revisit that Task. + blocked_tasks_.push_back(task); + } +} + +void EventsExecutor::PostOutstandingTasks() +{ + for (auto & task : blocked_tasks_) { asio::post(io_context_, std::bind(&EventsExecutor::IterateTask, this, task)); } + // Clear the entire outstanding tasks list. Any tasks that need further iteration will re-add + // themselves during IterateTask(). 
+  blocked_tasks_.clear();
 }
 
 void EventsExecutor::HandleCallbackExceptionInNodeEntity(
diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp
index 2e9ddc819..0eb405b15 100644
--- a/rclpy/src/rclpy/events_executor/events_executor.hpp
+++ b/rclpy/src/rclpy/events_executor/events_executor.hpp
@@ -146,6 +146,11 @@ class EventsExecutor
   /// create_task() implementation for details.
   void IterateTask(pybind11::handle task);
 
+  /// Posts a call to IterateTask() for every outstanding entry in blocked_tasks_; should be
+  /// invoked from other Handle*Ready() methods to check if any asynchronous Tasks have been
+  /// unblocked by the newly-handled event.
+  void PostOutstandingTasks();
+
   void HandleCallbackExceptionInNodeEntity(
     const pybind11::error_already_set &, pybind11::handle entity,
     const std::string & node_entity_attr);
@@ -183,6 +188,9 @@ class EventsExecutor
   pybind11::set services_;
   pybind11::set waitables_;
 
+  /// Collection of asynchronous Tasks awaiting new events to further iterate.
+  std::vector<pybind11::handle> blocked_tasks_;
+
   /// Cache for rcl pointers underlying each waitables_ entry, because those are harder to retrieve
   /// than the other entity types.
   std::unordered_map waitable_entities_;

From a9c21c65378f3b673389c850693e743d187cee68 Mon Sep 17 00:00:00 2001
From: Brad Martin
Date: Fri, 24 Jan 2025 23:46:56 +0000
Subject: [PATCH 30/31] Replace asio::io_context with a new EventsQueue object

Signed-off-by: Brad Martin
---
 rclpy/CMakeLists.txt                           |  4 +-
 rclpy/package.xml                              |  2 -
 .../events_executor/delayed_event_thread.cpp   |  8 +--
 .../events_executor/delayed_event_thread.hpp   | 11 ++--
 .../rclpy/events_executor/events_executor.cpp  | 44 ++++++-------
 .../rclpy/events_executor/events_executor.hpp  | 11 ++--
 .../rclpy/events_executor/events_queue.cpp     | 66 +++++++++++++++++++
 .../rclpy/events_executor/events_queue.hpp     | 62 +++++++++++++++++
 .../src/rclpy/events_executor/rcl_support.cpp  | 10 +--
 .../src/rclpy/events_executor/rcl_support.hpp  | 24 +++----
 .../rclpy/events_executor/timers_manager.cpp   | 32 +++++----
 .../rclpy/events_executor/timers_manager.hpp   | 13 ++--
 12 files changed, 197 insertions(+), 90 deletions(-)
 create mode 100644 rclpy/src/rclpy/events_executor/events_queue.cpp
 create mode 100644 rclpy/src/rclpy/events_executor/events_queue.hpp

diff --git a/rclpy/CMakeLists.txt b/rclpy/CMakeLists.txt
index 3542ab993..0a79cdf91 100644
--- a/rclpy/CMakeLists.txt
+++ b/rclpy/CMakeLists.txt
@@ -17,8 +17,6 @@ endif()
 
 find_package(ament_cmake REQUIRED)
 find_package(ament_cmake_python REQUIRED)
-find_package(asio_cmake_module REQUIRED)
-find_package(ASIO REQUIRED)
 find_package(lifecycle_msgs REQUIRED)
 find_package(rcl REQUIRED)
 find_package(rcl_action REQUIRED)
@@ -94,6 +92,7 @@ pybind11_add_module(_rclpy_pybind11 SHARED
   src/rclpy/destroyable.cpp
   src/rclpy/duration.cpp
   src/rclpy/clock_event.cpp
   src/rclpy/events_executor/delayed_event_thread.cpp
   src/rclpy/events_executor/events_executor.cpp
+  src/rclpy/events_executor/events_queue.cpp
   src/rclpy/events_executor/rcl_support.cpp
   src/rclpy/events_executor/timers_manager.cpp
   src/rclpy/exceptions.cpp
@@ -127,7 +126,6 @@ target_include_directories(_rclpy_pybind11 PRIVATE
   src/rclpy/
 )
 target_link_libraries(_rclpy_pybind11 PRIVATE
-  ${ASIO_LIBRARIES}
   lifecycle_msgs::lifecycle_msgs__rosidl_generator_c
   lifecycle_msgs::lifecycle_msgs__rosidl_typesupport_c
   rcl::rcl
diff --git a/rclpy/package.xml b/rclpy/package.xml
index 19a3bfc1b..a08514c6f 100644
--- a/rclpy/package.xml
+++ b/rclpy/package.xml
@@ -16,9 +16,7 @@
   William Woodall
 
   ament_cmake
-  asio_cmake_module
-  asio
pybind11_vendor python3-dev rcpputils diff --git a/rclpy/src/rclpy/events_executor/delayed_event_thread.cpp b/rclpy/src/rclpy/events_executor/delayed_event_thread.cpp index bbfc120cf..0d090fc44 100644 --- a/rclpy/src/rclpy/events_executor/delayed_event_thread.cpp +++ b/rclpy/src/rclpy/events_executor/delayed_event_thread.cpp @@ -15,15 +15,13 @@ #include -#include - namespace rclpy { namespace events_executor { -DelayedEventThread::DelayedEventThread(const asio::any_io_executor & executor) -: executor_(executor), thread_([this]() {RunThread();}) +DelayedEventThread::DelayedEventThread(EventsQueue * events_queue) +: events_queue_(events_queue), thread_([this]() {RunThread();}) { } @@ -61,7 +59,7 @@ void DelayedEventThread::RunThread() { auto handler = std::move(handler_); handler_ = {}; - asio::post(executor_, std::move(handler)); + events_queue_->Enqueue(std::move(handler)); } } else { // Wait indefinitely until we get signaled that there's something worth looking at. diff --git a/rclpy/src/rclpy/events_executor/delayed_event_thread.hpp b/rclpy/src/rclpy/events_executor/delayed_event_thread.hpp index 0e1b4c93c..ecfc7851c 100644 --- a/rclpy/src/rclpy/events_executor/delayed_event_thread.hpp +++ b/rclpy/src/rclpy/events_executor/delayed_event_thread.hpp @@ -19,22 +19,21 @@ #include #include -#include +#include "events_executor/events_queue.hpp" namespace rclpy { namespace events_executor { -/// This object manages posting an event handler to an asio context after a specified delay. The +/// This object manages posting an event handler to an EventsQueue after a specified delay. The /// current delay may be changed or canceled at any time. This is done by way of a self-contained /// child thread to perform the wait. -// TODO(bmartin427) At the moment, this object really doesn't make any sense, vs just using an asio -// timer; however, this is an intermediate step towards migrating away from asio altogether. class DelayedEventThread { public: - explicit DelayedEventThread(const asio::any_io_executor &); + /// The pointer is aliased and must live for the lifetime of this object. + explicit DelayedEventThread(EventsQueue *); ~DelayedEventThread(); /// Schedules an event handler to be enqueued at the specified time point. 
Replaces any previous @@ -47,7 +46,7 @@ class DelayedEventThread private: void RunThread(); - asio::any_io_executor executor_; + EventsQueue * const events_queue_; std::mutex mutex_; bool done_{}; std::condition_variable cv_; diff --git a/rclpy/src/rclpy/events_executor/events_executor.cpp b/rclpy/src/rclpy/events_executor/events_executor.cpp index 7223e8eb8..4884395d7 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.cpp +++ b/rclpy/src/rclpy/events_executor/events_executor.cpp @@ -34,8 +34,6 @@ #include #include -#include - #include "client.hpp" #include "context.hpp" #include "service.hpp" @@ -55,11 +53,10 @@ EventsExecutor::EventsExecutor(py::object context) inspect_signature_(py::module_::import("inspect").attr("signature")), rclpy_task_(py::module_::import("rclpy.task").attr("Task")), rclpy_timer_timer_info_(py::module_::import("rclpy.timer").attr("TimerInfo")), - signal_callback_([this]() {io_context_.stop();}), - work_(asio::make_work_guard(io_context_.get_executor())), - rcl_callback_manager_(io_context_.get_executor()), + signal_callback_([this]() {events_queue_.Stop();}), + rcl_callback_manager_(&events_queue_), timers_manager_( - io_context_.get_executor(), std::bind(&EventsExecutor::HandleTimerReady, this, pl::_1, pl::_2)) + &events_queue_, std::bind(&EventsExecutor::HandleTimerReady, this, pl::_1, pl::_2)) { } @@ -77,7 +74,7 @@ pybind11::object EventsExecutor::create_task( // manual refcounting on it instead. py::handle cb_task_handle = task; cb_task_handle.inc_ref(); - asio::post(io_context_, std::bind(&EventsExecutor::IterateTask, this, cb_task_handle)); + events_queue_.Enqueue(std::bind(&EventsExecutor::IterateTask, this, cb_task_handle)); return task; } @@ -87,7 +84,7 @@ bool EventsExecutor::shutdown(std::optional timeout) // not try to go access that context during this method or we can deadlock. // https://github.com/ros2/rclpy/blob/06d78fb28a6d61ede793201ae75474f3e5432b47/rclpy/rclpy/context.py#L101-L103 - io_context_.stop(); + events_queue_.Stop(); // Block until spinning is done, or timeout. Release the GIL while we block though. { @@ -138,7 +135,7 @@ void EventsExecutor::wake() { if (!wake_pending_.exchange(true)) { // Update tracked entities. - asio::post(io_context_, [this]() { + events_queue_.Enqueue([this]() { py::gil_scoped_acquire gil_acquire; UpdateEntitiesFromNodes(!py::cast(rclpy_context_.attr("ok")())); }); @@ -162,21 +159,18 @@ void EventsExecutor::spin(std::optional timeout_sec, bool stop_after_use stop_after_user_callback_ = stop_after_user_callback; // Any blocked tasks may have become unblocked while we weren't looking. PostOutstandingTasks(); - // Release the GIL while we block. Any callbacks on the io_context that want to touch Python + // Release the GIL while we block. Any callbacks on the events queue that want to touch Python // will need to reacquire it though. 
py::gil_scoped_release gil_release; if (timeout_sec) { const auto timeout_ns = std::chrono::duration_cast( std::chrono::duration(*timeout_sec)); const auto end = std::chrono::steady_clock::now() + timeout_ns; - // Dispatch anything that's immediately ready, even with zero timeout - io_context_.poll(); - // Now possibly block until the end of the timeout period - io_context_.run_until(end); + events_queue_.RunUntil(end); } else { - io_context_.run(); + events_queue_.Run(); } - io_context_.restart(); + events_queue_.Restart(); } const bool ok = py::cast(rclpy_context_.attr("ok")()); @@ -188,7 +182,7 @@ void EventsExecutor::spin(std::optional timeout_sec, bool stop_after_use void EventsExecutor::spin_until_future_complete( py::handle future, std::optional timeout_sec, bool stop_after_user_callback) { - py::cpp_function cb([this](py::handle) {io_context_.stop();}); + py::cpp_function cb([this](py::handle) {events_queue_.Stop();}); future.attr("add_done_callback")(cb); spin(timeout_sec, stop_after_user_callback); // In case the future didn't complete (we hit the timeout or dispatched a different user callback @@ -253,7 +247,7 @@ void EventsExecutor::UpdateEntitiesFromNodes(bool shutdown) if (shutdown) { // Stop spinning after everything is torn down. - io_context_.stop(); + events_queue_.Stop(); } } @@ -307,7 +301,7 @@ void EventsExecutor::HandleRemovedSubscription(py::handle subscription) void EventsExecutor::HandleSubscriptionReady(py::handle subscription, size_t number_of_events) { if (stop_after_user_callback_) { - io_context_.stop(); + events_queue_.Stop(); } py::gil_scoped_acquire gil_acquire; @@ -387,7 +381,7 @@ void EventsExecutor::HandleTimerReady(py::handle timer, const rcl_timer_call_inf // Create a Task to manage iteration of this coroutine later. create_task(result); } else if (stop_after_user_callback_) { - io_context_.stop(); + events_queue_.Stop(); } PostOutstandingTasks(); } @@ -424,7 +418,7 @@ void EventsExecutor::HandleRemovedClient(py::handle client) void EventsExecutor::HandleClientReady(py::handle client, size_t number_of_events) { if (stop_after_user_callback_) { - io_context_.stop(); + events_queue_.Stop(); } py::gil_scoped_acquire gil_acquire; @@ -496,7 +490,7 @@ void EventsExecutor::HandleRemovedService(py::handle service) void EventsExecutor::HandleServiceReady(py::handle service, size_t number_of_events) { if (stop_after_user_callback_) { - io_context_.stop(); + events_queue_.Stop(); } py::gil_scoped_acquire gil_acquire; @@ -770,7 +764,7 @@ void EventsExecutor::HandleWaitableReady( py::handle waitable, std::shared_ptr wait_set, size_t number_of_events) { if (stop_after_user_callback_) { - io_context_.stop(); + events_queue_.Stop(); } // Largely based on rclpy.Executor._take_waitable() // https://github.com/ros2/rclpy/blob/a19180c238d4d97ed2b58868d8fb7fa3e3b621f2/rclpy/rclpy/executors.py#L447-L454 @@ -798,7 +792,7 @@ void EventsExecutor::HandleWaitableReady( void EventsExecutor::IterateTask(py::handle task) { if (stop_after_user_callback_) { - io_context_.stop(); + events_queue_.Stop(); } py::gil_scoped_acquire gil_acquire; // Calling this won't throw, but it may set the exception property on the task object. @@ -840,7 +834,7 @@ void EventsExecutor::IterateTask(py::handle task) void EventsExecutor::PostOutstandingTasks() { for (auto & task : blocked_tasks_) { - asio::post(io_context_, std::bind(&EventsExecutor::IterateTask, this, task)); + events_queue_.Enqueue(std::bind(&EventsExecutor::IterateTask, this, task)); } // Clear the entire outstanding tasks list. 
Any tasks that need further iteration will re-add // themselves during IterateTask(). diff --git a/rclpy/src/rclpy/events_executor/events_executor.hpp b/rclpy/src/rclpy/events_executor/events_executor.hpp index 0eb405b15..8a81d6f60 100644 --- a/rclpy/src/rclpy/events_executor/events_executor.hpp +++ b/rclpy/src/rclpy/events_executor/events_executor.hpp @@ -31,8 +31,7 @@ #include #include -#include - +#include "events_executor/events_queue.hpp" #include "events_executor/rcl_support.hpp" #include "events_executor/scoped_with.hpp" #include "events_executor/timers_manager.hpp" @@ -89,7 +88,7 @@ class EventsExecutor }; /// Updates the sets of known entities based on the currently tracked nodes. This is not thread - /// safe, so it must be posted to the io_context if the executor is currently spinning. Expects + /// safe, so it must be posted to the EventsQueue if the executor is currently spinning. Expects /// the GIL to be held before calling. If @p shutdown is true, a purge of all known nodes and /// entities is forced. void UpdateEntitiesFromNodes(bool shutdown); @@ -168,16 +167,14 @@ class EventsExecutor const pybind11::object rclpy_task_; const pybind11::object rclpy_timer_timer_info_; - asio::io_context io_context_; + EventsQueue events_queue_; ScopedSignalCallback signal_callback_; - // Never let asio auto stop if there's nothing to do - asio::executor_work_guard work_; pybind11::set nodes_; ///< The set of all nodes we're executing std::atomic wake_pending_{}; ///< An unhandled call to wake() has been made std::timed_mutex spinning_mutex_; ///< Held while a thread is spinning - /// This flag is used by spin_once() to signal that the io_context_ should be stopped after a + /// This flag is used by spin_once() to signal that the EventsQueue should be stopped after a /// single user-visible callback has been dispatched. bool stop_after_user_callback_{}; diff --git a/rclpy/src/rclpy/events_executor/events_queue.cpp b/rclpy/src/rclpy/events_executor/events_queue.cpp new file mode 100644 index 000000000..a65c22bc5 --- /dev/null +++ b/rclpy/src/rclpy/events_executor/events_queue.cpp @@ -0,0 +1,66 @@ +// Copyright 2025 Brad Martin +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
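+//
+// Rough usage sketch (illustration only; EventsExecutor holds the real call sites):
+//   EventsQueue queue;
+//   queue.Enqueue([]() { /* handlers run on whichever thread calls Run() */ });
+//   queue.Run();  // dispatches handlers until Stop() is called, possibly from another thread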
+#include "events_executor/events_queue.hpp" + +namespace rclpy +{ +namespace events_executor +{ + +EventsQueue::~EventsQueue() {} + +void EventsQueue::Enqueue(std::function event_handler) +{ + { + std::unique_lock lock(mutex_); + queue_.push(event_handler); + } + cv_.notify_one(); +} + +void EventsQueue::Run() {RunUntil(std::chrono::steady_clock::time_point::max());} + +void EventsQueue::RunUntil(std::chrono::steady_clock::time_point deadline) +{ + while (true) { + std::function handler; + { + std::unique_lock lock(mutex_); + cv_.wait_until(lock, deadline, [this]() {return stopped_ || !queue_.empty();}); + if (stopped_ || queue_.empty()) { + // We stopped for some reason other than being ready to run something (stopped or timeout) + return; + } + handler = queue_.front(); + queue_.pop(); + } + handler(); + } +} + +void EventsQueue::Stop() +{ + std::unique_lock lock(mutex_); + stopped_ = true; + cv_.notify_one(); +} + +void EventsQueue::Restart() +{ + std::unique_lock lock(mutex_); + stopped_ = false; +} + +} // namespace events_executor +} // namespace rclpy diff --git a/rclpy/src/rclpy/events_executor/events_queue.hpp b/rclpy/src/rclpy/events_executor/events_queue.hpp new file mode 100644 index 000000000..a17d3ccde --- /dev/null +++ b/rclpy/src/rclpy/events_executor/events_queue.hpp @@ -0,0 +1,62 @@ +// Copyright 2025 Brad Martin +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once + +#include +#include +#include +#include +#include + +namespace rclpy +{ +namespace events_executor +{ + +/// This class represents a queue of event handlers to dispatch. Handlers may be enqueued from any +/// thread, and will always be dispatched during a call to a Run*() method on the thread invoking +/// that method. Multiple threads may not invoke Run*() methods simultaneously. +class EventsQueue +{ +public: + ~EventsQueue(); + + /// Add an event handler to the queue to be dispatched. Can be invoked by any thread. + void Enqueue(std::function); + + /// Run event handlers indefinitely, until stopped. + void Run(); + + /// Run all ready event handlers, and any that become ready before the given deadline. Calling + /// Stop() will make this return immediately even if ready handlers are enqueued. + void RunUntil(std::chrono::steady_clock::time_point); + + /// Causes any Run*() methods outstanding to return immediately. Can be invoked from any thread. + /// The stopped condition persists (causing any *subsequent* Run*() calls to also return) until + /// Restart() is invoked. + void Stop(); + + /// Ends a previous stopped condition, allowing Run*() methods to operate again. May be invoked + /// from any thread. 
+ void Restart(); + +private: + std::queue> queue_; + std::mutex mutex_; + std::condition_variable cv_; + bool stopped_{}; +}; + +} // namespace events_executor +} // namespace rclpy diff --git a/rclpy/src/rclpy/events_executor/rcl_support.cpp b/rclpy/src/rclpy/events_executor/rcl_support.cpp index fa581132a..3a8826f70 100644 --- a/rclpy/src/rclpy/events_executor/rcl_support.cpp +++ b/rclpy/src/rclpy/events_executor/rcl_support.cpp @@ -16,8 +16,6 @@ #include -#include - namespace py = pybind11; namespace rclpy @@ -31,10 +29,8 @@ extern "C" void RclEventCallbackTrampoline(const void * user_data, size_t number (*cb)(number_of_events); } -RclCallbackManager::RclCallbackManager(const asio::any_io_executor & executor) -: executor_(executor) -{ -} +RclCallbackManager::RclCallbackManager(EventsQueue * events_queue) +: events_queue_(events_queue) {} RclCallbackManager::~RclCallbackManager() { @@ -58,7 +54,7 @@ const void * RclCallbackManager::MakeCallback( CbEntry new_entry; new_entry.cb = std::make_unique>([this, callback, key](size_t number_of_events) { - asio::post(executor_, [this, callback, key, number_of_events]() { + events_queue_->Enqueue([this, callback, key, number_of_events]() { if (!owned_cbs_.count(key)) { // This callback has been removed, just drop it as the objects it may want to touch may // no longer exist. diff --git a/rclpy/src/rclpy/events_executor/rcl_support.hpp b/rclpy/src/rclpy/events_executor/rcl_support.hpp index e8d94a48b..422dd27bb 100644 --- a/rclpy/src/rclpy/events_executor/rcl_support.hpp +++ b/rclpy/src/rclpy/events_executor/rcl_support.hpp @@ -19,8 +19,7 @@ #include #include -#include - +#include "events_executor/events_queue.hpp" #include "events_executor/scoped_with.hpp" namespace rclpy @@ -33,21 +32,22 @@ namespace events_executor /// /// Note that RCL callbacks are invoked in some arbitrary thread originating from the middleware. /// Callbacks should process quickly to avoid blocking the middleware; i.e. all actual work should -/// be posted to an asio loop in another thread. +/// be posted to an EventsQueue running in another thread. extern "C" void RclEventCallbackTrampoline(const void * user_data, size_t number_of_events); /// Creates and maintains callback wrappers used with the RCL C library. class RclCallbackManager { public: - /// All user callbacks will be posted on the @p executor given to the constructor. These - /// callbacks will be invoked without the Python Global Interpreter Lock held, so if they need to - /// access Python at all make sure to acquire that explicitly. - explicit RclCallbackManager(const asio::any_io_executor & executor); + /// All user callbacks will be posted on the @p events_queue given to the constructor. This + /// pointer is aliased and must live for the lifetime of this object. These callbacks will be + /// invoked without the Python Global Interpreter Lock held, so if they need to access Python at + /// all make sure to acquire that explicitly. + explicit RclCallbackManager(EventsQueue * events_queue); ~RclCallbackManager(); - /// Creates a callback wrapper to be passed to RCL C functions. @p key should be a pointer to the - /// rcl object that will be associated with the callback. @p with protects the _rclpy object + /// Creates a callback wrapper to be passed to RCL C functions. @p key should be a pointer to + /// the rcl object that will be associated with the callback. @p with protects the _rclpy object /// handle owning the RCL object, for the duration the callback is established. 
const void * MakeCallback( const void * key, std::function callback, std::shared_ptr with); @@ -58,15 +58,15 @@ class RclCallbackManager private: /// The C RCL interface deals in raw pointers, so someone needs to own the C++ function objects - /// we'll be calling into. We use unique pointers so the raw pointer to the object remains stable - /// while the map is manipulated. + /// we'll be calling into. We use unique pointers so the raw pointer to the object remains + /// stable while the map is manipulated. struct CbEntry { std::unique_ptr> cb; std::shared_ptr with; }; - asio::any_io_executor executor_; + EventsQueue * const events_queue_; /// The map key is the raw pointer to the RCL entity object (subscription, etc) associated with /// the callback. diff --git a/rclpy/src/rclpy/events_executor/timers_manager.cpp b/rclpy/src/rclpy/events_executor/timers_manager.cpp index f7b17df8f..796436f6a 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.cpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.cpp @@ -24,8 +24,6 @@ #include #include -#include - #include "events_executor/delayed_event_thread.hpp" #include "timer.hpp" @@ -79,13 +77,13 @@ extern "C" void RclTimerResetTrampoline(const void * user_data, size_t) } // namespace /// Manages a single clock source, and all timers operating on that source. All methods (including -/// construction and destruction) are assumed to happen on the thread running the provided asio -/// executor. +/// construction and destruction) are assumed to happen on the thread running the provided events +/// queue. class RclTimersManager::ClockManager : public std::enable_shared_from_this { public: - ClockManager(const asio::any_io_executor & executor, rcl_clock_t * clock) - : executor_(executor), clock_(clock) + ClockManager(EventsQueue * events_queue, rcl_clock_t * clock) + : events_queue_(events_queue), clock_(clock) { // Need to establish a clock jump callback so we can tell when debug time is updated. 
rcl_jump_threshold_t threshold{.on_clock_change = true, .min_forward = 1, .min_backward = -1}; @@ -102,7 +100,7 @@ class RclTimersManager::ClockManager : public std::enable_shared_from_thisEnqueue(CallIfAlive(&ClockManager::HandleJump, on_debug)); }; if ( RCL_RET_OK != @@ -114,7 +112,7 @@ class RclTimersManager::ClockManager : public std::enable_shared_from_thisEnqueue(CallIfAlive(&ClockManager::UpdateTimers));}; // Initialize which timebase we're on if (clock_->type == RCL_ROS_TIME) { @@ -214,7 +212,7 @@ class RclTimersManager::ClockManager : public std::enable_shared_from_thisEnqueue(CallIfAlive(&ClockManager::DispatchTimer, timer_cb_pair.first)); } else if (!next_ready_time_ns || (*this_next_time_ns < *next_ready_time_ns)) { next_ready_time_ns = this_next_time_ns; } @@ -240,7 +238,7 @@ class RclTimersManager::ClockManager : public std::enable_shared_from_thisEnqueue(CallIfAlive(&ClockManager::UpdateTimers)); } const auto map_it = timers_.find(rcl_timer); @@ -285,7 +283,7 @@ class RclTimersManager::ClockManager : public std::enable_shared_from_this> timers_; std::unordered_set ready_timers_; - DelayedEventThread next_update_wait_{executor_}; + DelayedEventThread next_update_wait_{events_queue_}; }; -RclTimersManager::RclTimersManager(const asio::any_io_executor & executor) -: executor_(executor) {} +RclTimersManager::RclTimersManager(EventsQueue * events_queue) +: events_queue_(events_queue) {} RclTimersManager::~RclTimersManager() {} @@ -323,7 +321,7 @@ void RclTimersManager::AddTimer( auto it = clock_managers_.find(clock); if (it == clock_managers_.end()) { std::tie(it, std::ignore) = clock_managers_.insert( - std::make_pair(clock, std::make_shared(executor_, clock))); + std::make_pair(clock, std::make_shared(events_queue_, clock))); } it->second->AddTimer(timer, ready_callback); } @@ -342,9 +340,9 @@ void RclTimersManager::RemoveTimer(rcl_timer_t * timer) } TimersManager::TimersManager( - const asio::any_io_executor & executor, + EventsQueue * events_queue, std::function timer_ready_callback) -: rcl_manager_(executor), ready_callback_(timer_ready_callback) +: rcl_manager_(events_queue), ready_callback_(timer_ready_callback) { } diff --git a/rclpy/src/rclpy/events_executor/timers_manager.hpp b/rclpy/src/rclpy/events_executor/timers_manager.hpp index 6ef1d0e1f..9fc252a56 100644 --- a/rclpy/src/rclpy/events_executor/timers_manager.hpp +++ b/rclpy/src/rclpy/events_executor/timers_manager.hpp @@ -23,8 +23,7 @@ #include #include -#include - +#include "events_executor/events_queue.hpp" #include "events_executor/python_hasher.hpp" #include "events_executor/scoped_with.hpp" @@ -37,14 +36,15 @@ namespace events_executor class RclTimersManager { public: - explicit RclTimersManager(const asio::any_io_executor &); + /// The given pointer is aliased and must live for the lifetime of this object. + explicit RclTimersManager(EventsQueue *); ~RclTimersManager(); void AddTimer(rcl_timer_t *, std::function ready_callback); void RemoveTimer(rcl_timer_t *); private: - asio::any_io_executor executor_; + EventsQueue * const events_queue_; class ClockManager; /// Handlers for each distinct clock source in the system. @@ -55,10 +55,11 @@ class RclTimersManager class TimersManager { public: - /// @p timer_ready_callback will be invoked with the timer handle and info whenever a managed + /// @param events_queue is aliased and must live for the lifetime of this object. + /// @param timer_ready_callback will be invoked with the timer handle and info whenever a managed /// timer is ready for servicing. 
TimersManager( - const asio::any_io_executor &, + EventsQueue * events_queue, std::function timer_ready_callback); ~TimersManager(); From 685605454665731a176d0437f8f841c1fe426719 Mon Sep 17 00:00:00 2001 From: Brad Martin Date: Wed, 29 Jan 2025 04:19:56 +0000 Subject: [PATCH 31/31] Add EventsExecutor to new test_executor test from merge Signed-off-by: Brad Martin --- rclpy/test/test_executor.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/rclpy/test/test_executor.py b/rclpy/test/test_executor.py index 6261583aa..df9937e88 100644 --- a/rclpy/test/test_executor.py +++ b/rclpy/test/test_executor.py @@ -288,23 +288,24 @@ async def coroutine(): def test_create_task_coroutine_cancel(self) -> None: self.assertIsNotNone(self.node.handle) - executor = SingleThreadedExecutor(context=self.context) - executor.add_node(self.node) + for cls in [SingleThreadedExecutor, EventsExecutor]: + executor = cls(context=self.context) + executor.add_node(self.node) - async def coroutine(): - return 'Sentinel Result' + async def coroutine(): + return 'Sentinel Result' - future = executor.create_task(coroutine) - self.assertFalse(future.done()) - self.assertFalse(future.cancelled()) + future = executor.create_task(coroutine) + self.assertFalse(future.done()) + self.assertFalse(future.cancelled()) - future.cancel() - self.assertTrue(future.cancelled()) + future.cancel() + self.assertTrue(future.cancelled()) - executor.spin_until_future_complete(future) - self.assertFalse(future.done()) - self.assertTrue(future.cancelled()) - self.assertEqual(None, future.result()) + executor.spin_until_future_complete(future) + self.assertFalse(future.done()) + self.assertTrue(future.cancelled()) + self.assertEqual(None, future.result()) def test_create_task_normal_function(self) -> None: self.assertIsNotNone(self.node.handle)