diff --git a/rclpy/test/benchmark/test_benchmark_actions.py b/rclpy/test/benchmark/test_benchmark_actions.py
index e11a7f68e..53df54077 100644
--- a/rclpy/test/benchmark/test_benchmark_actions.py
+++ b/rclpy/test/benchmark/test_benchmark_actions.py
@@ -13,15 +13,17 @@
 # limitations under the License.
 
 from action_msgs.msg import GoalStatus
-import pytest
 import rclpy
 from rclpy.action import ActionClient, ActionServer
 from rclpy.executors import SingleThreadedExecutor
-from rclpy.qos import qos_profile_services_default, QoSProfile, ReliabilityPolicy
+from rclpy.qos import qos_profile_services_default, ReliabilityPolicy
 from test_msgs.action import Fibonacci
 
 
-def test_one_goal(benchmark):
+ONE_HUNDRED = 100
+
+
+def test_one_hundred_goals(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
@@ -38,7 +40,7 @@ def test_one_goal(benchmark):
 
         def execute_cb(goal_handle):
             goal_handle.succeed()
-            return response
+            return Fibonacci.Result()
 
         action_server = ActionServer(
             node,
@@ -55,15 +57,16 @@ def execute_cb(goal_handle):
         assert action_client.wait_for_server(timeout_sec=5.0)
 
         def bm():
-            fut = action_client.send_goal_async(Fibonacci.Goal())
-            executor.spin_until_future_complete(fut)
-            goal_handle = fut.result()  # Raises if there was an error
-            assert goal_handle.accepted
-            result_fut = goal_handle.get_result_async()
-            executor.spin_until_future_complete(result_fut)
-            return result_fut.result().status
+            for _ in range(ONE_HUNDRED):
+                goal_fut = action_client.send_goal_async(Fibonacci.Goal())
+                executor.spin_until_future_complete(goal_fut)
+                client_goal_handle = goal_fut.result()
+                assert client_goal_handle.accepted
+                result_fut = client_goal_handle.get_result_async()
+                executor.spin_until_future_complete(result_fut)
+                assert GoalStatus.STATUS_SUCCEEDED == result_fut.result().status
 
-        assert GoalStatus.STATUS_SUCCEEDED == benchmark(bm)
+        benchmark(bm)
 
         executor.shutdown()
         action_client.destroy()
diff --git a/rclpy/test/benchmark/test_benchmark_client_service.py b/rclpy/test/benchmark/test_benchmark_client_service.py
index 47a75f038..db88673da 100644
--- a/rclpy/test/benchmark/test_benchmark_client_service.py
+++ b/rclpy/test/benchmark/test_benchmark_client_service.py
@@ -12,14 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest
 import rclpy
 from rclpy.executors import SingleThreadedExecutor
-from rclpy.qos import qos_profile_services_default, QoSProfile, ReliabilityPolicy
+from rclpy.qos import qos_profile_services_default, ReliabilityPolicy
 from test_msgs.srv import Empty as EmptySrv
 
 
-def test_one_call(benchmark):
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_service_calls(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
@@ -40,11 +42,12 @@ def cb(_, response):
         assert client.wait_for_service(timeout_sec=5.0)
 
         def bm():
-            fut = client.call_async(EmptySrv.Request())
-            executor.spin_until_future_complete(fut)
-            return fut.result()  # Raises if there was an error
+            for _ in range(ONE_THOUSAND):
+                fut = client.call_async(EmptySrv.Request())
+                executor.spin_until_future_complete(fut)
+                assert fut.result()  # Raises if there was an error
 
-        assert isinstance(benchmark(bm), EmptySrv.Response)
+        benchmark(bm)
 
         executor.shutdown()
         node.destroy_client(client)
diff --git a/rclpy/test/benchmark/test_benchmark_guard_condition.py b/rclpy/test/benchmark/test_benchmark_guard_condition.py
index 34f508fb3..e44c82c68 100644
--- a/rclpy/test/benchmark/test_benchmark_guard_condition.py
+++ b/rclpy/test/benchmark/test_benchmark_guard_condition.py
@@ -12,21 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest
 import rclpy
 from rclpy.executors import SingleThreadedExecutor
 
 
-def test_one_callback(benchmark):
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_guard_callbacks(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
         node = rclpy.create_node('benchmark_many_gc_calls', context=context)
-        called = False
+        num_calls = 0
 
         def cb():
-            nonlocal called
-            called = True
+            nonlocal num_calls
+            num_calls += 1
 
         gc = node.create_guard_condition(cb)
 
@@ -34,11 +36,13 @@ def cb():
         executor.add_node(node)
 
         def bm():
-            nonlocal called
-            gc.trigger()
-            while not called:
+            nonlocal num_calls
+            # Reset for each benchmark run
+            num_calls = 0
+            while num_calls < ONE_THOUSAND:
+                gc.trigger()
                 executor.spin_once(timeout_sec=0)
-            return called
+            return num_calls == ONE_THOUSAND
 
         assert benchmark(bm)
 
@@ -49,16 +53,16 @@ def bm():
         rclpy.shutdown(context=context)
 
 
-def test_one_coroutine_callback(benchmark):
+def test_one_thousand_guard_coroutine_callbacks(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
         node = rclpy.create_node('benchmark_many_gc_calls', context=context)
-        called = False
+        num_calls = 0
 
         async def coro():
-            nonlocal called
-            called = True
+            nonlocal num_calls
+            num_calls += 1
 
         gc = node.create_guard_condition(coro)
 
@@ -66,11 +70,13 @@ async def coro():
         executor.add_node(node)
 
         def bm():
-            nonlocal called
-            gc.trigger()
-            while not called:
+            nonlocal num_calls
+            # Reset for each benchmark run
+            num_calls = 0
+            while num_calls < ONE_THOUSAND:
+                gc.trigger()
                 executor.spin_once(timeout_sec=0)
-            return called
+            return num_calls == ONE_THOUSAND
 
         assert benchmark(bm)
 
diff --git a/rclpy/test/benchmark/test_benchmark_pub_sub.py b/rclpy/test/benchmark/test_benchmark_pub_sub.py
index 62cd679d6..7cf1579a8 100644
--- a/rclpy/test/benchmark/test_benchmark_pub_sub.py
+++ b/rclpy/test/benchmark/test_benchmark_pub_sub.py
@@ -12,25 +12,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest
 import rclpy
 from rclpy.executors import SingleThreadedExecutor
 from rclpy.qos import QoSProfile, ReliabilityPolicy
 from test_msgs.msg import Empty as EmptyMsg
 
 
-def test_one_message(benchmark):
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_messages(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
         node = rclpy.create_node('benchmark_pub_sub', context=context)
         qos = QoSProfile(depth=1, reliability=ReliabilityPolicy.RELIABLE)
         pub = node.create_publisher(EmptyMsg, 'empty', qos)
-        called = False
+        num_calls = 0
 
         def cb(_):
-            nonlocal called
-            called = True
+            nonlocal num_calls
+            num_calls += 1
+            # Send next message
+            pub.publish(EmptyMsg())
 
         sub = node.create_subscription(EmptyMsg, 'empty', cb, qos)
 
@@ -42,11 +46,14 @@ def cb():
             executor.spin_once(timeout_sec=0.01)
 
         def bm():
-            nonlocal called
+            nonlocal num_calls
+            # Reset for each benchmark run
+            num_calls = 0
+            # Prime the loop with a message
             pub.publish(EmptyMsg())
-            while not called:
+            while num_calls < ONE_THOUSAND:
                 executor.spin_once(timeout_sec=0)
-            return called
+            return num_calls == ONE_THOUSAND
 
         assert benchmark(bm)
 
diff --git a/rclpy/test/benchmark/test_benchmark_timer.py b/rclpy/test/benchmark/test_benchmark_timer.py
index b93838f27..e1488aeb9 100644
--- a/rclpy/test/benchmark/test_benchmark_timer.py
+++ b/rclpy/test/benchmark/test_benchmark_timer.py
@@ -12,33 +12,36 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import pytest
 import rclpy
 from rclpy.executors import SingleThreadedExecutor
 
 
-def test_one_callback(benchmark):
+ONE_THOUSAND = 1000
+
+
+def test_one_thousand_timer_coroutine_callbacks(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
         node = rclpy.create_node('benchmark_many_timer_calls', context=context)
-        called = False
+        num_calls = 0
 
-        def timer_cb():
-            nonlocal called
-            called = True
+        async def timer_coro():
+            nonlocal num_calls
+            num_calls += 1
 
-        timer = node.create_timer(0, timer_cb)
+        timer = node.create_timer(0, timer_coro)
 
         executor = SingleThreadedExecutor(context=context)
         executor.add_node(node)
 
         def bm():
-            nonlocal called
-            while not called:
+            nonlocal num_calls
+            # Reset for each benchmark run
+            num_calls = 0
+            while num_calls < ONE_THOUSAND:
                 executor.spin_once(timeout_sec=0)
-            return called
+            return num_calls == ONE_THOUSAND
 
         assert benchmark(bm)
 
@@ -49,27 +52,29 @@ def bm():
         rclpy.shutdown(context=context)
 
 
-def test_one_coroutine_callback(benchmark):
+def test_one_thousand_timer_callbacks(benchmark):
     context = rclpy.context.Context()
     rclpy.init(context=context)
     try:
         node = rclpy.create_node('benchmark_many_timer_calls', context=context)
-        called = False
+        num_calls = 0
 
-        async def timer_coro():
-            nonlocal called
-            called = True
+        def timer_cb():
+            nonlocal num_calls
+            num_calls += 1
 
-        timer = node.create_timer(0, timer_coro)
+        timer = node.create_timer(0, timer_cb)
 
         executor = SingleThreadedExecutor(context=context)
         executor.add_node(node)
 
         def bm():
-            nonlocal called
-            while not called:
+            nonlocal num_calls
+            # Reset for each benchmark run
+            num_calls = 0
+            while num_calls < ONE_THOUSAND:
                 executor.spin_once(timeout_sec=0)
-            return called
+            return num_calls == ONE_THOUSAND
 
         assert benchmark(bm)