From c1431f5ed03c816d18a4be0978ebf52f1db78b5b Mon Sep 17 00:00:00 2001
From: Quanyi Li
Date: Thu, 18 Jan 2024 23:00:27 +0000
Subject: [PATCH] Sumo integration (#598)

* add time_me prefix
* let's have a base agent manager
* rename vehicle in base_env to agent
* env.vehicle -> env.agent
* self.vehicles -> self.agents and self.vehicle -> self.agent
* format
* format
* finish -> _finish
* check agent manager
* show lines for traffic light
* refactor agent manager
* add before_step
* move the get_action_space to AgentManager
* runnable return dict
* update drawer and lights
* format
* light
* cull
* new sensor creation
* wise sensor creation and multi rendering
* update sensor creation
* fix test
* fix
* optimize
* format
* fix bug
* format
* fix test
* fix bug
* format
* fix bug
* fix bug
* format
* fix bug
---
 bridges/ros_bridge/ros_socket_server.py       |  22 +-
 documentation/source/action.ipynb             |   2 +-
 documentation/source/points_and_lines.ipynb   |   2 +-
 documentation/source/reward_cost_done.ipynb   |   2 +-
 documentation/source/rl_environments.ipynb    |  22 +-
 metadrive/base_class/base_runnable.py         |   5 +-
 metadrive/component/sensors/rgb_camera.py     |   2 +-
 .../traffic_light/base_traffic_light.py       |  46 +++-
 metadrive/engine/base_engine.py               |  10 +-
 metadrive/engine/core/draw.py                 |   7 +-
 metadrive/engine/core/engine_core.py          |   9 +-
 metadrive/engine/core/main_camera.py          |  11 +-
 metadrive/engine/interface.py                 |   4 +-
 metadrive/engine/scene_cull.py                |   2 +-
 metadrive/envs/base_env.py                    | 206 ++++++--------
 metadrive/envs/gym_wrapper.py                 |   2 +-
 .../envs/legacy_envs/mix_waymo_pg_env.py      |  10 +-
 metadrive/envs/marl_envs/marl_bidirection.py  |  22 +-
 metadrive/envs/marl_envs/marl_bottleneck.py   |  22 +-
 .../envs/marl_envs/marl_inout_roundabout.py   |  12 +-
 metadrive/envs/marl_envs/marl_intersection.py |  18 +-
 metadrive/envs/marl_envs/marl_parking_lot.py  |  26 +-
 metadrive/envs/marl_envs/marl_racing_env.py   |   8 +-
 metadrive/envs/marl_envs/marl_tollgate.py     |  26 +-
 .../envs/marl_envs/multi_agent_metadrive.py   |  34 +--
 metadrive/envs/marl_envs/tinyinter.py         |  21 +-
 metadrive/envs/metadrive_env.py               |  14 +-
 metadrive/envs/scenario_env.py                |  12 +-
 metadrive/envs/varying_dynamics_env.py        |  14 +-
 .../examples/Basic_MetaDrive_Usages.ipynb     |   4 +-
 metadrive/examples/custom_inramp_env.py       |   4 +-
 .../examples/drive_in_multi_agent_env.py      |  16 +-
 .../examples/drive_in_safe_metadrive_env.py   |  10 +-
 .../examples/drive_in_single_agent_env.py     |   6 +-
 .../generate_video_for_bev_and_interface.py   |   4 +-
 metadrive/examples/profile_metadrive_marl.py  |   6 +-
 metadrive/examples/top_down_metadrive.py      |   2 +-
 .../examples/verify_image_observation.py      |   7 +-
 metadrive/manager/agent_manager.py            | 238 +++-------------
 metadrive/manager/base_manager.py             | 257 ++++++++++++++++++
 metadrive/manager/scenario_map_manager.py     |   2 +-
 metadrive/manager/spawn_manager.py            |  36 +--
 metadrive/manager/traffic_manager.py          |   2 +-
 metadrive/obs/top_down_renderer.py            |  22 +-
 metadrive/policy/idm_policy.py                |   2 +-
 metadrive/policy/manual_control_policy.py     |  18 +-
 metadrive/tests/scripts/benchmark_brake.py    |  28 +-
 metadrive/tests/scripts/capture_obs.py        |  16 +-
 .../capture_rgb_and_send_to_other_process.py  |   2 +-
 .../test_component/test_bicycle_model.py      |   8 +-
 .../test_component/test_config_consistency.py |   2 +-
 .../test_component/test_detector_mask.py      |  20 +-
 .../test_component/test_distance_detector.py  |  20 +-
 .../test_set_get_vehicle_attribute.py         | 162 +++++------
 .../test_component/test_traffic_light.py      |   6 +-
 .../test_vehicle_coordinates.py               |   8 +-
 .../_test_change_friction_density_envs.py     |   2 +-
.../tests/test_env/test_ma_bidirection.py | 10 +- .../tests/test_env/test_ma_bottleneck_env.py | 84 +++--- .../tests/test_env/test_ma_env_force_reset.py | 6 +- .../tests/test_env/test_ma_intersection.py | 84 +++--- .../tests/test_env/test_ma_parking_lot.py | 78 +++--- metadrive/tests/test_env/test_ma_racing.py | 12 +- .../tests/test_env/test_ma_roundabout_env.py | 82 +++--- metadrive/tests/test_env/test_ma_tollgate.py | 82 +++--- .../tests/test_env/test_metadrive_env.py | 2 +- .../tests/test_env/test_naive_multi_agent.py | 6 +- .../test_env/test_varying_dynamics_env.py | 2 +- metadrive/tests/test_env/test_waymo_env.py | 2 +- .../test_save_replay_episode.py | 6 +- .../test_functionality/test_collision.py | 8 +- .../test_discrete_action.py | 4 +- .../test_horizon_termination.py | 2 +- .../test_marl_infinite_agents.py | 16 +- .../test_functionality/test_marl_reborn.py | 22 +- .../test_functionality/test_navigation.py | 6 +- .../test_functionality/test_out_of_road.py | 12 +- .../test_functionality/test_pedestrian.py | 2 +- .../test_functionality/test_random_engine.py | 28 +- .../tests/test_functionality/test_reborn.py | 4 +- .../test_scenario_randomness.py | 10 +- .../test_functionality/test_sensors_config.py | 8 +- .../test_functionality/test_traffic_mode.py | 2 +- metadrive/tests/test_installation.py | 2 +- .../test_policy/test_expert_performance.py | 10 +- .../test_policy/test_lane_change_policy.py | 6 +- .../tests/test_sensors/test_main_camera.py | 5 +- .../test_sensors/test_sensor_creation.py | 241 ++++++++++++++++ .../tests/tools/adjust_collision_model.py | 8 +- metadrive/tests/vis_block/vis_block_base.py | 2 +- metadrive/tests/vis_block/vis_yy.py | 8 +- metadrive/tests/vis_env/vis_acc_break.py | 8 +- metadrive/tests/vis_env/vis_metadrive_env.py | 20 +- .../tests/vis_env/vis_multi_agent_env.py | 4 +- metadrive/tests/vis_env/vis_topdown.py | 12 +- metadrive/tests/vis_env/vis_waymo_env.py | 2 +- .../vis_functionality/profile_rgb_cam.py | 4 +- .../tests/vis_functionality/vis_depth_cam.py | 8 +- .../vis_functionality/vis_grayscale_cam.py | 8 +- .../tests/vis_functionality/vis_mini_map.py | 2 +- .../tests/vis_functionality/vis_pedestrian.py | 10 +- .../tests/vis_functionality/vis_render_msg.py | 2 +- .../tests/vis_functionality/vis_rgb_cam.py | 4 +- .../vis_functionality/vis_rgb_depth_cam.py | 8 +- .../vis_functionality/vis_semantic_cam.py | 10 +- .../vis_two_speed_retrieve.py | 4 +- metadrive/utils/pg/utils.py | 7 +- metadrive/utils/utils.py | 21 ++ 108 files changed, 1430 insertions(+), 1069 deletions(-) create mode 100644 metadrive/tests/test_sensors/test_sensor_creation.py diff --git a/bridges/ros_bridge/ros_socket_server.py b/bridges/ros_bridge/ros_socket_server.py index 47365bdb5..0e209ddf4 100644 --- a/bridges/ros_bridge/ros_socket_server.py +++ b/bridges/ros_bridge/ros_socket_server.py @@ -55,7 +55,7 @@ def run(self, test=False): env.reset() print(HELP_MESSAGE) - env.vehicle.expert_takeover = False + env.agent.expert_takeover = False while True: o = env.step([0, 0]) if test: @@ -79,17 +79,17 @@ def run(self, test=False): print(msg) del image_data # explicit delete to free memory - lidar_data, objs = env.vehicle.lidar.perceive( - env.vehicle, + lidar_data, objs = env.agent.lidar.perceive( + env.agent, env.engine.physics_world.dynamic_world, - env.vehicle.config["lidar"]["num_lasers"], - env.vehicle.config["lidar"]["distance"], + env.agent.config["lidar"]["num_lasers"], + env.agent.config["lidar"]["distance"], height=1.0, ) - ego_x = env.vehicle.position[0] - ego_y = 
env.vehicle.position[1] - ego_theta = np.arctan2(env.vehicle.heading[1], env.vehicle.heading[0]) + ego_x = env.agent.position[0] + ego_y = env.agent.position[1] + ego_theta = np.arctan2(env.agent.heading[1], env.agent.heading[0]) num_data = struct.pack('i', len(objs)) obj_data = [] @@ -123,9 +123,9 @@ def run(self, test=False): del obj_data # explicit delete to free memory # convert lidar data to xyz - lidar_data = np.array(lidar_data) * env.vehicle.config["lidar"]["distance"] - lidar_range = env.vehicle.lidar._get_lidar_range( - env.vehicle.config["lidar"]["num_lasers"], env.vehicle.lidar.start_phase_offset + lidar_data = np.array(lidar_data) * env.agent.config["lidar"]["distance"] + lidar_range = env.agent.lidar._get_lidar_range( + env.agent.config["lidar"]["num_lasers"], env.agent.lidar.start_phase_offset ) point_x = lidar_data * np.cos(lidar_range) point_y = lidar_data * np.sin(lidar_range) diff --git a/documentation/source/action.ipynb b/documentation/source/action.ipynb index da3ca8a7d..ca1374549 100644 --- a/documentation/source/action.ipynb +++ b/documentation/source/action.ipynb @@ -597,7 +597,7 @@ " extra = random.randint(0, 9)\n", " action={\"action\": [0., 0.], \"extra\": extra}\n", " _,_,_,_,info = env.step(action)\n", - " extra_ = env.engine.get_policy(env.vehicle.id).extra_input\n", + " extra_ = env.engine.get_policy(env.agent.id).extra_input\n", " assert extra == extra_\n", " print(\"Extra info this step is: {}\".format(extra))\n", "finally:\n", diff --git a/documentation/source/points_and_lines.ipynb b/documentation/source/points_and_lines.ipynb index d5b10d390..717d2dfca 100644 --- a/documentation/source/points_and_lines.ipynb +++ b/documentation/source/points_and_lines.ipynb @@ -131,7 +131,7 @@ "source": [ "env.reset() # launch the simulation\n", "try:\n", - " drawer = env.engine.make_line_drawer(env.vehicle.origin, thickness=5)\n", + " drawer = env.engine.make_line_drawer(env.agent.origin, thickness=5)\n", " # rotate the drawer by 90 degree, as +x is at the right side of the car.\n", " drawer.setH(90) \n", " for i in range(100):\n", diff --git a/documentation/source/reward_cost_done.ipynb b/documentation/source/reward_cost_done.ipynb index c129867bb..8b7b27935 100644 --- a/documentation/source/reward_cost_done.ipynb +++ b/documentation/source/reward_cost_done.ipynb @@ -344,7 +344,7 @@ " TerminationState.SUCCESS: \u001b[36mself\u001b[39;49;00m._is_arrive_destination(vehicle),\n", " TerminationState.MAX_STEP: max_step,\n", " TerminationState.ENV_SEED: \u001b[36mself\u001b[39;49;00m.current_seed,\n", - " \u001b[37m# TerminationState.CURRENT_BLOCK: self.vehicle.navigation.current_road.block_ID(),\u001b[39;49;00m\n", + " \u001b[37m# TerminationState.CURRENT_BLOCK: self.agent.navigation.current_road.block_ID(),\u001b[39;49;00m\n", " \u001b[37m# crash_vehicle=False, crash_object=False, crash_building=False, out_of_road=False, arrive_dest=False,\u001b[39;49;00m\n", " }\n", "\n", diff --git a/documentation/source/rl_environments.ipynb b/documentation/source/rl_environments.ipynb index bbb930ea2..0739d426f 100644 --- a/documentation/source/rl_environments.ipynb +++ b/documentation/source/rl_environments.ipynb @@ -179,10 +179,10 @@ " training_env.reset(seed=env_seed)\n", " lane_nums.add(training_env.current_map.config[\"lane_num\"]) \n", " lane_widths.add(training_env.current_map.config[\"lane_width\"])\n", - " vehicle_models.add(training_env.vehicle.__class__.__name__)\n", + " vehicle_models.add(training_env.agent.__class__.__name__)\n", " traffic_models = set([obj.__class__ for 
obj in training_env.engine.traffic_manager.spawned_objects.values()])\n", " traffic_vehicle_models = traffic_vehicle_models.union(traffic_models)\n", - " assert vehicle_type[training_env.vehicle.config[\"vehicle_model\"]] is training_env.vehicle.__class__\n", + " assert vehicle_type[training_env.agent.config[\"vehicle_model\"]] is training_env.agent.__class__\n", " \n", "training_env.close()\n", "\n", @@ -761,14 +761,14 @@ " training_env.reset(seed=env_seed)\n", " lane_nums.add(training_env.current_map.config[\"lane_num\"]) \n", " lane_widths.add(training_env.current_map.config[\"lane_width\"])\n", - " vehicle_models.add(training_env.vehicle.__class__.__name__)\n", + " vehicle_models.add(training_env.agent.__class__.__name__)\n", " traffic_models = set([obj.__class__ for obj in training_env.engine.traffic_manager.spawned_objects.values()])\n", " traffic_vehicle_models = traffic_vehicle_models.union(traffic_models)\n", - " assert vehicle_type[training_env.vehicle.config[\"vehicle_model\"]] is training_env.vehicle.__class__\n", + " assert vehicle_type[training_env.agent.config[\"vehicle_model\"]] is training_env.agent.__class__\n", " \n", " # collect more\n", " for k, v in to_collect_set.items():\n", - " v.add(training_env.vehicle.config[k])\n", + " v.add(training_env.agent.config[k])\n", " \n", "training_env.close()\n", "\n", @@ -912,7 +912,7 @@ " TerminationState.SUCCESS: \u001b[36mself\u001b[39;49;00m._is_arrive_destination(vehicle),\n", " TerminationState.MAX_STEP: max_step,\n", " TerminationState.ENV_SEED: \u001b[36mself\u001b[39;49;00m.current_seed,\n", - " \u001b[37m# TerminationState.CURRENT_BLOCK: self.vehicle.navigation.current_road.block_ID(),\u001b[39;49;00m\n", + " \u001b[37m# TerminationState.CURRENT_BLOCK: self.agent.navigation.current_road.block_ID(),\u001b[39;49;00m\n", " \u001b[37m# crash_vehicle=False, crash_object=False, crash_building=False, out_of_road=False, arrive_dest=False,\u001b[39;49;00m\n", " }\n", "\n", @@ -1085,7 +1085,7 @@ " cone=env.engine.spawn_object(TrafficCone, position=[20, 7], heading_theta=0)\n", " for _ in range(100):\n", " o,r,d,_,info = env.step([0, 1])\n", - " if env.vehicle.crash_object:\n", + " if env.agent.crash_object:\n", " assert info[\"cost\"] == -5\n", " break\n", " env.engine.clear_objects([cone.id])\n", @@ -1240,7 +1240,7 @@ "try:\n", " while True:\n", " last_o=o\n", - " o,r,tm,_,i=env.step({agent_id: [0, 1] for agent_id in env.vehicles.keys()})\n", + " o,r,tm,_,i=env.step({agent_id: [0, 1] for agent_id in env.agents.keys()})\n", " m = env.render(mode=\"topdown\", \n", " film_size = (1000, 1000),\n", " screen_size = (1000, 1000),\n", @@ -1250,7 +1250,7 @@ " break\n", " \n", " # for test only\n", - " assert len(env.vehicles)<=env.config[\"num_agents\"]\n", + " assert len(env.agents)<=env.config[\"num_agents\"]\n", " if not env.config[\"allow_respawn\"]:\n", " assert len(o)<=len(last_o)\n", "finally:\n", @@ -1465,7 +1465,7 @@ "metadata": {}, "source": [ "## GymWrapper\n", - "Some legacy training frameworks may use the `openai.gym` interface whose `step()` function returns `obs`, `reward`, `termination`, and `info`. For making MetaDrive compatible with these training code, we provide an official wrapper for **all** environments of MetaDrive. The wrapped environment will have an `openai.gym` style APIs. Also, you can access all properties and methods of wrapped env using the same syntax. For example, we can still access the ego car via `env.vehicle`, even if the env is wrapped on top of `MetaDriveEnv`. 
" + "Some legacy training frameworks may use the `openai.gym` interface whose `step()` function returns `obs`, `reward`, `termination`, and `info`. For making MetaDrive compatible with these training code, we provide an official wrapper for **all** environments of MetaDrive. The wrapped environment will have an `openai.gym` style APIs. Also, you can access all properties and methods of wrapped env using the same syntax. For example, we can still access the ego car via `env.agent`, even if the env is wrapped on top of `MetaDriveEnv`. " ] }, { @@ -1531,7 +1531,7 @@ " o = gym_env.reset()\n", " o,r,d,i = gym_env.step([0,0])\n", " assert gym_env.config[\"accident_prob\"] == 1.0\n", - " print(\"Vehicle id:\", gym_env.vehicle.id)\n", + " print(\"Vehicle id:\", gym_env.agent.id)\n", " ret=gym_env.render(mode=\"topdown\", \n", " window=False,\n", " camera_position=(50, -70))\n", diff --git a/metadrive/base_class/base_runnable.py b/metadrive/base_class/base_runnable.py index ef20318ec..8a3deac44 100644 --- a/metadrive/base_class/base_runnable.py +++ b/metadrive/base_class/base_runnable.py @@ -48,7 +48,7 @@ def before_step(self, *args, **kwargs): """ Do Information fusion and then analyze and wait for decision """ - pass + return {} def set_action(self, *args, **kwargs): """ @@ -63,12 +63,13 @@ def step(self, *args, **kwargs): time. However some runnable instances who don't belong to the physics world and their actions are not force need to implement this function to get the action accumulated result respect to time. """ - pass + return {} def after_step(self, *args, **kwargs): """ After advancing all objects for a time period, their state should be updated for statistic or other purpose """ + return {} def reset(self, random_seed=None, *args, **kwargs): """ diff --git a/metadrive/component/sensors/rgb_camera.py b/metadrive/component/sensors/rgb_camera.py index 10fe44493..a00f6d099 100644 --- a/metadrive/component/sensors/rgb_camera.py +++ b/metadrive/component/sensors/rgb_camera.py @@ -41,7 +41,7 @@ def _setup_effect(self): fbprops.float_color = True fbprops.set_rgba_bits(16, 16, 16, 16) fbprops.set_depth_bits(24) - fbprops.set_multisamples(self.engine.pbrpipe.msaa_samples) + fbprops.set_multisamples(16) self.scene_tex = p3d.Texture() self.scene_tex.set_format(p3d.Texture.F_rgba16) self.scene_tex.set_component_type(p3d.Texture.T_float) diff --git a/metadrive/component/traffic_light/base_traffic_light.py b/metadrive/component/traffic_light/base_traffic_light.py index 594683fb5..803aed230 100644 --- a/metadrive/component/traffic_light/base_traffic_light.py +++ b/metadrive/component/traffic_light/base_traffic_light.py @@ -1,3 +1,5 @@ +import numpy as np + from metadrive.base_class.base_object import BaseObject from metadrive.constants import CamMask from metadrive.scenario.scenario_description import ScenarioDescription @@ -21,12 +23,23 @@ class BaseTrafficLight(BaseObject): PLACE_LONGITUDE = 5 def __init__( - self, lane, position=None, name=None, random_seed=None, config=None, escape_random_seed_assertion=False + self, + lane, + position=None, + name=None, + random_seed=None, + config=None, + escape_random_seed_assertion=False, + draw_line=False, + show_model=True, ): super(BaseTrafficLight, self).__init__(name, random_seed, config, escape_random_seed_assertion) self.set_metadrive_type(MetaDriveType.TRAFFIC_LIGHT) self.lane = lane self.status = MetaDriveType.LIGHT_UNKNOWN + self._draw_line = draw_line + self._show_model = show_model + self._lane_center_line = None self.lane_width = lane.width_at(0) if 
lane else 4 air_wall = generate_static_box_physics_body( @@ -48,7 +61,7 @@ def __init__( self.current_light = None if self.render: - if len(BaseTrafficLight.TRAFFIC_LIGHT_MODEL) == 0: + if len(BaseTrafficLight.TRAFFIC_LIGHT_MODEL) == 0 and self._show_model: for color in ["green", "red", "yellow", "unknown"]: model = self.loader.loadModel( AssetLoader.file_path("models", "traffic_light", "{}.gltf".format(color)) @@ -58,6 +71,12 @@ def __init__( model.hide(CamMask.Shadow) BaseTrafficLight.TRAFFIC_LIGHT_MODEL[color] = model self.origin.setScale(0.5, 1.2, 1.2) + if self._draw_line: + self._line_drawer = self.engine.make_line_drawer(thickness=2) + self._lane_center_line = np.array([[p[0], p[1], 0.4] for p in self.lane.get_polyline()]) + + def before_step(self, *args, **kwargs): + self.set_status(*args, **kwargs) def set_status(self, status): """ @@ -65,37 +84,52 @@ def set_status(self, status): """ pass + def _try_draw_line(self, color): + if self._draw_line: + self._line_drawer.reset() + self._line_drawer.draw_lines([self._lane_center_line], [[color for _ in self._lane_center_line]]) + def set_green(self): if self.render: if self.current_light is not None: self.current_light.detachNode() - self.current_light = BaseTrafficLight.TRAFFIC_LIGHT_MODEL["green"].instanceTo(self.origin) + if self._show_model: + self.current_light = BaseTrafficLight.TRAFFIC_LIGHT_MODEL["green"].instanceTo(self.origin) + self._try_draw_line([3 / 255, 255 / 255, 3 / 255]) self.status = MetaDriveType.LIGHT_GREEN def set_red(self): if self.render: if self.current_light is not None: self.current_light.detachNode() - self.current_light = BaseTrafficLight.TRAFFIC_LIGHT_MODEL["red"].instanceTo(self.origin) + if self._show_model: + self.current_light = BaseTrafficLight.TRAFFIC_LIGHT_MODEL["red"].instanceTo(self.origin) + self._try_draw_line([252 / 255, 0 / 255, 0 / 255]) self.status = MetaDriveType.LIGHT_RED def set_yellow(self): if self.render: if self.current_light is not None: self.current_light.detachNode() - self.current_light = BaseTrafficLight.TRAFFIC_LIGHT_MODEL["yellow"].instanceTo(self.origin) + if self._show_model: + self.current_light = BaseTrafficLight.TRAFFIC_LIGHT_MODEL["yellow"].instanceTo(self.origin) + self._try_draw_line([252 / 255, 227 / 255, 3 / 255]) self.status = MetaDriveType.LIGHT_YELLOW def set_unknown(self): if self.render: if self.current_light is not None: self.current_light.detachNode() - self.current_light = BaseTrafficLight.TRAFFIC_LIGHT_MODEL["unknown"].instanceTo(self.origin) + if self._show_model: + self.current_light = BaseTrafficLight.TRAFFIC_LIGHT_MODEL["unknown"].instanceTo(self.origin) self.status = MetaDriveType.LIGHT_UNKNOWN def destroy(self): super(BaseTrafficLight, self).destroy() self.lane = None + if self._draw_line: + self._line_drawer.reset() + self._line_drawer.removeNode() @property def top_down_color(self): diff --git a/metadrive/engine/base_engine.py b/metadrive/engine/base_engine.py index fbb12dd61..d4152ab99 100644 --- a/metadrive/engine/base_engine.py +++ b/metadrive/engine/base_engine.py @@ -11,7 +11,7 @@ from metadrive.engine.core.engine_core import EngineCore from metadrive.engine.interface import Interface from metadrive.engine.logger import get_logger, reset_logger -from metadrive.manager.base_manager import BaseManager + from metadrive.pull_asset import pull_asset from metadrive.utils import concat_step_infos from metadrive.utils.utils import is_map_related_class @@ -530,7 +530,7 @@ def _stop_replay(self): return self.STOP_REPLAY = not self.STOP_REPLAY - def 
register_manager(self, manager_name: str, manager: BaseManager): + def register_manager(self, manager_name: str, manager): """ Add a manager to BaseEngine, then all objects can communicate with this class :param manager_name: name shouldn't exist in self._managers and not be same as any class attribute @@ -595,9 +595,9 @@ def current_map(self): return None @property - def current_track_vehicle(self): + def current_track_agent(self): if self.main_camera is not None: - return self.main_camera.current_track_vehicle + return self.main_camera.current_track_agent elif "default_agent" in self.agents: return self.agents["default_agent"] else: @@ -661,7 +661,7 @@ def _object_clean_check(self): children = self.worldNP.getChildren() assert len(children) == 0, "NodePath are not cleaned thoroughly. Remaining NodePath: {}".format(children) - def update_manager(self, manager_name: str, manager: BaseManager, destroy_previous_manager=True): + def update_manager(self, manager_name: str, manager, destroy_previous_manager=True): """ Update an existing manager with a new one :param manager_name: existing manager name diff --git a/metadrive/engine/core/draw.py b/metadrive/engine/core/draw.py index 25a82df1e..3d818aa5a 100644 --- a/metadrive/engine/core/draw.py +++ b/metadrive/engine/core/draw.py @@ -24,10 +24,13 @@ def draw_lines(self, lineList, colorList=None): for pointList, lineColor in zip(lineList, colorList): self.moveTo(*pointList[0]) for point, seg_color, in zip(pointList[1:], lineColor): - assert len(seg_color) == 4, "color vector should have 4 component, get {} instead".format( + assert 3 <= len(seg_color) <= 4, "color vector should have 3 or 4 component, get {} instead".format( len(seg_color) ) - self.setColor(LVecBase4f(*seg_color)) + if len(seg_color) == 4: + self.setColor(LVecBase4f(*seg_color)) + else: + self.setColor(LVecBase4f(*seg_color, 1.0)) self.drawTo(*point) self.create() diff --git a/metadrive/engine/core/engine_core.py b/metadrive/engine/core/engine_core.py index f4b2afc29..876ad84e1 100644 --- a/metadrive/engine/core/engine_core.py +++ b/metadrive/engine/core/engine_core.py @@ -107,6 +107,13 @@ def __init__(self, global_config): # # "No allowed to change ptr of global config, which may cause issue" # pass # else: + config = global_config + self.main_window_disabled = False + if "main_camera" not in global_config["sensors"]: + # reduce size as we don't use the main camera content for improving efficiency + config["window_size"] = (1, 1) + self.main_window_disabled = True + self.pid = os.getpid() EngineCore.global_config = global_config self.mode = global_config["_render_mode"] @@ -182,7 +189,7 @@ def __init__(self, global_config): super(EngineCore, self).__init__(windowType=self.mode) logger.info("Known Pipes: {}".format(*GraphicsPipeSelection.getGlobalPtr().getPipeTypes())) - if self.global_config["window_size"] == (1, 1): + if self.main_window_disabled and self.mode != RENDER_MODE_NONE: self.win.setActive(False) self._all_panda_tasks = self.taskMgr.getAllTasks() diff --git a/metadrive/engine/core/main_camera.py b/metadrive/engine/core/main_camera.py index 4272e2b72..9b6a27df7 100644 --- a/metadrive/engine/core/main_camera.py +++ b/metadrive/engine/core/main_camera.py @@ -70,7 +70,7 @@ def __init__(self, engine, camera_height: float, camera_dist: float): self.direction_running_mean = deque(maxlen=self.camera_smooth_buffer_size if self.camera_smooth else 1) self.world_light = engine.world_light # light chases the chase camera, when not using global light self.inputs = InputState() - 
self.current_track_vehicle = None + self.current_track_agent = None # height control self.chase_camera_height = camera_height @@ -116,11 +116,12 @@ def __init__(self, engine, camera_height: float, camera_dist: float): self._in_recover = False self._last_frame_has_mouse = False - need_cuda = engine.global_config["vehicle_config"]["image_source"] == "main_camera" + need_cuda = "main_camera" in engine.global_config["sensors"] self.enable_cuda = engine.global_config["image_on_cuda"] and need_cuda self.cuda_graphics_resource = None - self.engine.sensors["main_camera"] = self + if "main_camera" in engine.global_config["sensors"]: + self.engine.sensors["main_camera"] = self if self.enable_cuda: assert _cuda_enable, "Can not enable cuda rendering pipeline" @@ -278,7 +279,7 @@ def track(self, vehicle): :param vehicle: Vehicle to chase :return: None """ - self.current_track_vehicle = vehicle + self.current_track_agent = vehicle self.engine.interface.display() pos = None if self.FOLLOW_LANE: @@ -329,7 +330,7 @@ def destroy(self): engine.task_manager.remove(self.CHASE_TASK_NAME) if engine.task_manager.hasTaskNamed(self.TOP_DOWN_TASK_NAME): engine.task_manager.remove(self.TOP_DOWN_TASK_NAME) - self.current_track_vehicle = None + self.current_track_agent = None if self.registered: self.unregister() # self.camera.node().getDisplayRegion(0).clearDrawCallback() diff --git a/metadrive/engine/interface.py b/metadrive/engine/interface.py index 2fadbc31f..3961f0b8d 100644 --- a/metadrive/engine/interface.py +++ b/metadrive/engine/interface.py @@ -44,8 +44,8 @@ def __init__(self, base_engine): self._is_showing_arrow = True # store the state of navigation mark def after_step(self): - if self.engine.current_track_vehicle is not None and self.need_interface and self.engine.mode != RENDER_MODE_NONE: - track_v = self.engine.current_track_vehicle + if self.engine.current_track_agent is not None and self.need_interface and self.engine.mode != RENDER_MODE_NONE: + track_v = self.engine.current_track_agent if self.dashboard is not None: self.dashboard.update_vehicle_state(track_v) self._render_contact_result(track_v.contact_results) diff --git a/metadrive/engine/scene_cull.py b/metadrive/engine/scene_cull.py index b3138653f..597d6248a 100644 --- a/metadrive/engine/scene_cull.py +++ b/metadrive/engine/scene_cull.py @@ -10,7 +10,7 @@ class SceneCull: """ Used to cull distant rendering object in MetaDrive to improve rendering efficiency """ - + raise DeprecationWarning # Visualization cull LOD_MAP_VIS_DIST = 300 # highly related to the render efficiency ! 
    LOD_VEHICLE_VIS_DIST = 500
diff --git a/metadrive/envs/base_env.py b/metadrive/envs/base_env.py
index 8525a72d1..4e4da41dc 100644
--- a/metadrive/envs/base_env.py
+++ b/metadrive/envs/base_env.py
@@ -19,7 +19,7 @@
 from metadrive.engine.engine_utils import initialize_engine, close_engine, \
     engine_initialized, set_global_random_seed, initialize_global_config, get_global_config
 from metadrive.engine.logger import get_logger, set_log_level
-from metadrive.manager.agent_manager import AgentManager
+from metadrive.manager.agent_manager import VehicleAgentManager
 from metadrive.manager.record_manager import RecordManager
 from metadrive.manager.replay_manager import ReplayManager
 from metadrive.obs.observation_base import DummyObservation
@@ -34,8 +34,8 @@
     # ===== agent =====
     # Whether randomize the car model for the agent, randomly choosing from 4 types of cars
     random_agent_model=False,
-    # The ego config is: env_config["vehicle_config"].update(env_config["target_vehicle_configs"]["default_agent"])
-    target_vehicle_configs={DEFAULT_AGENT: dict(use_special_color=True, spawn_lane_index=None)},
+    # The ego config is: env_config["vehicle_config"].update(env_config["agent_configs"]["default_agent"])
+    agent_configs={DEFAULT_AGENT: dict(use_special_color=True, spawn_lane_index=None)},

     # ===== multi-agent =====
     # This should be >1 in MARL envs, or set to -1 for spawning as many vehicles as possible.
@@ -181,8 +181,6 @@
     use_render=False,
     # (width, height), if set to None, it will be automatically determined
     window_size=(1200, 900),
-    # When main_camera is not the image_source, whether to reduce the window size to (1,1) for boosting efficiency
-    auto_resize_window=True,
     # Physics world step is 0.02s and will be repeated for decision_repeat times per env.step()
     physics_world_step_size=2e-2,
     decision_repeat=5,
@@ -282,7 +280,7 @@ def __init__(self, config: dict = None):
             config = {}
         self.logger = get_logger()
         set_log_level(config.get("log_level", logging.DEBUG if config.get("debug", False) else logging.INFO))
-        merged_config = self.default_config().update(config, False, ["target_vehicle_configs", "sensors"])
+        merged_config = self.default_config().update(config, False, ["agent_configs", "sensors"])
         global_config = self._post_process_config(merged_config)
         self.config = global_config
@@ -300,7 +298,7 @@ def __init__(self, config: dict = None):
         # observation and action space
         self.agent_manager = self._get_agent_manager()

-        # lazy initialization, create the main vehicle in the lazy_init() func
+        # lazy initialization, create the main simulation in the lazy_init() func
         # self.engine: Optional[BaseEngine] = None

         # In MARL envs with respawn mechanism, varying episode lengths might happen.
@@ -322,55 +320,45 @@ def _post_process_config(self, config):
         if not config["show_interface"]:
             config["interface_panel"] = []

-        # Multi-Thread
-        if config["image_on_cuda"]:
-            self.logger.info("Turn Off Multi-thread rendering due to image_on_cuda=True")
-            config["multi_thread_render"] = False
-
         # Adjust terrain
         n = config["map_region_size"]
         assert (n & (n - 1)) == 0 and 0 < n <= 2048, "map_region_size should be pow of 2 and <= 2048."
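# Aside: a minimal sketch of the power-of-two check asserted above (the helper
# name below is hypothetical, not MetaDrive API). For n > 0, (n & (n - 1)) == 0
# holds exactly when n is a power of two, since subtracting 1 flips the lowest
# set bit and every zero below it, e.g. 512 & 511 == 0b1000000000 & 0b0111111111 == 0:
#
#     def is_valid_map_region_size(n: int) -> bool:
#         return 0 < n <= 2048 and (n & (n - 1)) == 0
#
#     assert all(is_valid_map_region_size(n) for n in (1, 256, 2048))
#     assert not any(is_valid_map_region_size(n) for n in (0, 3, 4096))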
TerrainProperty.map_region_size = config["map_region_size"] - # Optimize main window - if not config["use_render"] and config["image_observation"] and \ - config["vehicle_config"]["image_source"] != "main_camera" and config["auto_resize_window"]: - # reduce size as we don't use the main camera content for improving efficiency - config["window_size"] = (1, 1) - config["show_interface"] = False - config["interface_panel"] = [] - self.logger.debug( - "Main window size is reduced to (1, 1) for boosting efficiency." - "To cancel this, set auto_resize_window = False" - ) + # Multi-Thread + # if config["image_on_cuda"]: + # self.logger.info("Turn Off Multi-thread rendering due to image_on_cuda=True") + # config["multi_thread_render"] = False # Optimize sensor creation in none-screen mode if not config["use_render"] and not config["image_observation"]: filtered = {} for id, cfg in config["sensors"].items(): - if not issubclass(cfg[0], BaseCamera): + if len(cfg) > 0 and not issubclass(cfg[0], BaseCamera) and id != "main_camera": filtered[id] = cfg config["sensors"] = filtered config["interface_panel"] = [] + # Check sensor existence + if config["use_render"] or "main_camera" in config["sensors"]: + config["sensors"]["main_camera"] = ("MainCamera", *config["window_size"]) + # Merge dashboard config with sensors to_use = [] - if not config["render_pipeline"] and config["show_interface"]: + if not config["render_pipeline"] and config["show_interface"] and "main_camera" in config["sensors"]: for panel in config["interface_panel"]: - if panel == "dashboard" and config["window_size"] != (1, 1): + if panel == "dashboard": config["sensors"]["dashboard"] = (DashBoard, ) if panel not in config["sensors"]: self.logger.warning( "Fail to add sensor: {} to the interface. 
Remove it from panel list!".format(panel) ) + elif panel == "main_camera": + self.logger.warning("main_camera can not be added to interface_panel, remove") else: to_use.append(panel) config["interface_panel"] = to_use - # Check sensor existence - if config["use_render"] or config["image_observation"]: - config["sensors"]["main_camera"] = ("MainCamera", *config["window_size"]) - # Merge default sensor to list sensor_cfg = self.default_config()["sensors"].update(config["sensors"]) config["sensors"] = sensor_cfg @@ -378,11 +366,8 @@ def _post_process_config(self, config): # show sensor lists _str = "Sensors: [{}]" sensors_str = "" - has_semantic_cam = False for _id, cfg in config["sensors"].items(): sensors_str += "{}: {}{}, ".format(_id, cfg[0] if isinstance(cfg[0], str) else cfg[0].__name__, cfg[1:]) - if not isinstance(cfg[0], str) and cfg[0].__name__ == "SemanticCamera": - has_semantic_cam = True self.logger.info(_str.format(sensors_str[:-2])) # determine render mode automatically @@ -392,7 +377,7 @@ def _post_process_config(self, config): else: config["_render_mode"] = RENDER_MODE_NONE for sensor in config["sensors"].values(): - if sensor[0] == "MainCamera" or (issubclass(BaseCamera, sensor[0]) and sensor[0] != DashBoard): + if sensor[0] == "MainCamera" or (issubclass(sensor[0], BaseCamera) and sensor[0] != DashBoard): config["_render_mode"] = RENDER_MODE_OFFSCREEN break self.logger.info("Render Mode: {}".format(config["_render_mode"])) @@ -407,20 +392,8 @@ def _post_process_config(self, config): def _get_observations(self) -> Dict[str, "BaseObservation"]: return {DEFAULT_AGENT: self.get_single_observation()} - def _get_observation_space(self): - return {v_id: obs.observation_space for v_id, obs in self.observations.items()} - - def _get_action_space(self): - if self.is_multi_agent: - return { - v_id: self.config["agent_policy"].get_input_space() - for v_id in self.config["target_vehicle_configs"].keys() - } - else: - return {DEFAULT_AGENT: self.config["agent_policy"].get_input_space()} - def _get_agent_manager(self): - return AgentManager(init_observations=self._get_observations(), init_action_space=self._get_action_space()) + return VehicleAgentManager(init_observations=self._get_observations()) def lazy_init(self): """ @@ -460,12 +433,12 @@ def step(self, actions: Union[Union[np.ndarray, list], Dict[AnyStr, Union[list, def _preprocess_actions(self, actions: Union[np.ndarray, Dict[AnyStr, np.ndarray], int]) \ -> Union[np.ndarray, Dict[AnyStr, np.ndarray], int]: if not self.is_multi_agent: - actions = {v_id: actions for v_id in self.vehicles.keys()} + actions = {v_id: actions for v_id in self.agents.keys()} else: if self.config["action_check"]: # Check whether some actions are not provided. given_keys = set(actions.keys()) - have_keys = set(self.vehicles.keys()) + have_keys = set(self.agents.keys()) assert given_keys == have_keys, "The input actions: {} have incompatible keys with existing {}!".format( given_keys, have_keys ) @@ -474,7 +447,7 @@ def _preprocess_actions(self, actions: Union[np.ndarray, Dict[AnyStr, np.ndarray # implementation, the "termination observation" will still be given in T=t-1. And at T=t, when you # collect action from policy(last_obs) without masking, then the action for "termination observation" # will still be computed. We just filter it out here. 
- actions = {v_id: actions[v_id] for v_id in self.vehicles.keys()} + actions = {v_id: actions[v_id] for v_id in self.agents.keys()} return actions def _step_simulator(self, actions): @@ -489,20 +462,20 @@ def _step_simulator(self, actions): scene_manager_after_step_infos, scene_manager_before_step_infos, allow_new_keys=True, without_copy=True ) - def reward_function(self, vehicle_id: str) -> Tuple[float, Dict]: + def reward_function(self, object_id: str) -> Tuple[float, Dict]: """ Override this func to get a new reward function - :param vehicle_id: name of this base vehicle + :param object_id: name of this object :return: reward, reward info """ self.logger.warning("Reward function is not implemented. Return reward = 0", extra={"log_once": True}) return 0, {} - def cost_function(self, vehicle_id: str) -> Tuple[float, Dict]: + def cost_function(self, object_id: str) -> Tuple[float, Dict]: self.logger.warning("Cost function is not implemented. Return cost = 0", extra={"log_once": True}) return 0, {} - def done_function(self, vehicle_id: str) -> Tuple[bool, Dict]: + def done_function(self, object_id: str) -> Tuple[bool, Dict]: self.logger.warning("Done function is not implemented. Return Done = False", extra={"log_once": True}) return False, {} @@ -524,23 +497,6 @@ def render(self, text: Optional[Union[dict, str]] = None, mode=None, *args, **kw "Panda Rendering is off now, can not render. Please set config['use_render'] = True!", exc_info={"log_once": True} ) - - # if mode != "human" and self.config["image_observation"]: - # # fetch img from img stack to be make this func compatible with other render func in RL setting - # return self.observations[DEFAULT_AGENT].img_obs.get_image() - # - # if mode == "rgb_array": - # assert self.config["use_render"], "You should create a Panda3d window before rendering images!" - # # if not hasattr(self, "temporary_img_obs"): - # # from metadrive.obs.image_obs import ImageObservation - # # image_source = "rgb_camera" - # # assert len(self.vehicles) == 1, "Multi-agent not supported yet!" - # # self.temporary_img_obs = ImageObservation(self.vehicles[DEFAULT_AGENT].config, image_source, False) - # # # else: - # # # raise ValueError("Not implemented yet!") - # # self.temporary_img_obs.observe(self.vehicles[DEFAULT_AGENT]) - # # return self.temporary_img_obs.get_image() - # return self.engine._get_window_image(return_bytes=return_bytes) return None def reset(self, seed: Union[None, int] = None): @@ -571,12 +527,12 @@ def reset(self, seed: Union[None, int] = None): self.top_down_renderer.clear() self.engine.top_down_renderer = None - self.dones = {agent_id: False for agent_id in self.vehicles.keys()} + self.dones = {agent_id: False for agent_id in self.agents.keys()} self.episode_rewards = defaultdict(float) self.episode_lengths = defaultdict(int) - assert (len(self.vehicles) == self.num_agents) or (self.num_agents == -1), \ - "Vehicles: {} != Num_agents: {}".format(len(self.vehicles), self.num_agents) + assert (len(self.agents) == self.num_agents) or (self.num_agents == -1), \ + "Agents: {} != Num_agents: {}".format(len(self.agents), self.num_agents) assert self.config is self.engine.global_config is get_global_config(), "Inconsistent config may bring errors!" 
return self._get_reset_return() @@ -589,17 +545,17 @@ def reset_sensors(self): if self.main_camera is not None: self.main_camera.reset() if hasattr(self, "agent_manager"): - bev_cam = self.main_camera.is_bird_view_camera() and self.main_camera.current_track_vehicle is not None - vehicles = list(self.engine.agents.values()) - current_track_vehicle = vehicles[0] + bev_cam = self.main_camera.is_bird_view_camera() and self.main_camera.current_track_agent is not None + agents = list(self.engine.agents.values()) + current_track_agent = agents[0] self.main_camera.set_follow_lane(self.config["use_chase_camera_follow_lane"]) - self.main_camera.track(current_track_vehicle) + self.main_camera.track(current_track_agent) if bev_cam: self.main_camera.stop_track() - self.main_camera.set_bird_view_pos(current_track_vehicle.position) + self.main_camera.set_bird_view_pos(current_track_agent.position) for name, sensor in self.engine.sensors.items(): if hasattr(sensor, "track") and name != "main_camera": - sensor.track(current_track_vehicle.origin, [0., 0.8, 1.5], [0, 0.59681, 0]) + sensor.track(current_track_agent.origin, [0., 0.8, 1.5], [0, 0.59681, 0]) def _get_reset_return(self): # TODO: figure out how to get the information of the before step @@ -613,7 +569,7 @@ def _get_reset_return(self): engine_info = merge_dicts( scene_manager_after_step_infos, scene_manager_before_step_infos, allow_new_keys=True, without_copy=True ) - for v_id, v in self.vehicles.items(): + for v_id, v in self.agents.items(): self.observations[v_id].reset(self, v) obses[v_id] = self.observations[v_id].observe(v) _, reward_infos[v_id] = self.reward_function(v_id) @@ -634,7 +590,7 @@ def _get_step_return(self, actions, engine_info): cost_infos = {} reward_infos = {} rewards = {} - for v_id, v in self.vehicles.items(): + for v_id, v in self.agents.items(): self.episode_lengths[v_id] += 1 rewards[v_id], reward_infos[v_id] = self.reward_function(v_id) self.episode_rewards[v_id] += rewards[v_id] @@ -645,10 +601,10 @@ def _get_step_return(self, actions, engine_info): obses[v_id] = o step_infos = concat_step_infos([engine_info, done_infos, reward_infos, cost_infos]) - truncateds = {k: step_infos[k].get(TerminationState.MAX_STEP, False) for k in self.vehicles.keys()} - terminateds = {k: self.dones[k] for k in self.vehicles.keys()} + truncateds = {k: step_infos[k].get(TerminationState.MAX_STEP, False) for k in self.agents.keys()} + terminateds = {k: self.dones[k] for k in self.agents.keys()} - # For extreme scenario only. Force to terminate all vehicles if the environmental step exceeds 5 times horizon. + # For extreme scenario only. Force to terminate all agents if the environmental step exceeds 5 times horizon. if self.config["horizon"] and self.episode_step > 5 * self.config["horizon"]: for k in truncateds: truncateds[k] = True @@ -685,19 +641,9 @@ def capture(self, file_name=None): self._capture_img.write(file_name) self.logger.info("Image is saved at: {}".format(file_name)) - def for_each_vehicle(self, func, *args, **kwargs): + def for_each_agent(self, func, *args, **kwargs): return self.agent_manager.for_each_active_agents(func, *args, **kwargs) - @property - def vehicle(self): - """A helper to return the vehicle only in the single-agent environment!""" - assert len(self.vehicles) == 1, ( - "env.vehicle is only supported in single-agent environment!" - if len(self.vehicles) > 1 else "Please initialize the environment first!" 
-        )
-        ego_v = self.vehicles[DEFAULT_AGENT]
-        return ego_v

     def get_single_observation(self):
         if self.__class__ is BaseEnv:
             o = DummyObservation({})
@@ -710,7 +656,7 @@
         return o

     def _wrap_as_single_agent(self, data):
-        return data[next(iter(self.vehicles.keys()))]
+        return data[next(iter(self.agents.keys()))]

     def seed(self, seed=None):
         if seed is not None:
@@ -723,7 +669,7 @@ def current_seed(self):
     @property
     def observations(self):
         """
-        Return observations of active and controllable vehicles
+        Return observations of active and controllable agents
         :return: Dict
         """
         return self.agent_manager.get_observations()
@@ -731,7 +677,7 @@ def observations(self):
     @property
     def observation_space(self) -> gym.Space:
         """
-        Return observation spaces of active and controllable vehicles
+        Return observation spaces of active and controllable agents
        :return: Dict
         """
         ret = self.agent_manager.get_observation_spaces()
@@ -743,7 +689,8 @@ def observation_space(self) -> gym.Space:
     @property
     def action_space(self) -> gym.Space:
         """
-        Return observation spaces of active and controllable vehicles
+        Return action spaces of active and controllable agents. Generally, it is defined in AgentManager. But you can
+        still override this function to define the action space for the environment.
         :return: Dict
         """
         ret = self.agent_manager.get_action_spaces()
@@ -758,17 +705,30 @@ def vehicles(self):
         Return all active vehicles
         :return: Dict[agent_id:vehicle]
         """
+        self.logger.warning("env.vehicles will be deprecated soon. Use env.agents instead", extra={"log_once": True})
+        return self.agents
+
+    @property
+    def vehicle(self):
+        self.logger.warning("env.vehicle will be deprecated soon. Use env.agent instead", extra={"log_once": True})
+        return self.agent
+
+    @property
+    def agents(self):
+        """
+        Return all active agents
+        :return: Dict[agent_id:agent]
+        """
         return self.agent_manager.active_agents

-    # @property
-    # def vehicles_including_just_terminated(self):
-    #     """
-    #     Return all vehicles that occupy some space in current environments
-    #     :return: Dict[agent_id:vehicle]
-    #     """
-    #     ret = self.agent_manager.active_agents
-    #     ret.update(self.agent_manager.just_terminated_agents)
-    #     return ret
+    @property
+    def agent(self):
+        """A helper to return the agent only in the single-agent environment!"""
+        assert len(self.agents) == 1, (
+            "env.agent is only supported in single-agent environment!"
+            if len(self.agents) > 1 else "Please initialize the environment first!"
+ ) + return self.agents[DEFAULT_AGENT] def setup_engine(self): """ @@ -801,8 +761,8 @@ def main_camera(self): return self.engine.main_camera @property - def current_track_vehicle(self): - return self.engine.current_track_vehicle + def current_track_agent(self): + return self.engine.current_track_agent @property def top_down_renderer(self): @@ -885,24 +845,24 @@ def switch_to_third_person_view(self): if self.main_camera is None: return self.main_camera.reset() - if self.config["prefer_track_agent"] is not None and self.config["prefer_track_agent"] in self.vehicles.keys(): - new_v = self.vehicles[self.config["prefer_track_agent"]] - current_track_vehicle = new_v + if self.config["prefer_track_agent"] is not None and self.config["prefer_track_agent"] in self.agents.keys(): + new_v = self.agents[self.config["prefer_track_agent"]] + current_track_agent = new_v else: if self.main_camera.is_bird_view_camera(): - current_track_vehicle = self.current_track_vehicle + current_track_agent = self.current_track_agent else: - vehicles = list(self.engine.agents.values()) - if len(vehicles) <= 1: + agents = list(self.engine.agents.values()) + if len(agents) <= 1: return - if self.current_track_vehicle in vehicles: - vehicles.remove(self.current_track_vehicle) - new_v = get_np_random().choice(vehicles) - current_track_vehicle = new_v - self.main_camera.track(current_track_vehicle) + if self.current_track_agent in agents: + agents.remove(self.current_track_agent) + new_v = get_np_random().choice(agents) + current_track_agent = new_v + self.main_camera.track(current_track_agent) for name, sensor in self.engine.sensors.items(): if hasattr(sensor, "track") and name != "main_camera": - sensor.track(current_track_vehicle.origin, [0., 0.8, 1.5], [0, 0.59681, 0]) + sensor.track(current_track_agent.origin, [0., 0.8, 1.5], [0, 0.59681, 0]) return def next_seed_reset(self): diff --git a/metadrive/envs/gym_wrapper.py b/metadrive/envs/gym_wrapper.py index f4e7ef75d..5a2531163 100644 --- a/metadrive/envs/gym_wrapper.py +++ b/metadrive/envs/gym_wrapper.py @@ -135,7 +135,7 @@ def __setattr__(self, name, value): assert isinstance(env.action_space, gymnasium.spaces.Space) for s in range(600): o, r, d, i = env.step([0, -1]) - env.vehicle.set_velocity([0, 0]) + env.agent.set_velocity([0, 0]) if d: assert s == env.config["horizon"] and i["max_step"] and d break diff --git a/metadrive/envs/legacy_envs/mix_waymo_pg_env.py b/metadrive/envs/legacy_envs/mix_waymo_pg_env.py index 18b967629..c04e12acf 100644 --- a/metadrive/envs/legacy_envs/mix_waymo_pg_env.py +++ b/metadrive/envs/legacy_envs/mix_waymo_pg_env.py @@ -134,10 +134,10 @@ def change_suite(self): self._init_pg_episode() def _init_pg_episode(self): - self.config["target_vehicle_configs"]["default_agent"]["spawn_lane_index"] = ( + self.config["agent_configs"]["default_agent"]["spawn_lane_index"] = ( FirstPGBlock.NODE_1, FirstPGBlock.NODE_2, self.engine.np_random.randint(3) ) - self.config["target_vehicle_configs"]["default_agent"]["destination"] = None + self.config["agent_configs"]["default_agent"]["destination"] = None def reset(self, seed: Union[None, int] = None): self.change_suite() @@ -156,16 +156,16 @@ def reset(self, seed: Union[None, int] = None): self.top_down_renderer.clear() self.engine.top_down_renderer = None - self.dones = {agent_id: False for agent_id in self.vehicles.keys()} + self.dones = {agent_id: False for agent_id in self.agents.keys()} self.episode_rewards = defaultdict(float) self.episode_lengths = defaultdict(int) - assert (len(self.vehicles) == 
self.num_agents) or (self.num_agents == -1) + assert (len(self.agents) == self.num_agents) or (self.num_agents == -1) # ^^^^^^ same as Base Env ^^^^^ if not self.is_current_real_data: # give a initial speed when on metadrive - self.vehicle.set_velocity(self.vehicle.heading, self.engine.np_random.randint(10)) + self.agent.set_velocity(self.agent.heading, self.engine.np_random.randint(10)) return self._get_reset_return() def _reset_global_seed(self, force_seed=None): diff --git a/metadrive/envs/marl_envs/marl_bidirection.py b/metadrive/envs/marl_envs/marl_bidirection.py index e1eb2879c..37fe60bff 100644 --- a/metadrive/envs/marl_envs/marl_bidirection.py +++ b/metadrive/envs/marl_envs/marl_bidirection.py @@ -97,7 +97,7 @@ def reward_function(self, vehicle_id: str): :param vehicle_id: id of BaseVehicle :return: reward """ - vehicle = self.vehicles[vehicle_id] + vehicle = self.agents[vehicle_id] step_info = dict() # Reward for moving forward in current lane @@ -192,7 +192,7 @@ def _expert(): ) ) break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -222,7 +222,7 @@ def _vis_debug_respawn(): total_r = 0 ep_s = 0 for i in range(1, 100000): - action = {k: [.0, 1.0] for k in env.vehicles.keys()} + action = {k: [.0, 1.0] for k in env.agents.keys()} o, r, tm, tc, info = env.step(action) for r_ in r.values(): total_r += r_ @@ -243,7 +243,7 @@ def _vis_debug_respawn(): ) ) # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -272,7 +272,7 @@ def _vis(): total_r = 0 ep_s = 0 for i in range(1, 100000): - o, r, tm, tc, info = env.step({k: [1.0, .0] for k in env.vehicles.keys()}) + o, r, tm, tc, info = env.step({k: [1.0, .0] for k in env.agents.keys()}) for r_ in r.values(): total_r += r_ ep_s += 1 @@ -283,12 +283,12 @@ def _vis(): "cam_x": env.main_camera.camera_x, "cam_y": env.main_camera.camera_y, "cam_z": env.main_camera.top_down_camera_height, - "current_track_v": env.agent_manager.object_to_agent(env.current_track_vehicle.name) + "current_track_v": env.agent_manager.object_to_agent(env.current_track_agent.name) } - track_v = env.agent_manager.object_to_agent(env.current_track_vehicle.name) + track_v = env.agent_manager.object_to_agent(env.current_track_agent.name) render_text["tack_v_reward"] = r[track_v] - render_text["dist_to_right"] = env.current_track_vehicle.dist_to_right_side - render_text["dist_to_left"] = env.current_track_vehicle.dist_to_left_side + render_text["dist_to_right"] = env.current_track_agent.dist_to_right_side + render_text["dist_to_left"] = env.current_track_agent.dist_to_left_side env.render(text=render_text) if tm["__all__"]: print( @@ -297,7 +297,7 @@ def _vis(): ) ) # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -371,7 +371,7 @@ def _long_run(): if (step + 1) % 200 == 0: print( "{}/{} Agents: {} {}\nO: {}\nR: {}\nD: {}\nI: {}\n\n".format( - step + 1, 10000, len(env.vehicles), list(env.vehicles.keys()), + step + 1, 10000, len(env.agents), list(env.agents.keys()), {k: (oo.shape, oo.mean(), oo.min(), oo.max()) for k, oo in o.items()}, r, d, i ) diff --git a/metadrive/envs/marl_envs/marl_bottleneck.py b/metadrive/envs/marl_envs/marl_bottleneck.py index 60dccc147..0bb5993bf 100644 --- a/metadrive/envs/marl_envs/marl_bottleneck.py +++ b/metadrive/envs/marl_envs/marl_bottleneck.py @@ -92,7 +92,7 @@ def reward_function(self, vehicle_id: str): :param vehicle_id: id of BaseVehicle :return: reward """ - vehicle = 
self.vehicles[vehicle_id] + vehicle = self.agents[vehicle_id] step_info = dict() # Reward for moving forward in current lane @@ -187,7 +187,7 @@ def _expert(): ) ) break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -217,7 +217,7 @@ def _vis_debug_respawn(): total_r = 0 ep_s = 0 for i in range(1, 100000): - action = {k: [.0, 1.0] for k in env.vehicles.keys()} + action = {k: [.0, 1.0] for k in env.agents.keys()} o, r, tm, tc, info = env.step(action) for r_ in r.values(): total_r += r_ @@ -238,7 +238,7 @@ def _vis_debug_respawn(): ) ) # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -267,7 +267,7 @@ def _vis(): total_r = 0 ep_s = 0 for i in range(1, 100000): - o, r, tm, tc, info = env.step({k: [1.0, .0] for k in env.vehicles.keys()}) + o, r, tm, tc, info = env.step({k: [1.0, .0] for k in env.agents.keys()}) for r_ in r.values(): total_r += r_ ep_s += 1 @@ -278,12 +278,12 @@ def _vis(): "cam_x": env.main_camera.camera_x, "cam_y": env.main_camera.camera_y, "cam_z": env.main_camera.top_down_camera_height, - "current_track_v": env.agent_manager.object_to_agent(env.current_track_vehicle.name) + "current_track_v": env.agent_manager.object_to_agent(env.current_track_agent.name) } - track_v = env.agent_manager.object_to_agent(env.current_track_vehicle.name) + track_v = env.agent_manager.object_to_agent(env.current_track_agent.name) render_text["tack_v_reward"] = r[track_v] - render_text["dist_to_right"] = env.current_track_vehicle.dist_to_right_side - render_text["dist_to_left"] = env.current_track_vehicle.dist_to_left_side + render_text["dist_to_right"] = env.current_track_agent.dist_to_right_side + render_text["dist_to_left"] = env.current_track_agent.dist_to_left_side env.render(text=render_text) if tm["__all__"]: print( @@ -292,7 +292,7 @@ def _vis(): ) ) # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -366,7 +366,7 @@ def _long_run(): if (step + 1) % 200 == 0: print( "{}/{} Agents: {} {}\nO: {}\nR: {}\nD: {}\nI: {}\n\n".format( - step + 1, 10000, len(env.vehicles), list(env.vehicles.keys()), + step + 1, 10000, len(env.agents), list(env.agents.keys()), {k: (oo.shape, oo.mean(), oo.min(), oo.max()) for k, oo in o.items()}, r, tm, i ) diff --git a/metadrive/envs/marl_envs/marl_inout_roundabout.py b/metadrive/envs/marl_envs/marl_inout_roundabout.py index 8da96ca8e..35a92490c 100644 --- a/metadrive/envs/marl_envs/marl_inout_roundabout.py +++ b/metadrive/envs/marl_envs/marl_inout_roundabout.py @@ -202,7 +202,7 @@ def _expert(): ) ) break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -232,7 +232,7 @@ def _vis_debug_respawn(): total_r = 0 ep_s = 0 for i in range(1, 100000): - action = {k: [0.0, .0] for k in env.vehicles.keys()} + action = {k: [0.0, .0] for k in env.agents.keys()} o, r, tm, tc, info = env.step(action) for r_ in r.values(): total_r += r_ @@ -253,7 +253,7 @@ def _vis_debug_respawn(): ) ) # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -282,7 +282,7 @@ def _vis(): total_r = 0 ep_s = 0 for i in range(1, 100000): - o, r, tm, tc, info = env.step({k: [0, .0] for k in env.vehicles.keys()}) + o, r, tm, tc, info = env.step({k: [0, .0] for k in env.agents.keys()}) for r_ in r.values(): total_r += r_ ep_s += 1 @@ -302,7 +302,7 @@ def _vis(): ) ) # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 
print("Reset") env.reset() @@ -376,7 +376,7 @@ def _long_run(): if (step + 1) % 200 == 0: print( "{}/{} Agents: {} {}\nO: {}\nR: {}\nD: {}\nI: {}\n\n".format( - step + 1, 10000, len(env.vehicles), list(env.vehicles.keys()), + step + 1, 10000, len(env.agents), list(env.agents.keys()), {k: (oo.shape, oo.mean(), oo.min(), oo.max()) for k, oo in o.items()}, r, tm, i ) diff --git a/metadrive/envs/marl_envs/marl_intersection.py b/metadrive/envs/marl_envs/marl_intersection.py index 53b1d0bab..82f1cefb1 100644 --- a/metadrive/envs/marl_envs/marl_intersection.py +++ b/metadrive/envs/marl_envs/marl_intersection.py @@ -154,7 +154,7 @@ def _expert(): ) ) break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -184,7 +184,7 @@ def _vis_debug_respawn(): total_r = 0 ep_s = 0 for i in range(1, 100000): - action = {k: [0.0, .0] for k in env.vehicles.keys()} + action = {k: [0.0, .0] for k in env.agents.keys()} o, r, tm, tc, info = env.step(action) for r_ in r.values(): total_r += r_ @@ -205,7 +205,7 @@ def _vis_debug_respawn(): ) ) # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -237,9 +237,9 @@ def _vis(): total_r = 0 ep_s = 0 for i in range(1, 100000): - actions = {k: [0.0, 1.0] for k in env.vehicles.keys()} - if len(env.vehicles) == 1: - actions = {k: [-0, 1.0] for k in env.vehicles.keys()} + actions = {k: [0.0, 1.0] for k in env.agents.keys()} + if len(env.agents) == 1: + actions = {k: [-0, 1.0] for k in env.agents.keys()} o, r, tm, tc, info = env.step(actions) for r_ in r.values(): total_r += r_ @@ -251,7 +251,7 @@ def _vis(): # "cam_x": env.main_camera.camera_x, # "cam_y": env.main_camera.camera_y, # "cam_z": env.main_camera.top_down_camera_height, - # "alive": len(env.vehicles) + # "alive": len(env.agents) # } # env.render(text=render_text) # env.render(mode="top_down") @@ -263,7 +263,7 @@ def _vis(): ) env.reset() # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -337,7 +337,7 @@ def _long_run(): if (step + 1) % 200 == 0: print( "{}/{} Agents: {} {}\nO: {}\nR: {}\nD: {}\nI: {}\n\n".format( - step + 1, 10000, len(env.vehicles), list(env.vehicles.keys()), + step + 1, 10000, len(env.agents), list(env.agents.keys()), {k: (oo.shape, oo.mean(), oo.min(), oo.max()) for k, oo in o.items()}, r, tm, i ) diff --git a/metadrive/envs/marl_envs/marl_parking_lot.py b/metadrive/envs/marl_envs/marl_parking_lot.py index 80fd33b44..be9d318e5 100644 --- a/metadrive/envs/marl_envs/marl_parking_lot.py +++ b/metadrive/envs/marl_envs/marl_parking_lot.py @@ -331,7 +331,7 @@ def _expert(): ) ) break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -361,7 +361,7 @@ def _vis_debug_respawn(): total_r = 0 ep_s = 0 for i in range(1, 100000): - action = {k: [0.0, .0] for k in env.vehicles.keys()} + action = {k: [0.0, .0] for k in env.agents.keys()} o, r, tm, tc, info = env.step(action) for r_ in r.values(): total_r += r_ @@ -382,7 +382,7 @@ def _vis_debug_respawn(): ) ) # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -416,16 +416,16 @@ def _vis(): total_r = 0 ep_s = 0 for i in range(1, 100000): - actions = {k: [1.0, .0] for k in env.vehicles.keys()} - if len(env.vehicles) == 1: - actions = {k: [-1.0, .0] for k in env.vehicles.keys()} + actions = {k: [1.0, .0] for k in env.agents.keys()} + if len(env.agents) == 1: + actions = {k: [-1.0, .0] for k in env.agents.keys()} 
o, r, tm, tc, info = env.step(actions) for r_ in r.values(): total_r += r_ ep_s += 1 # d.update({"total_r": total_r, "episode length": ep_s}) - if len(env.vehicles) != 0: - v = env.current_track_vehicle + if len(env.agents) != 0: + v = env.current_track_agent dist = v.dist_to_left_side, v.dist_to_right_side ckpt_idx = v.navigation._target_checkpoints_index else: @@ -438,13 +438,13 @@ def _vis(): "cam_x": env.main_camera.camera_x, "cam_y": env.main_camera.camera_y, "cam_z": env.main_camera.top_down_camera_height, - "alive": len(env.vehicles), + "alive": len(env.agents), "dist_right_left": dist, "ckpt_idx": ckpt_idx, "parking_space_num": len(env.engine.spawn_manager.parking_space_available) } - if len(env.vehicles) > 0: - v = env.current_track_vehicle + if len(env.agents) > 0: + v = env.current_track_agent # print(v.navigation.checkpoints) render_text["current_road"] = v.navigation.current_road @@ -470,7 +470,7 @@ def _vis(): ) env.reset() # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -545,7 +545,7 @@ def _long_run(): if (step + 1) % 200 == 0: print( "{}/{} Agents: {} {}\nO: {}\nR: {}\nD: {}\nI: {}\n\n".format( - step + 1, 10000, len(env.vehicles), list(env.vehicles.keys()), + step + 1, 10000, len(env.agents), list(env.agents.keys()), {k: (oo.shape, oo.mean(), oo.min(), oo.max()) for k, oo in o.items()}, r, d, i ) diff --git a/metadrive/envs/marl_envs/marl_racing_env.py b/metadrive/envs/marl_envs/marl_racing_env.py index 7ab8def26..6b57814c9 100644 --- a/metadrive/envs/marl_envs/marl_racing_env.py +++ b/metadrive/envs/marl_envs/marl_racing_env.py @@ -370,8 +370,8 @@ def done_function(self, vehicle_id): "Episode ended! Scenario Index: {} Reason: IDLE.".format(self.current_seed), extra={"log_once": True} ) - done_info[TerminationState.CRASH_SIDEWALK] = self.vehicles[vehicle_id].crash_sidewalk - if self.config["crash_sidewalk_done"] and self.vehicles[vehicle_id].crash_sidewalk: + done_info[TerminationState.CRASH_SIDEWALK] = self.agents[vehicle_id].crash_sidewalk + if self.config["crash_sidewalk_done"] and self.agents[vehicle_id].crash_sidewalk: done = True self.logger.info( "Episode ended! 
Scenario Index: {} Reason: CRASH_SIDEWALK.".format(self.current_seed), @@ -399,7 +399,7 @@ def reward_function(self, vehicle_id): :param vehicle_id: id of BaseVehicle :return: reward """ - vehicle = self.vehicles[vehicle_id] + vehicle = self.agents[vehicle_id] step_info = dict() # Reward for moving forward in current lane @@ -463,7 +463,7 @@ def _vis(generate_video=False): try: for i in range(1, 100000): - o, r, tm, tc, info = env.step({k: [-0.0, 1] for k in env.vehicles.keys()}) + o, r, tm, tc, info = env.step({k: [-0.0, 1] for k in env.agents.keys()}) for r_ in r.values(): total_r += r_ ep_s += 1 diff --git a/metadrive/envs/marl_envs/marl_tollgate.py b/metadrive/envs/marl_envs/marl_tollgate.py index 04b9cb2c6..9dd640144 100644 --- a/metadrive/envs/marl_envs/marl_tollgate.py +++ b/metadrive/envs/marl_envs/marl_tollgate.py @@ -195,7 +195,7 @@ def reward_function(self, vehicle_id: str): :param vehicle_id: id of BaseVehicle :return: reward """ - vehicle = self.vehicles[vehicle_id] + vehicle = self.agents[vehicle_id] step_info = dict() # Reward for moving forward in current lane @@ -334,7 +334,7 @@ def _expert(): ) ) break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -364,7 +364,7 @@ def _vis_debug_respawn(): total_r = 0 ep_s = 0 for i in range(1, 100000): - action = {k: [.0, 1.0] for k in env.vehicles.keys()} + action = {k: [.0, 1.0] for k in env.agents.keys()} o, r, tm, tc, info = env.step(action) for r_ in r.values(): total_r += r_ @@ -385,7 +385,7 @@ def _vis_debug_respawn(): ) ) # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -419,7 +419,7 @@ def _vis(): total_r = 0 ep_s = 0 for i in range(1, 100000): - o, r, tm, tc, info = env.step({k: [0, 1] for k in env.vehicles.keys()}) + o, r, tm, tc, info = env.step({k: [0, 1] for k in env.agents.keys()}) for r_ in r.values(): total_r += r_ ep_s += 1 @@ -431,14 +431,14 @@ def _vis(): "cam_y": env.main_camera.camera_y, "cam_z": env.main_camera.top_down_camera_height } - track_v = env.agent_manager.object_to_agent(env.current_track_vehicle.name) + track_v = env.agent_manager.object_to_agent(env.current_track_agent.name) if track_v in r: render_text["tack_v_reward"] = r[track_v] - render_text["dist_to_right"] = env.current_track_vehicle.dist_to_right_side - render_text["dist_to_left"] = env.current_track_vehicle.dist_to_left_side - render_text["overspeed"] = env.current_track_vehicle.overspeed - render_text["lane"] = env.current_track_vehicle.lane_index - render_text["block"] = env.current_track_vehicle.navigation.current_road.block_ID() + render_text["dist_to_right"] = env.current_track_agent.dist_to_right_side + render_text["dist_to_left"] = env.current_track_agent.dist_to_left_side + render_text["overspeed"] = env.current_track_agent.overspeed + render_text["lane"] = env.current_track_agent.lane_index + render_text["block"] = env.current_track_agent.navigation.current_road.block_ID() env.render(text=render_text) if tm["__all__"]: print(info) @@ -448,7 +448,7 @@ def _vis(): ) ) # break - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -522,7 +522,7 @@ def _long_run(): if (step + 1) % 200 == 0: print( "{}/{} Agents: {} {}\nO: {}\nR: {}\nD: {}\nI: {}\n\n".format( - step + 1, 10000, len(env.vehicles), list(env.vehicles.keys()), + step + 1, 10000, len(env.agents), list(env.agents.keys()), {k: (oo.shape, oo.mean(), oo.min(), oo.max()) for k, oo in o.items()}, r, tm, i ) diff --git 
a/metadrive/envs/marl_envs/multi_agent_metadrive.py b/metadrive/envs/marl_envs/multi_agent_metadrive.py index 0370ab679..b1d7e1661 100644 --- a/metadrive/envs/marl_envs/multi_agent_metadrive.py +++ b/metadrive/envs/marl_envs/multi_agent_metadrive.py @@ -44,7 +44,7 @@ vehicle_model="static_default", ), sensors=dict(lidar=(Lidar, )), - target_vehicle_configs=dict(), # will be filled automatically + agent_configs=dict(), # will be filled automatically # ===== New Reward Setting ===== out_of_road_penalty=10, @@ -82,7 +82,7 @@ def _post_process_config(self, config): config = super(MultiAgentMetaDrive, self)._post_process_config(config) ret_config = config # merge basic vehicle config into target vehicle config - target_vehicle_configs = dict() + agent_configs = dict() num_agents = ( ret_config["num_agents"] if ret_config["num_agents"] != -1 else SpawnManager.max_capacity( config["spawn_roads"], @@ -93,21 +93,21 @@ def _post_process_config(self, config): for id in range(num_agents): agent_id = "agent{}".format(id) config = copy.deepcopy(ret_config["vehicle_config"]) - if agent_id in ret_config["target_vehicle_configs"]: + if agent_id in ret_config["agent_configs"]: config["_specified_spawn_lane"] = ( - True if "spawn_lane_index" in ret_config["target_vehicle_configs"][agent_id] else False + True if "spawn_lane_index" in ret_config["agent_configs"][agent_id] else False ) config["_specified_destination"] = ( - True if "destination" in ret_config["target_vehicle_configs"][agent_id] else False + True if "destination" in ret_config["agent_configs"][agent_id] else False ) - config.update(ret_config["target_vehicle_configs"][agent_id]) - target_vehicle_configs[agent_id] = config - ret_config["target_vehicle_configs"] = target_vehicle_configs + config.update(ret_config["agent_configs"][agent_id]) + agent_configs[agent_id] = config + ret_config["agent_configs"] = agent_configs # if ret_config["use_render"] and ret_config["disable_model_compression"]: # logging.warning("Turn disable_model_compression=True can decrease the loading time!") if "prefer_track_agent" in config and config["prefer_track_agent"]: - ret_config["target_vehicle_configs"][config["prefer_track_agent"]]["use_special_color"] = True + ret_config["agent_configs"][config["prefer_track_agent"]]["use_special_color"] = True ret_config["vehicle_config"]["random_agent_model"] = ret_config["random_agent_model"] return ret_config @@ -157,7 +157,7 @@ def _after_vehicle_done( for dead_vehicle_id, termed in terminated.items(): if termed or truncated[dead_vehicle_id]: # finish all terminated and truncated vehicles - self.agent_manager.finish( + self.agent_manager._finish( dead_vehicle_id, ignore_delay_done=info[dead_vehicle_id].get(TerminationState.SUCCESS, False), ) @@ -166,12 +166,12 @@ def _after_vehicle_done( def _update_camera_after_finish(self): if (self.main_camera is not None - and self.current_track_vehicle.id not in self.engine.agent_manager._active_objects + and self.current_track_agent.id not in self.engine.agent_manager._active_objects and self.engine.task_manager.hasTaskNamed(self.main_camera.CHASE_TASK_NAME)): self.switch_to_third_person_view() def _get_observations(self): - return {name: self.get_single_observation() for name in self.config["target_vehicle_configs"].keys()} + return {name: self.get_single_observation() for name in self.config["agent_configs"].keys()} def _respawn_vehicles(self, randomize_position=False): new_obs_dict = {} @@ -231,7 +231,7 @@ def _test(): total_r = 0 for i in range(1, 100000): # o, r, tm, tc, info = 
env.step(env.action_space.sample()) - o, r, tm, tc, info = env.step({v_id: [0, 1] for v_id in env.vehicles.keys()}) + o, r, tm, tc, info = env.step({v_id: [0, 1] for v_id in env.agents.keys()}) for r_ in r.values(): total_r += r_ # o, r, tm, tc, info = env.step([0,1]) @@ -239,7 +239,7 @@ def _test(): # d.update({"total_r": total_r}) # env.render(text=d) env.render(mode="top_down") - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() @@ -258,7 +258,7 @@ def _vis(): "vehicle_config": { "vehicle_model": "s" }, - "target_vehicle_configs": { + "agent_configs": { "agent0": { "vehicle_model": "static_default" }, @@ -277,14 +277,14 @@ def _vis(): total_r = 0 for i in range(1, 100000): # o, r, tm, tc, info = env.step(env.action_space.sample()) - o, r, tm, tc, info = env.step({v_id: [0.0, 0.0] for v_id in env.vehicles.keys()}) + o, r, tm, tc, info = env.step({v_id: [0.0, 0.0] for v_id in env.agents.keys()}) for r_ in r.values(): total_r += r_ # o, r, tm, tc, info = env.step([0,1]) # tm.update({"total_r": total_r}) env.render(mode="top_down") # env.reset() - if len(env.vehicles) == 0: + if len(env.agents) == 0: total_r = 0 print("Reset") env.reset() diff --git a/metadrive/envs/marl_envs/tinyinter.py b/metadrive/envs/marl_envs/tinyinter.py index 45533f76d..9746d3084 100644 --- a/metadrive/envs/marl_envs/tinyinter.py +++ b/metadrive/envs/marl_envs/tinyinter.py @@ -4,7 +4,7 @@ import numpy as np from metadrive.envs.marl_envs.marl_intersection import MultiAgentIntersectionEnv -from metadrive.manager.agent_manager import AgentManager +from metadrive.manager.agent_manager import VehicleAgentManager from metadrive.obs.state_obs import LidarStateObservation from metadrive.policy.idm_policy import IDMPolicy from metadrive.utils import Config @@ -220,12 +220,10 @@ def act(self, *args, **kwargs): return [0, 0] -class MixedIDMAgentManager(AgentManager): +class MixedIDMAgentManager(VehicleAgentManager): """In this manager, we can replace part of RL policy by IDM policy""" - def __init__(self, init_observations, init_action_space, num_RL_agents, ignore_delay_done=None, target_speed=10): - super(MixedIDMAgentManager, self).__init__( - init_observations=init_observations, init_action_space=init_action_space - ) + def __init__(self, init_observations, num_RL_agents, ignore_delay_done=None, target_speed=10): + super(MixedIDMAgentManager, self).__init__(init_observations=init_observations) self.num_RL_agents = num_RL_agents self.RL_agents = set() self.dying_RL_agents = set() @@ -261,13 +259,13 @@ def get_action_spaces(self): else: return ret - def finish(self, agent_name, ignore_delay_done=False): + def _finish(self, agent_name, ignore_delay_done=False): # ignore_delay_done = True if self.ignore_delay_done is not None: ignore_delay_done = self.ignore_delay_done if agent_name in self.RL_agents: self.dying_RL_agents.add(agent_name) - super(MixedIDMAgentManager, self).finish(agent_name, ignore_delay_done) + super(MixedIDMAgentManager, self)._finish(agent_name, ignore_delay_done) def _remove_vehicle(self, v): agent_name = self.object_to_agent(v.name) @@ -277,7 +275,7 @@ def _remove_vehicle(self, v): self.dying_RL_agents.remove(agent_name) super(MixedIDMAgentManager, self)._remove_vehicle(v) - def _get_vehicles(self, config_dict: dict): + def _create_agents(self, config_dict: dict): from metadrive.component.vehicle.vehicle_type import random_vehicle_type, vehicle_type ret = {} for agent_count, (agent_id, v_config) in enumerate(config_dict.items()): @@ -396,7 +394,7 @@ def 
_preprocess_actions(self, actions): if self.num_RL_agents == self.num_agents: return super(MultiAgentTinyInter, self)._preprocess_actions(actions) - actions = {v_id: actions[v_id] for v_id in self.vehicles.keys() if v_id in self.agent_manager.RL_agents} + actions = {v_id: actions[v_id] for v_id in self.agents.keys() if v_id in self.agent_manager.RL_agents} return actions def __init__(self, config=None): @@ -407,7 +405,6 @@ def __init__(self, config=None): # else: self.agent_manager = MixedIDMAgentManager( init_observations=self._get_observations(), - init_action_space=self._get_action_space(), num_RL_agents=self.num_RL_agents, ignore_delay_done=self.config["ignore_delay_done"], target_speed=self.config["target_speed"] @@ -455,7 +452,7 @@ def get_single_observation(self): for i in range(1, 100000): o, r, tm, tc, info = env.step({k: [0.0, 0.0] for k in env.action_space.sample().keys()}) # env.render("top_down", camera_position=(42.5, 0), film_size=(500, 500)) - vehicles = env.vehicles + vehicles = env.agents for k, v in tm.items(): if v and k in info: diff --git a/metadrive/envs/metadrive_env.py b/metadrive/envs/metadrive_env.py index 1b8cd1a84..011141986 100644 --- a/metadrive/envs/metadrive_env.py +++ b/metadrive/envs/metadrive_env.py @@ -58,7 +58,7 @@ # ===== Agent ===== random_spawn_lane_index=True, vehicle_config=dict(navigation_module=NodeNetworkNavigation), - target_vehicle_configs={ + agent_configs={ DEFAULT_AGENT: dict( use_special_color=True, spawn_lane_index=(FirstPGBlock.NODE_1, FirstPGBlock.NODE_2, 0), @@ -121,12 +121,12 @@ def _post_process_config(self, config): config["vehicle_config"]["random_agent_model"] = config["random_agent_model"] target_v_config = copy.deepcopy(config["vehicle_config"]) if not config["is_multi_agent"]: - target_v_config.update(config["target_vehicle_configs"][DEFAULT_AGENT]) - config["target_vehicle_configs"][DEFAULT_AGENT] = target_v_config + target_v_config.update(config["agent_configs"][DEFAULT_AGENT]) + config["agent_configs"][DEFAULT_AGENT] = target_v_config return config def done_function(self, vehicle_id: str): - vehicle = self.vehicles[vehicle_id] + vehicle = self.agents[vehicle_id] done = False max_step = self.config["horizon"] is not None and self.episode_lengths[vehicle_id] >= self.config["horizon"] done_info = { @@ -139,7 +139,7 @@ def done_function(self, vehicle_id: str): TerminationState.SUCCESS: self._is_arrive_destination(vehicle), TerminationState.MAX_STEP: max_step, TerminationState.ENV_SEED: self.current_seed, - # TerminationState.CURRENT_BLOCK: self.vehicle.navigation.current_road.block_ID(), + # TerminationState.CURRENT_BLOCK: self.agent.navigation.current_road.block_ID(), # crash_vehicle=False, crash_object=False, crash_building=False, out_of_road=False, arrive_dest=False, } @@ -199,7 +199,7 @@ def done_function(self, vehicle_id: str): return done, done_info def cost_function(self, vehicle_id: str): - vehicle = self.vehicles[vehicle_id] + vehicle = self.agents[vehicle_id] step_info = dict() step_info["cost"] = 0 if self._is_out_of_road(vehicle): @@ -242,7 +242,7 @@ def reward_function(self, vehicle_id: str): :param vehicle_id: id of BaseVehicle :return: reward """ - vehicle = self.vehicles[vehicle_id] + vehicle = self.agents[vehicle_id] step_info = dict() # Reward for moving forward in current lane diff --git a/metadrive/envs/scenario_env.py b/metadrive/envs/scenario_env.py index d7909f2b0..7831742be 100644 --- a/metadrive/envs/scenario_env.py +++ b/metadrive/envs/scenario_env.py @@ -126,7 +126,7 @@ def setup_engine(self): 
self.engine.register_manager("curriculum_manager", ScenarioCurriculumManager()) def done_function(self, vehicle_id: str): - vehicle = self.vehicles[vehicle_id] + vehicle = self.agents[vehicle_id] done = False max_step = self.config["horizon"] is not None and self.episode_lengths[vehicle_id] >= self.config["horizon"] done_info = { @@ -139,7 +139,7 @@ def done_function(self, vehicle_id: str): TerminationState.SUCCESS: self._is_arrive_destination(vehicle), TerminationState.MAX_STEP: max_step, TerminationState.ENV_SEED: self.current_seed, - # TerminationState.CURRENT_BLOCK: self.vehicle.navigation.current_road.block_ID(), + # TerminationState.CURRENT_BLOCK: self.agent.navigation.current_road.block_ID(), # crash_vehicle=False, crash_object=False, crash_building=False, out_of_road=False, arrive_dest=False, } @@ -193,7 +193,7 @@ def msg(reason): return done, done_info def cost_function(self, vehicle_id: str): - vehicle = self.vehicles[vehicle_id] + vehicle = self.agents[vehicle_id] step_info = dict(num_crash_object=0, num_crash_human=0, num_crash_vehicle=0, num_on_line=0) step_info["cost"] = 0 if vehicle.on_yellow_continuous_line or vehicle.crash_sidewalk or vehicle.on_white_continuous_line: @@ -219,7 +219,7 @@ def reward_function(self, vehicle_id: str): :param vehicle_id: id of BaseVehicle :return: reward """ - vehicle = self.vehicles[vehicle_id] + vehicle = self.agents[vehicle_id] step_info = dict() # Reward for moving forward in current lane @@ -421,8 +421,8 @@ def _reset_global_seed(self, force_seed=None): for t in range(10000): o, r, tm, tc, info = env.step([0, 0]) assert env.observation_space.contains(o) - c_lane = env.vehicle.lane - long, lat, = c_lane.local_coordinates(env.vehicle.position) + c_lane = env.agent.lane + long, lat, = c_lane.local_coordinates(env.agent.position) # if env.config["use_render"]: env.render( text={ diff --git a/metadrive/envs/varying_dynamics_env.py b/metadrive/envs/varying_dynamics_env.py index 8561929e7..9e8326b42 100644 --- a/metadrive/envs/varying_dynamics_env.py +++ b/metadrive/envs/varying_dynamics_env.py @@ -9,7 +9,7 @@ """ from metadrive.envs.metadrive_env import MetaDriveEnv -from metadrive.manager.agent_manager import AgentManager +from metadrive.manager.agent_manager import VehicleAgentManager VaryingDynamicsConfig = dict( vehicle_config=dict(vehicle_model="varying_dynamics", ), @@ -25,7 +25,7 @@ ) -class VaryingDynamicsAgentManager(AgentManager): +class VaryingDynamicsAgentManager(VehicleAgentManager): def reset(self): # Randomize ego vehicle's dynamics here random_fields = self.engine.global_config["random_dynamics"] @@ -43,8 +43,8 @@ def reset(self): else: raise ValueError("Unknown parameter range: {}".format(para_range)) - assert len(self.engine.global_config["target_vehicle_configs"]) == 1, "Only supporting single-agent now!" - self.engine.global_config["target_vehicle_configs"]["default_agent"].update(dynamics) + assert len(self.engine.global_config["agent_configs"]) == 1, "Only supporting single-agent now!" 
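
The companion rename on the configuration side: per-agent settings move from "target_vehicle_configs" to "agent_configs", which both the assert above and the update below rely on. A minimal override sketch, mirroring the _vis() example from multi_agent_metadrive.py earlier in this patch (the chosen num_agents value is an assumption):

    from metadrive.envs.marl_envs.multi_agent_metadrive import MultiAgentMetaDrive

    env = MultiAgentMetaDrive({
        "num_agents": 2,
        # Per-agent overrides were previously keyed by "target_vehicle_configs".
        "agent_configs": {"agent0": {"vehicle_model": "static_default"}},
    })
    env.reset()
    env.close()
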
+ self.engine.global_config["agent_configs"]["default_agent"].update(dynamics) super(VaryingDynamicsAgentManager, self).reset() @@ -56,9 +56,7 @@ def default_config(cls): return config def _get_agent_manager(self): - return VaryingDynamicsAgentManager( - init_observations=self._get_observations(), init_action_space=self._get_action_space() - ) + return VaryingDynamicsAgentManager(init_observations=self._get_observations()) if __name__ == '__main__': @@ -68,7 +66,7 @@ def _get_agent_manager(self): }) for ep in range(3): obs, _ = env.reset() - print("Current Dynamics Parameters:", env.vehicle.get_dynamics_parameters()) + print("Current Dynamics Parameters:", env.agent.get_dynamics_parameters()) for step in range(1000): o, r, tm, tc, i = env.step(env.action_space.sample()) if tm or tc: diff --git a/metadrive/examples/Basic_MetaDrive_Usages.ipynb b/metadrive/examples/Basic_MetaDrive_Usages.ipynb index 8f83f12bd..cc8c258c8 100644 --- a/metadrive/examples/Basic_MetaDrive_Usages.ipynb +++ b/metadrive/examples/Basic_MetaDrive_Usages.ipynb @@ -140,7 +140,7 @@ "ep_reward = 0.0\n", "obs, info = env.reset()\n", "for i in range(1000):\n", - " obs, reward, terminated, truncated, info = env.step(expert(env.vehicle))\n", + " obs, reward, terminated, truncated, info = env.step(expert(env.agent))\n", " ep_reward += reward\n", " env.render(mode=\"top_down\", screen_record=True, screen_size=(500, 500))\n", " if terminated or truncated:\n", @@ -191,7 +191,7 @@ "obs, info = env.reset()\n", "frames = []\n", "for i in range(1000):\n", - " obs, reward, terminated, truncated, info = env.step(expert(env.vehicle))\n", + " obs, reward, terminated, truncated, info = env.step(expert(env.agent))\n", " ep_reward += reward\n", " ep_cost += info[\"cost\"]\n", " frame = env.render(mode=\"top_down\", window=False, screen_size=(500, 500))\n", diff --git a/metadrive/examples/custom_inramp_env.py b/metadrive/examples/custom_inramp_env.py index 0d2e5e6a1..78d2be4a7 100644 --- a/metadrive/examples/custom_inramp_env.py +++ b/metadrive/examples/custom_inramp_env.py @@ -23,13 +23,13 @@ env = MetaDriveEnv(config) try: o, _ = env.reset() - env.vehicle.expert_takeover = True + env.agent.expert_takeover = True assert isinstance(o, np.ndarray) print("The observation is an numpy array with shape: ", o.shape) for i in range(1, 1000000000): o, r, tm, tc, info = env.step([0, 0]) if (tm or tc) and info["arrive_dest"]: env.reset() - env.current_track_vehicle.expert_takeover = True + env.current_track_agent.expert_takeover = True finally: env.close() diff --git a/metadrive/examples/drive_in_multi_agent_env.py b/metadrive/examples/drive_in_multi_agent_env.py index c93f9cba9..56df4ac84 100644 --- a/metadrive/examples/drive_in_multi_agent_env.py +++ b/metadrive/examples/drive_in_multi_agent_env.py @@ -57,26 +57,26 @@ ) try: env.reset() - # if env.current_track_vehicle: - # env.current_track_vehicle.expert_takeover = True + # if env.current_track_agent: + # env.current_track_agent.expert_takeover = True print(HELP_MESSAGE) env.switch_to_third_person_view() # Default is in Top-down view, we switch to Third-person view. 
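
Likewise, the camera-tracking accessor becomes env.current_track_agent (formerly env.current_track_vehicle), and the tracked agent's id is recovered through the agent manager, exactly as the render text below does. A minimal sketch against an already-constructed, render-capable env:

    tracked = env.current_track_agent  # was env.current_track_vehicle
    agent_id = env.engine.agent_manager.object_to_agent(tracked.id)
    tracked.expert_takeover = True  # hand the tracked agent to the built-in expert
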
for i in range(1, 10000000000): - o, r, tm, tc, info = env.step({agent_id: [0, 0] for agent_id in env.vehicles.keys()}) + o, r, tm, tc, info = env.step({agent_id: [0, 0] for agent_id in env.agents.keys()}) env.render( **extra_args, mode="top_down" if args.top_down else None, text={ "Quit": "ESC", - "Number of existing vehicles": len(env.vehicles), - "Tracked agent (Press Q)": env.engine.agent_manager.object_to_agent(env.current_track_vehicle.id), + "Number of existing vehicles": len(env.agents), + "Tracked agent (Press Q)": env.engine.agent_manager.object_to_agent(env.current_track_agent.id), "Keyboard Control": "W,A,S,D", - # "Auto-Drive (Switch mode: T)": "on" if env.current_track_vehicle.expert_takeover else "off", + # "Auto-Drive (Switch mode: T)": "on" if env.current_track_agent.expert_takeover else "off", } if not args.top_down else {} ) if tm["__all__"]: env.reset() - # if env.current_track_vehicle: - # env.current_track_vehicle.expert_takeover = True + # if env.current_track_agent: + # env.current_track_agent.expert_takeover = True finally: env.close() diff --git a/metadrive/examples/drive_in_safe_metadrive_env.py b/metadrive/examples/drive_in_safe_metadrive_env.py index 21ca0c62a..1044c2bba 100644 --- a/metadrive/examples/drive_in_safe_metadrive_env.py +++ b/metadrive/examples/drive_in_safe_metadrive_env.py @@ -15,21 +15,21 @@ try: env.reset() print(HELP_MESSAGE) - env.vehicle.expert_takeover = True + env.agent.expert_takeover = True for i in range(1, 1000000000): - previous_takeover = env.current_track_vehicle.expert_takeover + previous_takeover = env.current_track_agent.expert_takeover o, r, tm, tc, info = env.step([0, 0]) env.render( text={ - "Auto-Drive (Switch mode: T)": "on" if env.current_track_vehicle.expert_takeover else "off", + "Auto-Drive (Switch mode: T)": "on" if env.current_track_agent.expert_takeover else "off", "Total episode cost": env.episode_cost, "Keyboard Control": "W,A,S,D", } ) - if not previous_takeover and env.current_track_vehicle.expert_takeover: + if not previous_takeover and env.current_track_agent.expert_takeover: logging.warning("Auto-Drive mode may fail to solve some scenarios due to distribution mismatch") if (tm or tc) and info["arrive_dest"]: env.reset() - env.current_track_vehicle.expert_takeover = True + env.current_track_agent.expert_takeover = True finally: env.close() diff --git a/metadrive/examples/drive_in_single_agent_env.py b/metadrive/examples/drive_in_single_agent_env.py index bab98d152..59b3626d1 100644 --- a/metadrive/examples/drive_in_single_agent_env.py +++ b/metadrive/examples/drive_in_single_agent_env.py @@ -51,7 +51,7 @@ try: o, _ = env.reset(seed=21) print(HELP_MESSAGE) - env.vehicle.expert_takeover = True + env.agent.expert_takeover = True if args.observation == "rgb_camera": assert isinstance(o, dict) print("The observation is a dict with numpy arrays as values: ", {k: v.shape for k, v in o.items()}) @@ -62,7 +62,7 @@ o, r, tm, tc, info = env.step([0, 0]) env.render( text={ - "Auto-Drive (Switch mode: T)": "on" if env.current_track_vehicle.expert_takeover else "off", + "Auto-Drive (Switch mode: T)": "on" if env.current_track_agent.expert_takeover else "off", "Current Observation": args.observation, "Keyboard Control": "W,A,S,D", } @@ -73,6 +73,6 @@ cv2.waitKey(1) if (tm or tc) and info["arrive_dest"]: env.reset(env.current_seed + 1) - env.current_track_vehicle.expert_takeover = True + env.current_track_agent.expert_takeover = True finally: env.close() diff --git a/metadrive/examples/generate_video_for_bev_and_interface.py 
b/metadrive/examples/generate_video_for_bev_and_interface.py index c1d6e0af1..5f6b2a5c0 100644 --- a/metadrive/examples/generate_video_for_bev_and_interface.py +++ b/metadrive/examples/generate_video_for_bev_and_interface.py @@ -46,7 +46,7 @@ frame_count = 0 o, _ = env.reset(seed=start_seed) - env.vehicle.expert_takeover = True + env.agent.expert_takeover = True env.engine.force_fps.disable() while True: @@ -92,5 +92,5 @@ break o, _ = env.reset(seed=ep_count + start_seed) - env.vehicle.expert_takeover = True + env.agent.expert_takeover = True env.engine.force_fps.disable() diff --git a/metadrive/examples/profile_metadrive_marl.py b/metadrive/examples/profile_metadrive_marl.py index da7f6803d..786eb5373 100644 --- a/metadrive/examples/profile_metadrive_marl.py +++ b/metadrive/examples/profile_metadrive_marl.py @@ -20,13 +20,13 @@ reset_used_time = 0 action = [0.0, 1.] total_steps = args.num_steps - vehicle_num = [len(env.vehicles)] + vehicle_num = [len(env.agents)] for s in range(total_steps): - o, r, tm, tc, i = env.step({k: action for k in env.vehicles}) + o, r, tm, tc, i = env.step({k: action for k in env.agents}) if tm["__all__"]: start_reset = time.time() env.reset() - vehicle_num.append(len(env.vehicles)) + vehicle_num.append(len(env.agents)) reset_used_time += time.time() - start_reset if (s + 1) % 100 == 0: print( diff --git a/metadrive/examples/top_down_metadrive.py b/metadrive/examples/top_down_metadrive.py index 6b7331623..1c3f37ece 100644 --- a/metadrive/examples/top_down_metadrive.py +++ b/metadrive/examples/top_down_metadrive.py @@ -73,7 +73,7 @@ def close_event(): try: o, _ = env.reset() for i in range(1, 100000): - o, r, tm, tc, info = env.step(expert(env.vehicle)) + o, r, tm, tc, info = env.step(expert(env.agent)) env.render(mode="top_down", text={"Quit": "ESC"}, film_size=(2000, 2000)) if tm or tc: env.reset() diff --git a/metadrive/examples/verify_image_observation.py b/metadrive/examples/verify_image_observation.py index b5519cfaa..3b8869db9 100644 --- a/metadrive/examples/verify_image_observation.py +++ b/metadrive/examples/verify_image_observation.py @@ -22,7 +22,10 @@ def _test_rgb_camera_as_obs(render=False, image_on_cuda=True, debug=False, camer }, "semantic": { "semantic_camera": (SemanticCamera, *res) - } + }, + "main": { + "main_camera": () + }, } env = MetaDriveEnv( @@ -35,7 +38,7 @@ def _test_rgb_camera_as_obs(render=False, image_on_cuda=True, debug=False, camer image_on_cuda=True if image_on_cuda else False, use_render=False, vehicle_config=dict(image_source="{}_camera".format(camera)), - sensors=mapping[camera] if camera != "main" else {}, + sensors=mapping[camera], show_interface=False, show_logo=False, show_fps=False, diff --git a/metadrive/manager/agent_manager.py b/metadrive/manager/agent_manager.py index 0b42f5fe9..61d6b9eca 100644 --- a/metadrive/manager/agent_manager.py +++ b/metadrive/manager/agent_manager.py @@ -1,70 +1,40 @@ -import copy - -from metadrive.engine.logger import get_logger -from metadrive.policy.idm_policy import TrajectoryIDMPolicy -from typing import Dict - -from gymnasium.spaces import Box, Dict, MultiDiscrete, Discrete, Space - from metadrive.constants import DEFAULT_AGENT -from metadrive.manager.base_manager import BaseManager +from metadrive.engine.logger import get_logger +from metadrive.manager.base_manager import BaseAgentManager from metadrive.policy.AI_protect_policy import AIProtectPolicy +from metadrive.policy.idm_policy import TrajectoryIDMPolicy from metadrive.policy.manual_control_policy import ManualControlPolicy from 
metadrive.policy.replay_policy import ReplayTrafficParticipantPolicy
 
 logger = get_logger()
 
 
-class AgentManager(BaseManager):
+class VehicleAgentManager(BaseAgentManager):
     """
     This class maintain the relationship between active agents in the environment with the underlying instance
     of objects.
 
     Note:
-    agent name: Agent name that exists in the environment, like agent0, agent1, ....
+    agent name: Agent name that exists in the environment, like default_agent, agent0, agent1, ....
     object name: The unique name for each object, typically be random string.
     """
     INITIALIZED = False  # when vehicles instances are created, it will be set to True
 
-    def __init__(self, init_observations, init_action_space):
+    def __init__(self, init_observations):
         """
         The real init is happened in self.init(), in which super().__init__() will be called
         """
-        # BaseVehicles which can be controlled by policies when env.step() called
-        self._active_objects = {}
-        # BaseVehicles which will be recycled after the delay_done time
-        self._dying_objects = {}
-        self._agents_finished_this_frame = dict()  # for observation space
-
-        self.next_agent_count = 0
-
-        # fake init. before creating engine and vehicles, it is necessary when all vehicles re-created in runtime
-        self.observations = copy.copy(init_observations)  # its value is map before init() is called
-        self._init_observations = init_observations  # map
-        # init spaces before initializing env.engine
-        observation_space = {
-            agent_id: single_obs.observation_space
-            for agent_id, single_obs in init_observations.items()
-        }
-        assert isinstance(init_action_space, dict)
-        assert isinstance(observation_space, dict)
-        self._init_observation_spaces = observation_space
-        self._init_action_spaces = init_action_space
-        self.observation_spaces = copy.copy(observation_space)
-        self.action_spaces = copy.copy(init_action_space)
-        self.episode_created_agents = None
-
-        # this map will be override when the env.init() is first called and vehicles are made
-        self._agent_to_object = {k: k for k in self.observations.keys()}  # no target vehicles created, fake init
-        self._object_to_agent = {k: k for k in self.observations.keys()}  # no target vehicles created, fake init
-
-        # get the value in init()
+        super(VehicleAgentManager, self).__init__(init_observations)
+        # For multi-agent env, None values are updated in init()
         self._allow_respawn = None
-        self._debug = None
         self._delay_done = None
         self._infinite_agents = None
 
-    def _get_vehicles(self, config_dict: dict):
+        self._dying_objects = {}  # BaseVehicles which will be recycled after the delay_done time
+        self._agents_finished_this_frame = dict()  # for observation space
+        self.next_agent_count = 0
+
+    def _create_agents(self, config_dict: dict):
         from metadrive.component.vehicle.vehicle_type import random_vehicle_type, vehicle_type
         ret = {}
         for agent_id, v_config in config_dict.items():
@@ -83,20 +53,24 @@ def _get_vehicles(self, config_dict: dict):
 
     @property
     def agent_policy(self):
-        # note: agent.id = object id
-
-        if self.engine.global_config["manual_control"]:
-            if self.engine.global_config.get("use_AI_protector", False):
+        """
+        Return the agent policy
+        Returns: Agent Policy class
+        Make sure you access the global config via get_global_config() instead of self.engine.global_config
+        """
+        from metadrive.engine.engine_utils import get_global_config
+        if get_global_config()["manual_control"]:
+            if get_global_config().get("use_AI_protector", False):
                 policy = AIProtectPolicy
             else:
                 policy = ManualControlPolicy
         else:
-            policy = 
self.engine.global_config["agent_policy"] + policy = get_global_config()["agent_policy"] return policy def before_reset(self): if not self.INITIALIZED: - super(AgentManager, self).__init__() + super(BaseAgentManager, self).__init__() self.INITIALIZED = True self.episode_created_agents = None @@ -105,11 +79,11 @@ def before_reset(self): for v in self.dying_agents.values(): self._remove_vehicle(v) - for v in self.get_vehicle_list(): + for v in list(self._active_objects.values()) + [v for (v, _) in self._dying_objects.values()]: if hasattr(v, "before_reset"): v.before_reset() - super(AgentManager, self).before_reset() + super(VehicleAgentManager, self).before_reset() def reset(self): """ @@ -117,68 +91,28 @@ def reset(self): """ self.random_spawn_lane_in_single_agent() config = self.engine.global_config - self._debug = config["debug"] self._delay_done = config["delay_done"] self._infinite_agents = config["num_agents"] == -1 self._allow_respawn = config["allow_respawn"] - self.episode_created_agents = self._get_vehicles( - config_dict=self.engine.global_config["target_vehicle_configs"] - ) + super(VehicleAgentManager, self).reset() def after_reset(self): - init_vehicles = self.episode_created_agents - vehicles_created = set(init_vehicles.keys()) - vehicles_in_config = set(self._init_observations.keys()) - assert vehicles_created == vehicles_in_config, "{} not defined in target vehicles config".format( - vehicles_created.difference(vehicles_in_config) - ) - - # it is used when reset() is called to reset its original agent_id - self._agent_to_object = {agent_id: vehicle.name for agent_id, vehicle in init_vehicles.items()} - self._object_to_agent = {vehicle.name: agent_id for agent_id, vehicle in init_vehicles.items()} - self._active_objects = {v.name: v for v in init_vehicles.values()} + super(VehicleAgentManager, self).after_reset() self._dying_objects = {} self._agents_finished_this_frame = dict() - - # real init {obj_name: space} map - self.observations = dict() - self.observation_spaces = dict() - self.action_spaces = dict() - for agent_id, vehicle in init_vehicles.items(): - self.observations[vehicle.name] = self._init_observations[agent_id] - obs_space = self._init_observation_spaces[agent_id] - self.observation_spaces[vehicle.name] = obs_space - action_space = self._init_action_spaces[agent_id] - self.action_spaces[vehicle.name] = action_space - assert isinstance(action_space, Space) - self.next_agent_count = len(init_vehicles) - - def set_state(self, state: dict, old_name_to_current=None): - super(AgentManager, self).set_state(state, old_name_to_current) - created_agents = state["created_agents"] - created_agents = {agent_id: old_name_to_current[obj_name] for agent_id, obj_name in created_agents.items()} - episode_created_agents = {} - for a_id, name in created_agents.items(): - episode_created_agents[a_id] = self.engine.get_objects([name])[name] - self.episode_created_agents = episode_created_agents - - def get_state(self): - ret = super(AgentManager, self).get_state() - agent_info = {agent_id: obj.name for agent_id, obj in self.episode_created_agents.items()} - ret["created_agents"] = agent_info - return ret + self.next_agent_count = len(self.episode_created_agents) def random_spawn_lane_in_single_agent(self): if not self.engine.global_config["is_multi_agent"] and \ self.engine.global_config.get("random_spawn_lane_index", False) and self.engine.current_map is not None: - spawn_road_start = self.engine.global_config["target_vehicle_configs"][DEFAULT_AGENT]["spawn_lane_index"][0] - 
spawn_road_end = self.engine.global_config["target_vehicle_configs"][DEFAULT_AGENT]["spawn_lane_index"][1] + spawn_road_start = self.engine.global_config["agent_configs"][DEFAULT_AGENT]["spawn_lane_index"][0] + spawn_road_end = self.engine.global_config["agent_configs"][DEFAULT_AGENT]["spawn_lane_index"][1] index = self.np_random.randint(self.engine.current_map.config["lane_num"]) - self.engine.global_config["target_vehicle_configs"][DEFAULT_AGENT]["spawn_lane_index"] = ( + self.engine.global_config["agent_configs"][DEFAULT_AGENT]["spawn_lane_index"] = ( spawn_road_start, spawn_road_end, index ) - def finish(self, agent_name, ignore_delay_done=False): + def _finish(self, agent_name, ignore_delay_done=False): """ ignore_delay_done: Whether to ignore the delay done. This is not required when the agent success the episode! """ @@ -201,9 +135,9 @@ def _check(self): def propose_new_vehicle(self): # Create a new vehicle. - agent_name = self.next_agent_id() - next_config = self.engine.global_config["target_vehicle_configs"]["agent0"] - vehicle = self._get_vehicles({agent_name: next_config})[agent_name] + agent_name = self._next_agent_id() + next_config = self.engine.global_config["agent_configs"]["agent0"] + vehicle = self._create_agents({agent_name: next_config})[agent_name] new_v_name = vehicle.name self._agent_to_object[agent_name] = new_v_name self._object_to_agent[new_v_name] = agent_name @@ -219,7 +153,7 @@ def propose_new_vehicle(self): vehicle.set_static(False) return agent_name, vehicle, step_info - def next_agent_id(self): + def _next_agent_id(self): ret = "agent{}".format(self.next_agent_count) self.next_agent_count += 1 return ret @@ -254,9 +188,8 @@ def try_actuate_agent(self, step_infos, stage="before_step"): def before_step(self): # not in replay mode + step_infos = super(VehicleAgentManager, self).before_step() self._agents_finished_this_frame = dict() - step_infos = self.try_actuate_agent(dict(), stage="before_step") - finished = set() for v_name in self._dying_objects.keys(): self._dying_objects[v_name][1] -= 1 @@ -268,17 +201,6 @@ def before_step(self): self._dying_objects.pop(v_name) return step_infos - def after_step(self, *args, **kwargs): - step_infos = self.for_each_active_agents(lambda v: v.after_step()) - step_infos = self.try_actuate_agent(step_infos, stage="after_step") - return step_infos - - def _translate(self, d): - return {self._object_to_agent[k]: v for k, v in d.items()} - - def get_vehicle_list(self): - return list(self._active_objects.values()) + [v for (v, _) in self._dying_objects.values()] - def get_observations(self): if hasattr(self, "engine") and self.engine.replay_episode: return self.engine.replay_manager.get_replay_agent_observations() @@ -302,28 +224,6 @@ def get_observation_spaces(self): ret[self.object_to_agent(obj_id)] = space return ret - def get_action_spaces(self): - ret = dict() - for obj_id, space in self.action_spaces.items(): - if self.is_active_object(obj_id): - ret[self.object_to_agent(obj_id)] = space - return ret - - def is_active_object(self, object_name): - if not self.INITIALIZED: - return True - return True if object_name in self._active_objects.keys() else False - - @property - def active_agents(self): - """ - Return Map - """ - if hasattr(self, "engine") and self.engine is not None and self.engine.replay_episode: - return self.engine.replay_manager.replay_agents - else: - return {self._object_to_agent[k]: v for k, v in self._active_objects.items()} - @property def dying_agents(self): assert not self.engine.replay_episode @@ 
-334,24 +234,12 @@ def just_terminated_agents(self):
         assert not self.engine.replay_episode
         ret = {}
         for agent_name, v_name in self._agents_finished_this_frame.items():
-            v = self.get_object(v_name, raise_error=False)
+            v = self.get_agent(agent_name, raise_error=False)
             ret[agent_name] = v
         return ret
 
-    @property
-    def active_objects(self):
-        """
-        Return meta-data, a pointer, Caution !
-        :return: Map
-        """
-        raise DeprecationWarning("prohibit! Use active agent instead")
-        return self._active_objects
-
-    def get_agent(self, agent_name):
+    def get_agent(self, agent_name, raise_error=True):
         object_name = self.agent_to_object(agent_name)
-        return self.get_object(object_name)
-
-    def get_object(self, object_name, raise_error=True):
         if object_name in self._active_objects:
             return self._active_objects[object_name]
         elif object_name in self._dying_objects:
@@ -362,46 +250,12 @@ def get_object(self, object_name, raise_error=True):
         else:
             return None
 
-    def object_to_agent(self, obj_name):
-        """
-        We recommend to use engine.agent_to_object() or engine.object_to_agent() instead of the ones in agent_manager,
-        since this two functions DO NOT work when replaying episode.
-        :param obj_name: BaseVehicle name
-        :return: agent id
-        """
-        # if obj_name not in self._active_objects.keys() and self.INITIALIZED:
-        #     raise ValueError("You can not access a pending Object(BaseVehicle) outside the agent_manager!")
-        return self._object_to_agent[obj_name]
-
-    def agent_to_object(self, agent_id):
-        """
-        We recommend to use engine.agent_to_object() or engine.object_to_agent() instead of the ones in agent_manager,
-        since this two functions DO NOT work when replaying episode.
-        """
-        return self._agent_to_object[agent_id]
-
     def destroy(self):
         # when new agent joins in the game, we only change this two maps.
-        if self.INITIALIZED:
-            super(AgentManager, self).destroy()
-        self._agent_to_object = {}
-        self._object_to_agent = {}
-
-        # BaseVehicles which can be controlled by policies when env.step() called
-        self._active_objects = {}
-
-        # BaseVehicles which can be respawned
+        super(VehicleAgentManager, self).destroy()
         self._dying_objects = {}
-
-        # Dict[object_id: value], init for **only** once after spawning vehicle
-        for obs in self.observations.values():
-            obs.destroy()
-        self.observations = {}
-        self.observation_spaces = {}
-        self.action_spaces = {}
-        self.next_agent_count = 0
-        self.INITIALIZED = False
+        self._agents_finished_this_frame = dict()
 
     def _put_to_dying_queue(self, v, ignore_delay_done=False):
         vehicle_name = v.name
@@ -424,13 +278,3 @@ def allow_respawn(self):
             return True
         else:
             return False
-
-    def for_each_active_agents(self, func, *args, **kwargs):
-        """
-        This func is a function that take each vehicle as the first argument and *arg and **kwargs as others.
-        """
-        assert len(self.active_agents) > 0, "Not enough vehicles exist!"
-        ret = dict()
-        for k, v in self.active_agents.items():
-            ret[k] = func(v, *args, **kwargs)
-        return ret
diff --git a/metadrive/manager/base_manager.py b/metadrive/manager/base_manager.py
index 8c3b18601..04fe3076f 100644
--- a/metadrive/manager/base_manager.py
+++ b/metadrive/manager/base_manager.py
@@ -1,3 +1,9 @@
+import copy
+from metadrive.engine.engine_utils import get_global_config
+from metadrive.constants import DEFAULT_AGENT
+
+from gymnasium.spaces import Space
+
 from metadrive.base_class.randomizable import Randomizable
 
 
@@ -144,3 +150,254 @@ def get_metadata(self):
         """
         assert self.episode_step == 0, "This func can only be called after env.reset() without any env.step() called"
         return {}
+
+    @property
+    def global_config(self):
+        return get_global_config()
+
+
+class BaseAgentManager(BaseManager):
+    """
+    This manager allows one to use objects like vehicles/traffic lights as agents, with multi-agent support.
+    It is recommended to build your own agent manager based on this class
+    """
+
+    INITIALIZED = False  # when the reset() and init() are called, it will be set to True
+
+    def __init__(self, init_observations):
+        """
+        The real initialization happens in self.init(), in which super().__init__() will be called
+        """
+        # for getting {agent_id: BaseObject}, use agent_manager.active_agents
+        self._active_objects = {}  # {object.id: BaseObject}
+
+        # fake init. before creating engine, it is necessary when all objects are re-created at runtime
+        self.observations = copy.copy(init_observations)  # its value is map before init() is called
+        self._init_observations = init_observations  # map
+
+        # init spaces before initializing env.engine
+        observation_space = {
+            agent_id: single_obs.observation_space
+            for agent_id, single_obs in init_observations.items()
+        }
+        init_action_space = self._get_action_space()
+        assert isinstance(init_action_space, dict)
+        assert isinstance(observation_space, dict)
+        self._init_observation_spaces = observation_space
+        self._init_action_spaces = init_action_space
+        self.observation_spaces = copy.copy(observation_space)
+        self.action_spaces = copy.copy(init_action_space)
+        self.episode_created_agents = None
+
+        # this map will be overridden when env.init() is first called and objects are made
+        self._agent_to_object = {k: k for k in self.observations.keys()}  # no target objects created, fake init
+        self._object_to_agent = {k: k for k in self.observations.keys()}  # no target objects created, fake init
+
+        self._debug = None
+
+    def _get_action_space(self):
+        from metadrive.engine.engine_utils import get_global_config
+        if self.global_config["is_multi_agent"]:
+            return {v_id: self.agent_policy.get_input_space() for v_id in get_global_config()["agent_configs"].keys()}
+        else:
+            return {DEFAULT_AGENT: self.agent_policy.get_input_space()}
+
+    @property
+    def agent_policy(self):
+        """
+        Return the agent policy
+        Returns: Agent Policy class
+        Make sure you access the global config via get_global_config() instead of self.engine.global_config
+        """
+        from metadrive.engine.engine_utils import get_global_config
+        return get_global_config()["agent_policy"]
+
+    def before_reset(self):
+        if not self.INITIALIZED:
+            super(BaseAgentManager, self).__init__()
+            self.INITIALIZED = True
+        self.episode_created_agents = None
+
+        for v in list(self._active_objects.values()):
+            if hasattr(v, "before_reset"):
+                v.before_reset()
+        super(BaseAgentManager, self).before_reset()
+
+    def _create_agents(self, config_dict):
+        """
+        It should create a set of vehicles or other objects serving as agents
+        Args:
+            config_dict:
+
+        Returns:
+
+        """
+        raise NotImplementedError
+
+    def reset(self):
+        """
+        The agent manager is really initialized after the BaseObject instances are created
+        """
+        config = self.engine.global_config
+        self._debug = config["debug"]
+        self.episode_created_agents = self._create_agents(config_dict=self.engine.global_config["agent_configs"])
+
+    def after_reset(self):
+        init_objects = self.episode_created_agents
+        objects_created = set(init_objects.keys())
+        objects_in_config = set(self._init_observations.keys())
+        assert objects_created == objects_in_config, "{} not defined in target objects config".format(
+            objects_created.difference(objects_in_config)
+        )
+
+        # it is used when reset() is called to reset its original agent_id
+        self._agent_to_object = {agent_id: object.name for agent_id, object in init_objects.items()}
+        self._object_to_agent = {object.name: agent_id for agent_id, object in init_objects.items()}
+        self._active_objects = {v.name: v for v in init_objects.values()}
+
+        # real init {obj_name: space} map
+        self.observations = dict()
+        self.observation_spaces = dict()
+        self.action_spaces = dict()
+        for agent_id, object in init_objects.items():
+            self.observations[object.name] = self._init_observations[agent_id]
+            obs_space = self._init_observation_spaces[agent_id]
+            self.observation_spaces[object.name] = obs_space
+            action_space = self._init_action_spaces[agent_id]
+            self.action_spaces[object.name] = action_space
+            assert isinstance(action_space, Space)
+
+    def set_state(self, state: dict, old_name_to_current=None):
+        super(BaseAgentManager, self).set_state(state, old_name_to_current)
+        created_agents = state["created_agents"]
+        created_agents = {agent_id: old_name_to_current[obj_name] for agent_id, obj_name in created_agents.items()}
+        episode_created_agents = {}
+        for a_id, name in created_agents.items():
+            episode_created_agents[a_id] = self.engine.get_objects([name])[name]
+        self.episode_created_agents = episode_created_agents
+
+    def get_state(self):
+        ret = super(BaseAgentManager, self).get_state()
+        agent_info = {agent_id: obj.name for agent_id, obj in self.episode_created_agents.items()}
+        ret["created_agents"] = agent_info
+        return ret
+
+    def try_actuate_agent(self, step_infos, stage="before_step"):
+        """
+        Some policies should make decisions before the physics world is actuated, in particular those that
+        need to do decision-making. Other policies, like ReplayPolicy, should be called in after_step, as they
+        already know the final state and do not need to roll out the dynamic system to obtain it.
+        """
+        assert stage == "before_step" or stage == "after_step"
+        for agent_id in self.active_agents.keys():
+            policy = self.get_policy(self._agent_to_object[agent_id])
+            assert policy is not None, "No policy is set for agent {}".format(agent_id)
+            if stage == "before_step":
+                action = policy.act(agent_id)
+                step_infos[agent_id] = policy.get_action_info()
+                step_infos[agent_id].update(self.get_agent(agent_id).before_step(action))
+
+        return step_infos
+
+    def before_step(self):
+        # not in replay mode
+        step_infos = self.try_actuate_agent(dict(), stage="before_step")
+        return step_infos
+
+    def after_step(self, *args, **kwargs):
+        step_infos = self.for_each_active_agents(lambda v: v.after_step())
+        step_infos = self.try_actuate_agent(step_infos, stage="after_step")
+        return step_infos
+
+    def _translate(self, d):
+        return {self._object_to_agent[k]: v for k, v in d.items()}
+
+    def get_observations(self):
+        if hasattr(self, "engine") and self.engine.replay_episode:
+            return self.engine.replay_manager.get_replay_agent_observations()
+        else:
+            ret = {}
+            for obj_id, observation in self.observations.items():
+                if self.is_active_object(obj_id):
+                    ret[self.object_to_agent(obj_id)] = observation
+            return ret
+
+    def get_observation_spaces(self):
+        ret = {}
+        for obj_id, space in self.observation_spaces.items():
+            if self.is_active_object(obj_id):
+                ret[self.object_to_agent(obj_id)] = space
+        return ret
+
+    def get_action_spaces(self):
+        ret = dict()
+        for obj_id, space in self.action_spaces.items():
+            if self.is_active_object(obj_id):
+                ret[self.object_to_agent(obj_id)] = space
+        return ret
+
+    def is_active_object(self, object_name):
+        if not self.INITIALIZED:
+            return True
+        return True if object_name in self._active_objects.keys() else False
+
+    @property
+    def active_agents(self):
+        """
+        Return a map from agent id to the agent's BaseObject
+        """
+        if hasattr(self, "engine") and self.engine is not None and self.engine.replay_episode:
+            return self.engine.replay_manager.replay_agents
+        else:
+            return {self._object_to_agent[k]: v for k, v in self._active_objects.items()}
+
+    def get_agent(self, agent_name, raise_error=True):
+        object_name = self.agent_to_object(agent_name)
+        if object_name in self._active_objects:
+            return self._active_objects[object_name]
+        else:
+            if raise_error:
+                raise ValueError("Object {} not found!".format(object_name))
+            else:
+                return None
+
+    def object_to_agent(self, obj_name):
+        """
+        We recommend using engine.agent_to_object() or engine.object_to_agent() instead of the ones in agent_manager,
+        since these two functions DO NOT work when replaying episodes.
+        :param obj_name: BaseObject.name
+        :return: agent id
+        """
+        return self._object_to_agent[obj_name]
+
+    def agent_to_object(self, agent_id):
+        """
+        We recommend using engine.agent_to_object() or engine.object_to_agent() instead of the ones in agent_manager,
+        since these two functions DO NOT work when replaying episodes.
+        """
+        return self._agent_to_object[agent_id]
+
+    def destroy(self):
+        # when a new agent joins the game, we only change these two maps.
+        if self.INITIALIZED:
+            super(BaseAgentManager, self).destroy()
+            self._agent_to_object = {}
+            self._object_to_agent = {}
+            self._active_objects = {}
+            for obs in self.observations.values():
+                obs.destroy()
+            self.observations = {}
+            self.observation_spaces = {}
+            self.action_spaces = {}
+
+        self.INITIALIZED = False
+
+    def for_each_active_agents(self, func, *args, **kwargs):
+        """
+        Call func on every active agent; func takes the object as its first argument, followed by *args and **kwargs.
+ """ + assert len(self.active_agents) > 0, "Not enough objects exist!" + ret = dict() + for k, v in self.active_agents.items(): + ret[k] = func(v, *args, **kwargs) + return ret diff --git a/metadrive/manager/scenario_map_manager.py b/metadrive/manager/scenario_map_manager.py index 6fb8cd949..5a4fd51c0 100644 --- a/metadrive/manager/scenario_map_manager.py +++ b/metadrive/manager/scenario_map_manager.py @@ -67,7 +67,7 @@ def update_route(self): self.engine.global_config.update( copy.deepcopy( dict( - target_vehicle_configs={ + agent_configs={ DEFAULT_AGENT: dict( spawn_position_heading=(init_position, init_yaw), spawn_velocity=init_state["velocity"] ) diff --git a/metadrive/manager/spawn_manager.py b/metadrive/manager/spawn_manager.py index d5293119e..d4150ed32 100644 --- a/metadrive/manager/spawn_manager.py +++ b/metadrive/manager/spawn_manager.py @@ -50,12 +50,12 @@ def __init__(self): self.need_update_spawn_places = True self.spawn_places_used = [] # reset every step - target_vehicle_configs = copy.copy(self.engine.global_config["target_vehicle_configs"]) - self._init_target_vehicle_configs = target_vehicle_configs + agent_configs = copy.copy(self.engine.global_config["agent_configs"]) + self._init_agent_configs = agent_configs spawn_roads = self.engine.global_config["spawn_roads"] - target_vehicle_configs, safe_spawn_places = self._auto_fill_spawn_roads_randomly(spawn_roads) - self.available_target_vehicle_configs = target_vehicle_configs + agent_configs, safe_spawn_places = self._auto_fill_spawn_roads_randomly(spawn_roads) + self.available_agent_configs = agent_configs self.safe_spawn_places = {place["identifier"]: place for place in safe_spawn_places} self.spawn_roads = spawn_roads self.need_update_spawn_places = True @@ -71,38 +71,38 @@ def get_not_randomize_vehicle_configs(configs): def reset(self): # random assign spawn points - num_agents = self.num_agents if self.num_agents is not None else len(self.available_target_vehicle_configs) - assert len(self.available_target_vehicle_configs) > 0 + num_agents = self.num_agents if self.num_agents is not None else len(self.available_agent_configs) + assert len(self.available_agent_configs) > 0 if num_agents == -1: # Infinite number of agents - target_agents = list(range(len(self.available_target_vehicle_configs))) + target_agents = list(range(len(self.available_agent_configs))) else: target_agents = self.np_random.choice( - [i for i in range(len(self.available_target_vehicle_configs))], num_agents, replace=False + [i for i in range(len(self.available_agent_configs))], num_agents, replace=False ) # set the spawn road ret = {} if len(target_agents) > 1: for real_idx, idx in enumerate(target_agents): - v_config = self.available_target_vehicle_configs[idx]["config"] + v_config = self.available_agent_configs[idx]["config"] v_config = self._randomize_position_in_slot(v_config) ret["agent{}".format(real_idx)] = v_config else: - ret["agent0"] = self._randomize_position_in_slot(self.available_target_vehicle_configs[0]["config"]) + ret["agent0"] = self._randomize_position_in_slot(self.available_agent_configs[0]["config"]) # set the destination/spawn point and update target_v config - target_vehicle_configs = {} + agent_configs = {} for agent_id, config in ret.items(): - init_config = copy.deepcopy(self._init_target_vehicle_configs[agent_id]) + init_config = copy.deepcopy(self._init_agent_configs[agent_id]) if not init_config.get("_specified_spawn_lane", False): init_config.update(config) config = init_config if not config.get("destination", False) 
or config["destination"] is None: config = self.update_destination_for(agent_id, config) - target_vehicle_configs[agent_id] = config + agent_configs[agent_id] = config - self.engine.global_config["target_vehicle_configs"] = copy.deepcopy(target_vehicle_configs) + self.engine.global_config["agent_configs"] = copy.deepcopy(agent_configs) @staticmethod def max_capacity(spawn_roads, exit_length, lane_num): @@ -133,14 +133,14 @@ def _auto_fill_spawn_roads_randomly(self, spawn_roads): # We can spawn agents in the middle of road at the initial time, but when some vehicles need to be respawn, # then we have to set it to the farthest places to ensure safety (otherwise the new vehicles may suddenly # appear at the middle of the road!) - target_vehicle_configs = [] + agent_configs = [] safe_spawn_places = [] for i, road in enumerate(spawn_roads): for lane_idx in range(self.lane_num): for j in range(num_slots): long = 1 / 2 * self.RESPAWN_REGION_LONGITUDE + j * self.RESPAWN_REGION_LONGITUDE lane_tuple = road.lane_index(lane_idx) # like (>>>, 1C0_0_, 1) and so on. - target_vehicle_configs.append( + agent_configs.append( Config( dict( identifier="|".join((str(s) for s in lane_tuple + (j, ))), @@ -154,8 +154,8 @@ def _auto_fill_spawn_roads_randomly(self, spawn_roads): ) ) # lock the spawn positions if j == 0: - safe_spawn_places.append(copy.deepcopy(target_vehicle_configs[-1])) - return target_vehicle_configs, safe_spawn_places + safe_spawn_places.append(copy.deepcopy(agent_configs[-1])) + return agent_configs, safe_spawn_places def step(self): self.spawn_places_used = [] diff --git a/metadrive/manager/traffic_manager.py b/metadrive/manager/traffic_manager.py index 6d199f36b..e6bd4f1c4 100644 --- a/metadrive/manager/traffic_manager.py +++ b/metadrive/manager/traffic_manager.py @@ -322,7 +322,7 @@ def __del__(self): logging.debug("{} is destroyed".format(self.__class__.__name__)) def __repr__(self): - return self.vehicles.__repr__() + return self._traffic_vehicles.__repr__() @property def vehicles(self): diff --git a/metadrive/obs/top_down_renderer.py b/metadrive/obs/top_down_renderer.py index 1b50195ea..0a76cf331 100644 --- a/metadrive/obs/top_down_renderer.py +++ b/metadrive/obs/top_down_renderer.py @@ -241,9 +241,9 @@ def __init__( self.history_objects = deque(maxlen=num_stack) self.history_target_vehicle = [] self.history_smooth = history_smooth - # self.current_track_vehicle = current_track_vehicle + # self.current_track_agent = current_track_agent if self.target_vehicle_heading_up: - assert self.current_track_vehicle is not None, "Specify which vehicle to track" + assert self.current_track_agent is not None, "Specify which vehicle to track" self._text_render_pos = [50, 50] self._font_size = 25 self._text_render_interval = 20 @@ -315,12 +315,12 @@ def render(self, text, to_image=True, *args, **kwargs): self.history_target_vehicle.append( history_object( type=MetaDriveType.VEHICLE, - name=self.current_track_vehicle.name, - heading_theta=self.current_track_vehicle.heading_theta, - WIDTH=self.current_track_vehicle.top_down_width, - LENGTH=self.current_track_vehicle.top_down_length, - position=self.current_track_vehicle.position, - color=self.current_track_vehicle.top_down_color, + name=self.current_track_agent.name, + heading_theta=self.current_track_agent.heading_theta, + WIDTH=self.current_track_agent.top_down_width, + LENGTH=self.current_track_agent.top_down_length, + position=self.current_track_agent.position, + color=self.current_track_agent.top_down_color, done=False ) ) @@ -379,8 +379,8 @@ def 
clear(self): self.screen_frames.clear() @property - def current_track_vehicle(self): - return self.engine.current_track_vehicle + def current_track_agent(self): + return self.engine.current_track_agent @staticmethod def _append_frame_objects(objects): @@ -492,7 +492,7 @@ def _draw(self, *args, **kwargs): ) self._deads.append(v) - v = self.current_track_vehicle + v = self.current_track_agent canvas = self._frame_canvas field = self._screen_canvas.get_size() if not self.target_vehicle_heading_up: diff --git a/metadrive/policy/idm_policy.py b/metadrive/policy/idm_policy.py index 3566f243f..8026d8472 100644 --- a/metadrive/policy/idm_policy.py +++ b/metadrive/policy/idm_policy.py @@ -411,7 +411,7 @@ def __init__(self, *args, **kwargs): self.engine.global_config["manual_control"] = False # hack def act(self, agent_id): - if self.control_object is self.engine.current_track_vehicle: + if self.control_object is self.engine.current_track_agent: self.engine.global_config["manual_control"] = True # hack action = self.manual_control_policy.act(agent_id) self.engine.global_config["manual_control"] = False # hack diff --git a/metadrive/policy/manual_control_policy.py b/metadrive/policy/manual_control_policy.py index 4ae0d5f38..aaa09738f 100644 --- a/metadrive/policy/manual_control_policy.py +++ b/metadrive/policy/manual_control_policy.py @@ -48,17 +48,17 @@ def act(self, agent_id): self.controller.process_others(takeover_callback=self.toggle_takeover) try: - if self.engine.current_track_vehicle.expert_takeover and self.enable_expert: - return expert(self.engine.current_track_vehicle) + if self.engine.current_track_agent.expert_takeover and self.enable_expert: + return expert(self.engine.current_track_agent) except (ValueError, AssertionError): # if observation doesn't match, fall back to manual control print("Current observation does not match the format that expert can accept.") self.toggle_takeover() - is_track_vehicle = self.engine.agent_manager.get_agent(agent_id) is self.engine.current_track_vehicle + is_track_vehicle = self.engine.agent_manager.get_agent(agent_id) is self.engine.current_track_agent not_in_native_bev = (self.engine.main_camera is None) or (not self.engine.main_camera.is_bird_view_camera()) if self.engine.global_config["manual_control"] and is_track_vehicle and not_in_native_bev: - action = self.controller.process_input(self.engine.current_track_vehicle) + action = self.controller.process_input(self.engine.current_track_agent) self.action_info["manual_control"] = True else: action = super(ManualControlPolicy, self).act(agent_id) @@ -68,9 +68,9 @@ def act(self, agent_id): return action def toggle_takeover(self): - if self.engine.current_track_vehicle is not None: - self.engine.current_track_vehicle.expert_takeover = not self.engine.current_track_vehicle.expert_takeover - print("The expert takeover is set to: ", self.engine.current_track_vehicle.expert_takeover) + if self.engine.current_track_agent is not None: + self.engine.current_track_agent.expert_takeover = not self.engine.current_track_agent.expert_takeover + print("The expert takeover is set to: ", self.engine.current_track_agent.expert_takeover) class TakeoverPolicy(EnvInputPolicy): @@ -94,8 +94,8 @@ def __init__(self, obj, seed): def act(self, agent_id): agent_action = super(TakeoverPolicy, self).act(agent_id) if self.engine.global_config["manual_control"] and self.engine.agent_manager.get_agent( - agent_id) is self.engine.current_track_vehicle and not self.engine.main_camera.is_bird_view_camera(): - expert_action = 
self.controller.process_input(self.engine.current_track_vehicle) + agent_id) is self.engine.current_track_agent and not self.engine.main_camera.is_bird_view_camera(): + expert_action = self.controller.process_input(self.engine.current_track_agent) if isinstance(self.controller, SteeringWheelController) and (self.controller.left_shift_paddle or self.controller.right_shift_paddle): # if expert_action[0]*agent_action[0]< 0 or expert_action[1]*agent_action[1] < 0: diff --git a/metadrive/tests/scripts/benchmark_brake.py b/metadrive/tests/scripts/benchmark_brake.py index c7bee0e81..e20593e97 100644 --- a/metadrive/tests/scripts/benchmark_brake.py +++ b/metadrive/tests/scripts/benchmark_brake.py @@ -15,53 +15,53 @@ def get_result(env): reported_end = None reported_start = None reported_rotation = None - start_heading = env.vehicle.heading_theta + start_heading = env.agent.heading_theta rotate_start_pos = None max_speed_loc = None for s in range(10000): if s < 20: action = np.array([0.0, 0.0]) - elif env.vehicle.speed_km_h < 100 and not reported_max_speed: + elif env.agent.speed_km_h < 100 and not reported_max_speed: action = np.array([0.0, 1.0]) else: action = np.array([0.0, -1.0]) # action = np.array([0.0, 0.0]) - if s > 20 and env.vehicle.speed_km_h > 1.0 and not reported_start: + if s > 20 and env.agent.speed_km_h > 1.0 and not reported_start: # print("Start the car at {}".format(s)) reported_start = s start_time = time.time() - if s > 20 and env.vehicle.speed_km_h >= 100 and not reported_max_speed: + if s > 20 and env.agent.speed_km_h >= 100 and not reported_max_speed: spend = (s - 1 - reported_start) * 0.1 print( "Achieve max speed: {} at {}. Spend {} s. Current location: {}".format( - max_speed_km_h, s - 1, spend, env.vehicle.position + max_speed_km_h, s - 1, spend, env.agent.position ) ) # print("real time spend to acc: {}".format(time.time() - start_time)) reported_max_speed = s - max_speed_loc = env.vehicle.position + max_speed_loc = env.agent.position - max_speed_km_h = max(max_speed_km_h, env.vehicle.speed_km_h) + max_speed_km_h = max(max_speed_km_h, env.agent.speed_km_h) - if s > 20 and env.vehicle.speed_km_h <= 1.0 and reported_max_speed and not reported_end: - dist = env.vehicle.position - max_speed_loc + if s > 20 and env.agent.speed_km_h <= 1.0 and reported_max_speed and not reported_end: + dist = env.agent.position - max_speed_loc dist = dist[0] - # print("Stop the car at {}. Distance {}. Current location: {}".format(s, dist, env.vehicle.position)) + # print("Stop the car at {}. Distance {}. 
Current location: {}".format(s, dist, env.agent.position)) reported_end = True - speed = env.vehicle.speed_km_h - current_heading = env.vehicle.heading_theta + speed = env.agent.speed_km_h + current_heading = env.agent.heading_theta if reported_end and not reported_rotation: if rotate_start_pos is None: - rotate_start_pos = env.vehicle.position + rotate_start_pos = env.agent.position if speed < 99: action = np.array([0.0, 1.0]) else: action = np.array([-1.0, 1.0]) if abs(current_heading - start_heading) >= np.pi / 2: - rotate_displacement = np.asarray(env.vehicle.position) - np.asarray(rotate_start_pos) + rotate_displacement = np.asarray(env.agent.position) - np.asarray(rotate_start_pos) reported_rotation = True o, r, tm, tc, i = env.step(action) diff --git a/metadrive/tests/scripts/capture_obs.py b/metadrive/tests/scripts/capture_obs.py index 217c493b8..32553cd3c 100644 --- a/metadrive/tests/scripts/capture_obs.py +++ b/metadrive/tests/scripts/capture_obs.py @@ -46,23 +46,23 @@ o, _ = env.reset() depth_camera = env.config["vehicle_config"]["depth_camera"] - depth_camera = DepthCamera(*depth_camera, chassis_np=env.vehicle.chassis, engine=env.engine) - env.vehicle.add_image_sensor("depth_camera", depth_camera) + depth_camera = DepthCamera(*depth_camera, chassis_np=env.agent.chassis, engine=env.engine) + env.agent.add_image_sensor("depth_camera", depth_camera) depth_camera.remove_display_region(env.engine) - # for sensor in env.vehicle.image_sensors.values(): + # for sensor in env.agent.image_sensors.values(): # sensor.remove_display_region(env.engine) - # env.vehicle.contact_result_render.detachNode() - # env.vehicle.navigation._right_arrow.detachNode() + # env.agent.contact_result_render.detachNode() + # env.agent.navigation._right_arrow.detachNode() - env.vehicle.chassis.setPos(244, 0, 1.5) + env.agent.chassis.setPos(244, 0, 1.5) for i in range(1, 100000): o, r, tm, tc, info = env.step([0, 1]) env.render( # text={ # "vehicle_num": len(env.engine.traffic_manager.traffic_vehicles), - # "dist_to_left:": env.vehicle.dist_to_left, - # "dist_to_right:": env.vehicle.dist_to_right, + # "dist_to_left:": env.agent.dist_to_left, + # "dist_to_right:": env.agent.dist_to_right, # } ) if tm or tc: diff --git a/metadrive/tests/scripts/capture_rgb_and_send_to_other_process.py b/metadrive/tests/scripts/capture_rgb_and_send_to_other_process.py index 771bb2a9e..bf2e3f099 100644 --- a/metadrive/tests/scripts/capture_rgb_and_send_to_other_process.py +++ b/metadrive/tests/scripts/capture_rgb_and_send_to_other_process.py @@ -34,7 +34,7 @@ def main_thread(): try: o, _ = env.reset() # print(HELP_MESSAGE) - env.vehicle.expert_takeover = False + env.agent.expert_takeover = False context = zmq.Context() socket = context.socket(zmq.PUSH) socket.bind("tcp://127.0.0.1:5555") diff --git a/metadrive/tests/test_component/test_bicycle_model.py b/metadrive/tests/test_component/test_bicycle_model.py index aca4f9bea..887b5ed6c 100644 --- a/metadrive/tests/test_component/test_bicycle_model.py +++ b/metadrive/tests/test_component/test_bicycle_model.py @@ -30,7 +30,7 @@ def _test_bicycle_model(): ) bicycle_model = BicycleModel() o, _ = env.reset() - vehicle = env.current_track_vehicle + vehicle = env.current_track_agent v_dir = vehicle.velocity_direction bicycle_model.reset(*vehicle.position, vehicle.speed, vehicle.heading_theta, np.arctan2(v_dir[1], v_dir[0])) actions = [] @@ -41,13 +41,13 @@ def _test_bicycle_model(): actions += [[s, throttle]] * 20 predict_states = [] for s in range(len(actions)): - vehicle = 
env.current_track_vehicle + vehicle = env.current_track_agent v_dir = vehicle.velocity_direction predict_states.append( predict( current_state=( - *env.current_track_vehicle.position, env.current_track_vehicle.speed, - env.current_track_vehicle.heading_theta, np.arctan2(v_dir[1], v_dir[0]) + *env.current_track_agent.position, env.current_track_agent.speed, + env.current_track_agent.heading_theta, np.arctan2(v_dir[1], v_dir[0]) ), actions=[actions[i] for i in range(s, s + horizon)], model=bicycle_model diff --git a/metadrive/tests/test_component/test_config_consistency.py b/metadrive/tests/test_component/test_config_consistency.py index c3c942c04..e8515995d 100644 --- a/metadrive/tests/test_component/test_config_consistency.py +++ b/metadrive/tests/test_component/test_config_consistency.py @@ -5,7 +5,7 @@ def test_config_consistency(): env = MetaDriveEnv({"vehicle_config": {"lidar": {"num_lasers": 999}}}) try: env.reset() - assert env.vehicle.config["lidar"]["num_lasers"] == 999 + assert env.agent.config["lidar"]["num_lasers"] == 999 finally: env.close() diff --git a/metadrive/tests/test_component/test_detector_mask.py b/metadrive/tests/test_component/test_detector_mask.py index ac54f8e47..29611a865 100644 --- a/metadrive/tests/test_component/test_detector_mask.py +++ b/metadrive/tests/test_component/test_detector_mask.py @@ -256,9 +256,9 @@ def test_detector_mask_in_lidar(): ) try: env.reset() - span = 2 * max(env.vehicle.WIDTH, env.vehicle.LENGTH) + span = 2 * max(env.agent.WIDTH, env.agent.LENGTH) detector_mask = DetectorMask( - env.vehicle.config.lidar.num_lasers, span, max_distance=env.vehicle.config.lidar.distance + env.agent.config.lidar.num_lasers, span, max_distance=env.agent.config.lidar.distance ) ep_count = 0 for tt in range(3000): @@ -266,12 +266,12 @@ def test_detector_mask_in_lidar(): # print("We have: {} vehicles!".format(env.engine.traffic_manager.get_vehicle_num())) - v = env.vehicle + v = env.agent c_p, objs = env.engine.get_sensor("lidar").perceive( v, physics_world=env.engine.physics_world.dynamic_world, - num_lasers=env.vehicle.config["lidar"]["num_lasers"], - distance=env.vehicle.config["lidar"]["distance"], + num_lasers=env.agent.config["lidar"]["num_lasers"], + distance=env.agent.config["lidar"]["distance"], detector_mask=None ) old_objs = v.lidar.get_surrounding_vehicles(objs) @@ -290,7 +290,7 @@ def test_detector_mask_in_lidar(): ) real_mask = old_cloud_points != 1.0 - mask = detector_mask.get_mask(env.vehicle.name) + mask = detector_mask.get_mask(env.agent.name) stack = np.stack([old_cloud_points, real_mask, mask]) if not all(mask[real_mask]): print('stop') @@ -304,12 +304,12 @@ def test_detector_mask_in_lidar(): ) # assert sum(abs(mask.astype(int) - real_mask.astype(int))) <= 3 - v = env.vehicle + v = env.agent c_p, objs = env.engine.get_sensor("lidar").perceive( v, - physics_world=env.vehicle.engine.physics_world.dynamic_world, - num_lasers=env.vehicle.config["lidar"]["num_lasers"], - distance=env.vehicle.config["lidar"]["distance"], + physics_world=env.agent.engine.physics_world.dynamic_world, + num_lasers=env.agent.config["lidar"]["num_lasers"], + distance=env.agent.config["lidar"]["distance"], ) new_cloud_points = np.array(copy.deepcopy(c_p)) np.testing.assert_almost_equal(old_cloud_points, new_cloud_points) diff --git a/metadrive/tests/test_component/test_distance_detector.py b/metadrive/tests/test_component/test_distance_detector.py index d6b116120..22d38e332 100644 --- a/metadrive/tests/test_component/test_distance_detector.py +++ 
b/metadrive/tests/test_component/test_distance_detector.py @@ -33,14 +33,14 @@ def test_original_lidar(render=False): v_config["spawn_lane_index"] = (FirstPGBlock.NODE_1, FirstPGBlock.NODE_2, 0) another_v = DefaultVehicle(v_config, random_seed=0) another_v.reset() - objs = env.engine.get_sensor("side_detector").perceive(env.vehicle, - env.vehicle.engine.physics_world.static_world, + objs = env.engine.get_sensor("side_detector").perceive(env.agent, + env.agent.engine.physics_world.static_world, num_lasers=2, distance=50 ).detected_objects + \ env.engine.get_sensor("lane_line_detector").perceive( - env.vehicle, - env.vehicle.engine.physics_world.static_world, + env.agent, + env.agent.engine.physics_world.static_world, num_lasers=2, distance=50 ).detected_objects @@ -53,7 +53,7 @@ def test_original_lidar(render=False): detect_base_vehicle = False for i in range(1, 1000): o, r, tm, tc, info = env.step([0, 1]) - if len(env.vehicle.lidar.get_surrounding_vehicles(env.observations[DEFAULT_AGENT].detected_objects)) > 2: + if len(env.agent.lidar.get_surrounding_vehicles(env.observations[DEFAULT_AGENT].detected_objects)) > 2: detect_traffic_vehicle = True for hit in env.observations[DEFAULT_AGENT].detected_objects: if isinstance(hit, BaseVehicle): @@ -92,14 +92,14 @@ def test_lidar_with_mask(render=False): another_v = DefaultVehicle(v_config, random_seed=0) another_v.reset() # for test - objs = env.engine.get_sensor("side_detector").perceive(env.vehicle, - env.vehicle.engine.physics_world.static_world, + objs = env.engine.get_sensor("side_detector").perceive(env.agent, + env.agent.engine.physics_world.static_world, num_lasers=2, distance=50 ).detected_objects + \ env.engine.get_sensor("lane_line_detector").perceive( - env.vehicle, - env.vehicle.engine.physics_world.static_world, + env.agent, + env.agent.engine.physics_world.static_world, num_lasers=2, distance=50 ).detected_objects @@ -112,7 +112,7 @@ def test_lidar_with_mask(render=False): detect_base_vehicle = False for i in range(1, 1000): o, r, tm, tc, info = env.step([0, 1]) - if len(env.vehicle.lidar.get_surrounding_vehicles(env.observations[DEFAULT_AGENT].detected_objects)) > 2: + if len(env.agent.lidar.get_surrounding_vehicles(env.observations[DEFAULT_AGENT].detected_objects)) > 2: detect_traffic_vehicle = True for hit in env.observations[DEFAULT_AGENT].detected_objects: if isinstance(hit, BaseVehicle): diff --git a/metadrive/tests/test_component/test_set_get_vehicle_attribute.py b/metadrive/tests/test_component/test_set_get_vehicle_attribute.py index 6bb8d4605..ed03fd32c 100644 --- a/metadrive/tests/test_component/test_set_get_vehicle_attribute.py +++ b/metadrive/tests/test_component/test_set_get_vehicle_attribute.py @@ -18,48 +18,48 @@ def test_set_get_vehicle_attribute(render=False): try: o, _ = env.reset() for _ in range(10): - env.vehicle.set_velocity([5, 0], in_local_frame=False) + env.agent.set_velocity([5, 0], in_local_frame=False) o, r, tm, tc, info = env.step([0, 0]) - assert abs(env.vehicle.speed - 5) < 0.01 # may encounter friction - assert np.isclose(env.vehicle.velocity, np.array([5, 0]), rtol=1e-2, atol=1e-2).all() - assert abs(env.vehicle.speed - env.vehicle.speed_km_h / 3.6) < 1e-4 - assert np.isclose(env.vehicle.velocity, env.vehicle.velocity_km_h / 3.6).all() + assert abs(env.agent.speed - 5) < 0.01 # may encounter friction + assert np.isclose(env.agent.velocity, np.array([5, 0]), rtol=1e-2, atol=1e-2).all() + assert abs(env.agent.speed - env.agent.speed_km_h / 3.6) < 1e-4 + assert np.isclose(env.agent.velocity, 
env.agent.velocity_km_h / 3.6).all() for _ in range(10): o, r, tm, tc, info = env.step([0, 0]) - env.vehicle.set_velocity([0, 5], in_local_frame=False) - assert abs(env.vehicle.speed - 5) < 0.1 - assert np.isclose(env.vehicle.velocity, np.array([0, 5]), rtol=1e-5, atol=1e-5).all() - assert abs(env.vehicle.speed - env.vehicle.speed_km_h / 3.6) < 1e-4 - assert np.isclose(env.vehicle.velocity, env.vehicle.velocity_km_h / 3.6).all() + env.agent.set_velocity([0, 5], in_local_frame=False) + assert abs(env.agent.speed - 5) < 0.1 + assert np.isclose(env.agent.velocity, np.array([0, 5]), rtol=1e-5, atol=1e-5).all() + assert abs(env.agent.speed - env.agent.speed_km_h / 3.6) < 1e-4 + assert np.isclose(env.agent.velocity, env.agent.velocity_km_h / 3.6).all() for _ in range(10): o, r, tm, tc, info = env.step([0, 0]) - env.vehicle.set_velocity([5, 3], value=10, in_local_frame=False) - assert abs(env.vehicle.speed - 10) < 0.1 + env.agent.set_velocity([5, 3], value=10, in_local_frame=False) + assert abs(env.agent.speed - 10) < 0.1 assert np.isclose( - env.vehicle.velocity, + env.agent.velocity, np.array([5 / np.linalg.norm(np.array([5, 3])) * 10, 3 / np.linalg.norm(np.array([5, 3])) * 10]), rtol=1e-5, atol=1e-5 ).all() - assert abs(env.vehicle.speed - env.vehicle.speed_km_h / 3.6) < 1e-4 - assert np.isclose(env.vehicle.velocity, env.vehicle.velocity_km_h / 3.6).all() + assert abs(env.agent.speed - env.agent.speed_km_h / 3.6) < 1e-4 + assert np.isclose(env.agent.velocity, env.agent.velocity_km_h / 3.6).all() for _ in range(10): o, r, tm, tc, info = env.step([0, 0]) - env.vehicle.set_velocity([0.3, 0.1], value=10, in_local_frame=False) - assert abs(env.vehicle.speed - 10) < 0.1 + env.agent.set_velocity([0.3, 0.1], value=10, in_local_frame=False) + assert abs(env.agent.speed - 10) < 0.1 assert np.isclose( - env.vehicle.velocity, + env.agent.velocity, np.array( [0.3 / np.linalg.norm(np.array([0.3, 0.1])) * 10, 0.1 / np.linalg.norm(np.array([0.3, 0.1])) * 10] ), rtol=1e-5, atol=1e-5 ).all() - assert abs(env.vehicle.speed - env.vehicle.speed_km_h / 3.6) < 0.0001 - assert np.isclose(env.vehicle.velocity, env.vehicle.velocity_km_h / 3.6).all() + assert abs(env.agent.speed - env.agent.speed_km_h / 3.6) < 0.0001 + assert np.isclose(env.agent.velocity, env.agent.velocity_km_h / 3.6).all() finally: env.close() @@ -80,87 +80,87 @@ def test_coordinates(render=False): ) try: o, _ = env.reset() - assert abs(env.vehicle.heading_theta) == 0 - assert np.isclose(env.vehicle.heading, [1.0, 0]).all() - env.vehicle.set_velocity([5, 0], in_local_frame=True) + assert abs(env.agent.heading_theta) == 0 + assert np.isclose(env.agent.heading, [1.0, 0]).all() + env.agent.set_velocity([5, 0], in_local_frame=True) for _ in range(10): - env.vehicle.set_velocity([5, 0], in_local_frame=True) + env.agent.set_velocity([5, 0], in_local_frame=True) o, r, tm, tc, info = env.step([0, 0]) - assert abs(env.vehicle.velocity[0] - 5.) < 1e-2 and abs(env.vehicle.velocity[1]) < 0.001 + assert abs(env.agent.velocity[0] - 5.) < 1e-2 and abs(env.agent.velocity[1]) < 0.001 o, _ = env.reset() - assert abs(env.vehicle.heading_theta) == 0 - assert np.isclose(env.vehicle.heading, [1.0, 0]).all() - env.vehicle.set_velocity([5, 0], in_local_frame=False) + assert abs(env.agent.heading_theta) == 0 + assert np.isclose(env.agent.heading, [1.0, 0]).all() + env.agent.set_velocity([5, 0], in_local_frame=False) for _ in range(10): o, r, tm, tc, info = env.step([0, 0]) - assert env.vehicle.velocity[0] > 3. 
and abs(env.vehicle.velocity[1]) < 0.001 + assert env.agent.velocity[0] > 3. and abs(env.agent.velocity[1]) < 0.001 env.reset() - env.vehicle.set_velocity([0, 5], in_local_frame=False) + env.agent.set_velocity([0, 5], in_local_frame=False) for _ in range(1): o, r, tm, tc, info = env.step([0, 0]) - assert env.vehicle.velocity[1] > 3. and abs(env.vehicle.velocity[0]) < 0.002 + assert env.agent.velocity[1] > 3. and abs(env.agent.velocity[0]) < 0.002 env.reset() - assert abs(env.vehicle.heading_theta) == 0 - assert np.isclose(env.vehicle.heading, [1.0, 0]).all() - env.vehicle.set_velocity([-5, 0], in_local_frame=False) + assert abs(env.agent.heading_theta) == 0 + assert np.isclose(env.agent.heading, [1.0, 0]).all() + env.agent.set_velocity([-5, 0], in_local_frame=False) for _ in range(10): o, r, tm, tc, info = env.step([0, 0]) - assert env.vehicle.velocity[0] < -3. and abs(env.vehicle.velocity[1]) < 0.001 + assert env.agent.velocity[0] < -3. and abs(env.agent.velocity[1]) < 0.001 - env.vehicle.set_velocity([0, -5], in_local_frame=False) + env.agent.set_velocity([0, -5], in_local_frame=False) for _ in range(1): o, r, tm, tc, info = env.step([0, 0]) - assert env.vehicle.velocity[1] < -3. and abs(env.vehicle.velocity[0]) < 0.002 + assert env.agent.velocity[1] < -3. and abs(env.agent.velocity[0]) < 0.002 # steering left env.reset() - begining_pos = env.vehicle.position - assert abs(env.vehicle.heading_theta) == 0 - assert np.isclose(env.vehicle.heading, [1.0, 0]).all() + begining_pos = env.agent.position + assert abs(env.agent.heading_theta) == 0 + assert np.isclose(env.agent.heading, [1.0, 0]).all() for _ in range(100): o, r, tm, tc, info = env.step([0.8, 0.8]) - assert env.vehicle.velocity[1] > 1. and abs(env.vehicle.velocity[0]) > 1 - assert env.vehicle.heading_theta > 0.3 # rad - assert env.vehicle.position[0] > begining_pos[0] and env.vehicle.position[1] > begining_pos[1] + assert env.agent.velocity[1] > 1. and abs(env.agent.velocity[0]) > 1 + assert env.agent.heading_theta > 0.3 # rad + assert env.agent.position[0] > begining_pos[0] and env.agent.position[1] > begining_pos[1] # steering right env.reset() - begining_pos = env.vehicle.position - assert abs(env.vehicle.heading_theta) == 0 - assert np.isclose(env.vehicle.heading, [1.0, 0]).all() + begining_pos = env.agent.position + assert abs(env.agent.heading_theta) == 0 + assert np.isclose(env.agent.heading, [1.0, 0]).all() for _ in range(100): o, r, tm, tc, info = env.step([-0.8, 0.8]) - assert env.vehicle.velocity[1] < -1. and abs(env.vehicle.velocity[0]) > 1 - assert env.vehicle.position[0] > begining_pos[0] and env.vehicle.position[1] < begining_pos[1] - assert env.vehicle.heading_theta < -0.3 # rad + assert env.agent.velocity[1] < -1. 
and abs(env.agent.velocity[0]) > 1 + assert env.agent.position[0] > begining_pos[0] and env.agent.position[1] < begining_pos[1] + assert env.agent.heading_theta < -0.3 # rad env.reset() - env.vehicle.set_heading_theta(np.deg2rad(90)) + env.agent.set_heading_theta(np.deg2rad(90)) for _ in range(10): o, r, tm, tc, info, = env.step([-0., 0.]) - assert wrap_to_pi(abs(env.vehicle.heading_theta - np.deg2rad(90))) < 1 - assert np.isclose(env.vehicle.heading, np.array([0, 1]), 1e-4, 1e-4).all() + assert wrap_to_pi(abs(env.agent.heading_theta - np.deg2rad(90))) < 1 + assert np.isclose(env.agent.heading, np.array([0, 1]), 1e-4, 1e-4).all() env.reset() - env.vehicle.set_heading_theta(np.deg2rad(45)) + env.agent.set_heading_theta(np.deg2rad(45)) for _ in range(10): o, r, tm, tc, info, = env.step([-0., 0.]) - assert wrap_to_pi(abs(env.vehicle.heading_theta - np.deg2rad(45))) < 1 - assert np.isclose(env.vehicle.heading, np.array([np.sqrt(2) / 2, np.sqrt(2) / 2]), 1e-4, 1e-4).all() + assert wrap_to_pi(abs(env.agent.heading_theta - np.deg2rad(45))) < 1 + assert np.isclose(env.agent.heading, np.array([np.sqrt(2) / 2, np.sqrt(2) / 2]), 1e-4, 1e-4).all() env.reset() - env.vehicle.set_heading_theta(np.deg2rad(-90)) + env.agent.set_heading_theta(np.deg2rad(-90)) for _ in range(10): o, r, tm, tc, info, = env.step([-0., 0.]) - assert abs(env.vehicle.heading_theta + np.deg2rad(-90) + np.pi) < 0.01 - assert np.isclose(env.vehicle.heading, np.array([0, -1]), 1e-4, 1e-4).all() + assert abs(env.agent.heading_theta + np.deg2rad(-90) + np.pi) < 0.01 + assert np.isclose(env.agent.heading, np.array([0, -1]), 1e-4, 1e-4).all() finally: env.close() @@ -182,60 +182,60 @@ def test_set_angular_v_and_set_v_no_friction(render=False): o, _ = env.reset() for _ in range(100): # 10 s , np.pi/10 per second - env.vehicle.set_angular_velocity(np.pi / 10) + env.agent.set_angular_velocity(np.pi / 10) o, r, tm, tc, info = env.step([0, 0]) - assert abs(wrap_to_pi(env.vehicle.heading_theta) - np.pi) < 1e-2, env.vehicle.heading_theta - # print(env.vehicle.heading_theta / np.pi * 180) + assert abs(wrap_to_pi(env.agent.heading_theta) - np.pi) < 1e-2, env.agent.heading_theta + # print(env.agent.heading_theta / np.pi * 180) o, _ = env.reset() for _ in range(100): # 10 s , np.pi/10 per second - env.vehicle.set_angular_velocity(18, in_rad=False) + env.agent.set_angular_velocity(18, in_rad=False) o, r, tm, tc, info = env.step([0, 0]) - assert abs(wrap_to_pi(env.vehicle.heading_theta) - np.pi) < 1e-2, env.vehicle.heading_theta - # print(env.vehicle.heading_theta / np.pi * 180) + assert abs(wrap_to_pi(env.agent.heading_theta) - np.pi) < 1e-2, env.agent.heading_theta + # print(env.agent.heading_theta / np.pi * 180) o, _ = env.reset() - start = env.vehicle.position[0] + start = env.agent.position[0] for _ in range(100): # 10 s - env.vehicle.set_velocity([1, 0], in_local_frame=True) + env.agent.set_velocity([1, 0], in_local_frame=True) o, r, tm, tc, info = env.step([0, 0]) - assert abs(env.vehicle.position[0] - start - 10) < 5e-2, env.vehicle.position + assert abs(env.agent.position[0] - start - 10) < 5e-2, env.agent.position o, _ = env.reset() - start = env.vehicle.position[0] + start = env.agent.position[0] for _ in range(100): # 10 s - env.vehicle.set_velocity([1, 0], in_local_frame=False) + env.agent.set_velocity([1, 0], in_local_frame=False) o, r, tm, tc, info = env.step([0, 0]) - assert abs(env.vehicle.position[0] - start - 10) < 5e-2, env.vehicle.position + assert abs(env.agent.position[0] - start - 10) < 5e-2, env.agent.position o, _ = 
env.reset() - start = env.vehicle.position[1] + start = env.agent.position[1] for _ in range(10): # 10 s - env.vehicle.set_velocity([0, 1], in_local_frame=False) + env.agent.set_velocity([0, 1], in_local_frame=False) o, r, tm, tc, info = env.step([0, 0]) - assert abs(env.vehicle.position[1] - start - 1) < 5e-2, env.vehicle.position + assert abs(env.agent.position[1] - start - 1) < 5e-2, env.agent.position o, _ = env.reset() - start = env.vehicle.position[0] - env.vehicle.set_heading_theta(-np.pi / 2) + start = env.agent.position[0] + env.agent.set_heading_theta(-np.pi / 2) for _ in range(100): # 10 s - env.vehicle.set_velocity([0, 1], in_local_frame=True) + env.agent.set_velocity([0, 1], in_local_frame=True) o, r, tm, tc, info = env.step([0, 0]) - assert abs(env.vehicle.position[0] - start - 10) < 5e-2, env.vehicle.position + assert abs(env.agent.position[0] - start - 10) < 5e-2, env.agent.position o, _ = env.reset() - start = env.vehicle.position[0] - env.vehicle.set_heading_theta(-np.pi / 2) + start = env.agent.position[0] + env.agent.set_heading_theta(-np.pi / 2) for _ in range(100): # 10 s - env.vehicle.set_velocity([1, 0], in_local_frame=False) + env.agent.set_velocity([1, 0], in_local_frame=False) o, r, tm, tc, info = env.step([0, 0]) - assert abs(env.vehicle.position[0] - start - 10) < 5e-2, env.vehicle.position + assert abs(env.agent.position[0] - start - 10) < 5e-2, env.agent.position finally: env.close() diff --git a/metadrive/tests/test_component/test_traffic_light.py b/metadrive/tests/test_component/test_traffic_light.py index 91b6398b8..89cfce3bc 100644 --- a/metadrive/tests/test_component/test_traffic_light.py +++ b/metadrive/tests/test_component/test_traffic_light.py @@ -30,7 +30,7 @@ def test_traffic_light(render=False, manual_control=False, debug=False): test_success = False for s in range(1, 100): env.step([0, 1]) - if env.vehicle.green_light: + if env.agent.green_light: test_success = True break assert test_success @@ -42,7 +42,7 @@ def test_traffic_light(render=False, manual_control=False, debug=False): test_success = False for s in range(1, 100): env.step([0, 1]) - if env.vehicle.red_light: + if env.agent.red_light: test_success = True break assert test_success @@ -54,7 +54,7 @@ def test_traffic_light(render=False, manual_control=False, debug=False): test_success = False for s in range(1, 100): env.step([0, 1]) - if env.vehicle.yellow_light: + if env.agent.yellow_light: test_success = True break assert test_success diff --git a/metadrive/tests/test_component/test_vehicle_coordinates.py b/metadrive/tests/test_component/test_vehicle_coordinates.py index aa6697eff..8ed3359e0 100644 --- a/metadrive/tests/test_component/test_vehicle_coordinates.py +++ b/metadrive/tests/test_component/test_vehicle_coordinates.py @@ -21,8 +21,8 @@ def test_coordinates_shift(): } ) env.reset() - env.vehicle.set_velocity([1, 0], 10) - # print(env.vehicle.speed) + env.agent.set_velocity([1, 0], 10) + # print(env.agent.speed) pos = [(x, y) for x in [-10, 0, 10] for y in [-20, 0, 20]] * 10 p = pos.pop() for s in range(1, 100000): @@ -32,8 +32,8 @@ def test_coordinates_shift(): break p = pos.pop() p = np.asarray(p) - heading, side = env.vehicle.convert_to_local_coordinates(p, env.vehicle.position) - recover_pos = env.vehicle.convert_to_world_coordinates([heading, side], env.vehicle.position) + heading, side = env.agent.convert_to_local_coordinates(p, env.agent.position) + recover_pos = env.agent.convert_to_world_coordinates([heading, side], env.agent.position) if abs(recover_pos[0] - p[0]) + 
abs(recover_pos[1] - p[1]) > 0.1: raise ValueError("vehicle coordinates convert error!") if tm: diff --git a/metadrive/tests/test_env/_test_change_friction_density_envs.py b/metadrive/tests/test_env/_test_change_friction_density_envs.py index 4656a49b7..ed60e06b1 100644 --- a/metadrive/tests/test_env/_test_change_friction_density_envs.py +++ b/metadrive/tests/test_env/_test_change_friction_density_envs.py @@ -24,7 +24,7 @@ def test_change_friction(): # SidePassEnv is tested in test_object_collision.py! # def test_side_pass_env(): -# _run(SidePassEnv({"target_vehicle_configs": {"default_agent": {"show_navi_mark": False}}})) +# _run(SidePassEnv({"agent_configs": {"default_agent": {"show_navi_mark": False}}})) def test_change_density_env(): diff --git a/metadrive/tests/test_env/test_ma_bidirection.py b/metadrive/tests/test_env/test_ma_bidirection.py index 89f1a5d07..9f85060d8 100644 --- a/metadrive/tests/test_env/test_ma_bidirection.py +++ b/metadrive/tests/test_env/test_ma_bidirection.py @@ -19,15 +19,15 @@ def test_ma_bidirection_idm(render=False): index = ('1y0_1_', '2B0_0_', 0) try: o, _ = env.reset() - env.vehicle.set_velocity([1, 0.1], 10) - # print(env.vehicle.speed) + env.agent.set_velocity([1, 0.1], 10) + # print(env.agent.speed) pass_test = False for s in range(1, 10000): o, r, tm, tc, info = env.step(env.action_space.sample()) - _, lat = env.vehicle.lane.local_coordinates(env.vehicle.position) - if abs(lat) > env.vehicle.lane.width / 2 + 0.1 and len(env.vehicle.navigation.current_ref_lanes) == 1: + _, lat = env.agent.lane.local_coordinates(env.agent.position) + if abs(lat) > env.agent.lane.width / 2 + 0.1 and len(env.agent.navigation.current_ref_lanes) == 1: raise ValueError("IDM can not pass bidirection block") - if env.vehicle.lane.index == index and abs(lat) < 0.1: + if env.agent.lane.index == index and abs(lat) < 0.1: pass_test = True if (tm or tc) and info["arrive_dest"]: break diff --git a/metadrive/tests/test_env/test_ma_bottleneck_env.py b/metadrive/tests/test_env/test_ma_bottleneck_env.py index 28dba6437..ef13a6f62 100644 --- a/metadrive/tests/test_env/test_ma_bottleneck_env.py +++ b/metadrive/tests/test_env/test_ma_bottleneck_env.py @@ -10,7 +10,7 @@ def _check_spaces_before_reset(env): - a = set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) assert a == b == c @@ -18,7 +18,7 @@ def _check_spaces_before_reset(env): def _check_spaces_after_reset(env, obs=None): - a = set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) assert a == b _check_shape(env) @@ -31,7 +31,7 @@ def _check_spaces_after_reset(env, obs=None): def _check_shape(env): b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) - d = set(env.vehicles.keys()) + d = set(env.agents.keys()) e = set(env.engine.agents.keys()) f = set([k for k in env.observation_space.spaces.keys() if not env.dones[k]]) assert d == e == f, (b, c, d, e, f) @@ -61,7 +61,7 @@ def _act(env, action): obs, reward, terminated, truncated, info = env.step(action) _check_shape(env) if not terminated["__all__"]: - assert len(env.vehicles) > 0 + assert len(env.agents) > 0 if not (set(obs.keys()) == set(reward.keys()) == set(env.observation_space.spaces.keys())): raise ValueError assert env.observation_space.contains(obs) @@ -89,12 +89,12 @@ def test_ma_bottleneck_env(): _check_spaces_after_reset(env, obs) 
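# [review note] The hunks above migrate every single-agent accessor from
# env.vehicle to env.agent; the speed/velocity setters keep their signatures.
# A minimal, hedged sketch of the renamed API as exercised by
# test_set_get_vehicle_attribute -- the config values here are illustrative:
import numpy as np
from metadrive.envs.metadrive_env import MetaDriveEnv

env = MetaDriveEnv({"traffic_density": 0.0, "use_render": False})
try:
    env.reset()
    env.agent.set_velocity([5, 0], in_local_frame=False)  # world-frame m/s
    obs, reward, terminated, truncated, info = env.step([0, 0])
    assert abs(env.agent.speed - 5) < 0.1                 # speed is in m/s
    assert np.isclose(env.agent.velocity, env.agent.velocity_km_h / 3.6).all()
finally:
    env.close()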
assert env.observation_space.contains(obs) for step in range(100): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if step == 0: assert not any(tm.values()) assert not any(tc.values()) - # # print("Current number of vehicles: ", len(env.vehicles)) + # # print("Current number of vehicles: ", len(env.agents)) finally: env.close() @@ -121,11 +121,11 @@ def test_ma_bottleneck_horizon(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) - last_keys = set(env.vehicles.keys()) + last_keys = set(env.agents.keys()) for step in range(1, 1000): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - new_keys = set(env.vehicles.keys()) + new_keys = set(env.agents.keys()) if step == 0: assert not any(tm.values()) assert not any(tc.values()) @@ -165,7 +165,7 @@ def test_ma_bottleneck_reset(): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(1000): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if step == 0: assert not any(tm.values()) @@ -177,7 +177,7 @@ def test_ma_bottleneck_reset(): _check_spaces_after_reset(env, obs) assert set(env.observation_space.spaces.keys()) == set(env.action_space.spaces.keys()) == \ set(env.observations.keys()) == set(obs.keys()) == \ - set(env.config["target_vehicle_configs"].keys()) + set(env.config["agent_configs"].keys()) break finally: @@ -197,11 +197,11 @@ def test_ma_bottleneck_reset(): for step in range(1000): # for _ in range(2): - # act = {k: [1, 1] for k in env.vehicles.keys()} + # act = {k: [1, 1] for k in env.agents.keys()} # o, r, tm, tc, i = _act(env, act) # Force vehicle to success! - for v_id, v in env.vehicles.items(): + for v_id, v in env.agents.items(): loc = v.navigation.final_lane.end # vehicle will stack together to explode! 
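# [review note] Multi-agent envs get the same rename: the per-agent dict is
# env.agents (was env.vehicles), and observations/rewards/dones stay keyed by
# agent id. Hedged sketch of the dict-style rollout these tests rely on
# (the num_agents value is illustrative):
from metadrive.envs.marl_envs.marl_bottleneck import MultiAgentBottleneckEnv

env = MultiAgentBottleneckEnv({"num_agents": 4})
try:
    obs, info = env.reset()
    for _ in range(10):
        actions = {agent_id: [0.0, 1.0] for agent_id in env.agents}
        obs, rewards, terminateds, truncateds, infos = env.step(actions)
        if terminateds["__all__"]:  # aggregate done flag, as asserted above
            break
finally:
    env.close()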
v.set_position(loc, height=int(v_id[5:]) * 2) @@ -218,10 +218,10 @@ def test_ma_bottleneck_reset(): # # print('sss') assert env._is_arrive_destination(v) - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - for v in env.vehicles.values(): + for v in env.agents.values(): assert len(v.navigation.checkpoints) > 2 for kkk, iii in i.items(): @@ -275,11 +275,11 @@ def _no_close_spawn(vehicles): obs, _ = env.reset() _check_spaces_after_reset(env) for _ in range(10): - o, r, tm, tc, i = env.step({k: [0, 0] for k in env.vehicles.keys()}) + o, r, tm, tc, i = env.step({k: [0, 0] for k in env.agents.keys()}) # print(d) assert not any(tm.values()) assert not any(tc.values()) - _no_close_spawn(env.vehicles) + _no_close_spawn(env.agents) # print('Finish {} resets.'.format(num_r)) finally: env.close() @@ -296,7 +296,7 @@ def test_ma_bottleneck_reward_done_alignment(): assert env.observation_space.contains(obs) for action in [-1, 1]: for step in range(5000): - act = {k: [action, 1] for k in env.vehicles.keys()} + act = {k: [action, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) for kkk, ddd in tm.items(): if ddd and kkk != "__all__" and not tm["__all__"] and not i[kkk]["max_step"]: @@ -338,11 +338,11 @@ def test_ma_bottleneck_reward_done_alignment(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) for step in range(5): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - env.vehicles["agent0"].set_position(env.vehicles["agent1"].position, height=1.2) + env.agents["agent0"].set_position(env.agents["agent1"].position, height=1.2) for step in range(5000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if not any(tm.values()): @@ -398,15 +398,15 @@ def test_ma_bottleneck_reward_done_alignment(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) for step in range(1): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - for v_id, v in env.vehicles.items(): + for v_id, v in env.agents.items(): if v_id != "agent0": v.set_static(True) for step in range(5000): - act = {k: [0, 1] for k in env.vehicles.keys()} + act = {k: [0, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) for kkk, iii in i.items(): if iii["crash"]: @@ -445,10 +445,10 @@ def test_ma_bottleneck_reward_done_alignment(): _check_spaces_before_reset(env) obs, _ = env.reset() _check_spaces_after_reset(env) - env.vehicles["agent0"].set_position(env.vehicles["agent0"].navigation.final_lane.end) + env.agents["agent0"].set_position(env.agents["agent0"].navigation.final_lane.end) assert env.observation_space.contains(obs) for step in range(5000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if tm["__all__"]: break @@ -489,7 +489,7 @@ def _safe_places(self): _check_spaces_after_reset(env) ep_reward = 0.0 for step in range(1000): - act = {k: [0, 1] for k in env.vehicles.keys()} + act = {k: [0, 1] for k in env.agents.keys()} o, r, tm, tc, i = env.step(act) ep_reward += next(iter(r.values())) if any(tm.values()): @@ -552,7 +552,7 @@ def test_ma_bottleneck_no_short_episode(): tm = {"__all__": False} for step in range(2000): # act = {k: actions[np.random.choice(len(actions))] for k in o.keys()} - act = {k: 
actions[np.random.choice(len(actions))] for k in env.vehicles.keys()} + act = {k: actions[np.random.choice(len(actions))] for k in env.agents.keys()} o_keys = set(o.keys()).union({"__all__"}) a_keys = set(env.action_space.spaces.keys()).union(set(tm.keys())) assert o_keys == a_keys @@ -589,13 +589,13 @@ def test_ma_bottleneck_horizon_termination(): should_respawn = set() special_agents = set(["agent0", "agent7"]) for step in range(1, 10000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} for v_id in act.keys(): if v_id in special_agents: act[v_id] = [1, 1] # Add some randomness else: - if v_id in env.vehicles: - env.vehicles[v_id].set_static(True) + if v_id in env.agents: + env.agents[v_id].set_static(True) obs, r, tm, tc, i = _act(env, act) if step == 0 or step == 1: assert not any(tm.values()) @@ -649,12 +649,12 @@ def check_pos(vehicles): assert env.observation_space.contains(obs) for step in range(50): env.reset() - check_pos(list(env.vehicles.values())) - for v_id in list(env.vehicles.keys())[:20]: - env.agent_manager.finish(v_id) - env.step({k: [1, 1] for k in env.vehicles.keys()}) - env.step({k: [1, 1] for k in env.vehicles.keys()}) - env.step({k: [1, 1] for k in env.vehicles.keys()}) + check_pos(list(env.agents.values())) + for v_id in list(env.agents.keys())[:20]: + env.agent_manager._finish(v_id) + env.step({k: [1, 1] for k in env.agents.keys()}) + env.step({k: [1, 1] for k in env.agents.keys()}) + env.step({k: [1, 1] for k in env.agents.keys()}) finally: env.close() @@ -690,8 +690,8 @@ def check_pos(vehicles): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(50): - check_pos(list(env.vehicles.values())) - o, r, tm, tc, i = env.step({k: [0, 1] for k in env.vehicles.keys()}) + check_pos(list(env.agents.values())) + o, r, tm, tc, i = env.step({k: [0, 1] for k in env.agents.keys()}) env.reset() if tm["__all__"]: break @@ -712,11 +712,11 @@ def test_randomize_spawn_place(): try: obs, _ = env.reset() for step in range(100): - act = {k: [1, 1] for k in env.vehicles.keys()} - last_pos = {kkk: v.position for kkk, v in env.vehicles.items()} + act = {k: [1, 1] for k in env.agents.keys()} + last_pos = {kkk: v.position for kkk, v in env.agents.items()} o, r, tm, tc, i = env.step(act) obs, _ = env.reset() - new_pos = {kkk: v.position for kkk, v in env.vehicles.items()} + new_pos = {kkk: v.position for kkk, v in env.agents.items()} for kkk, new_p in new_pos.items(): assert not np.all(new_p == last_pos[kkk]), (new_p, last_pos[kkk], kkk) finally: diff --git a/metadrive/tests/test_env/test_ma_env_force_reset.py b/metadrive/tests/test_env/test_ma_env_force_reset.py index 961ebf968..f3579b24d 100644 --- a/metadrive/tests/test_env/test_ma_env_force_reset.py +++ b/metadrive/tests/test_env/test_ma_env_force_reset.py @@ -15,15 +15,15 @@ def close_and_reset_num_agents(env, num_agents, raw_input_config): e = MultiAgentRoundaboutEnv(config) _raw_input_config = copy.deepcopy(config) e.reset() - assert len(e.vehicles) == e.num_agents == len(e.config["target_vehicle_configs"]) == 1 + assert len(e.vehicles) == e.num_agents == len(e.config["agent_configs"]) == 1 close_and_reset_num_agents(e, num_agents=2, raw_input_config=_raw_input_config) e.reset() - assert len(e.vehicles) == e.num_agents == len(e.config["target_vehicle_configs"]) == 2 + assert len(e.vehicles) == e.num_agents == len(e.config["agent_configs"]) == 2 close_and_reset_num_agents(e, num_agents=5, raw_input_config=_raw_input_config) e.reset() - assert 
len(e.vehicles) == e.num_agents == len(e.config["target_vehicle_configs"]) == 5 + assert len(e.vehicles) == e.num_agents == len(e.config["agent_configs"]) == 5 e.close() diff --git a/metadrive/tests/test_env/test_ma_intersection.py b/metadrive/tests/test_env/test_ma_intersection.py index 8c9f3270d..3a9bcbb97 100644 --- a/metadrive/tests/test_env/test_ma_intersection.py +++ b/metadrive/tests/test_env/test_ma_intersection.py @@ -13,7 +13,7 @@ def _check_spaces_before_reset(env): - a = set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) assert a == b == c @@ -21,7 +21,7 @@ def _check_spaces_before_reset(env): def _check_spaces_after_reset(env, obs=None): - a = set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) assert a == b _check_shape(env) @@ -34,7 +34,7 @@ def _check_spaces_after_reset(env, obs=None): def _check_shape(env): b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) - d = set(env.vehicles.keys()) + d = set(env.agents.keys()) e = set(env.engine.agents.keys()) f = set([k for k in env.observation_space.spaces.keys() if not env.dones[k]]) assert d == e == f, (b, c, d, e, f) @@ -64,7 +64,7 @@ def _act(env, action): obs, reward, terminated, truncated, info = env.step(action) _check_shape(env) if not terminated["__all__"]: - assert len(env.vehicles) > 0 + assert len(env.agents) > 0 if not (set(obs.keys()) == set(reward.keys()) == set(env.observation_space.spaces.keys())): raise ValueError assert env.observation_space.contains(obs) @@ -92,7 +92,7 @@ def test_ma_intersection_env(): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(100): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if step == 0: assert not any(tm.values()) @@ -123,11 +123,11 @@ def test_ma_intersection_horizon(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) - last_keys = set(env.vehicles.keys()) + last_keys = set(env.agents.keys()) for step in range(1, 1000): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - new_keys = set(env.vehicles.keys()) + new_keys = set(env.agents.keys()) if step == 0: assert not any(tm.values()) if any(tm.values()): @@ -184,7 +184,7 @@ def test_ma_horizon_termination(): assert env.observation_space.contains(obs) max_agent = set() for step in range(1, 1000): - act = {k: [1, 1] if k[-1] in ["0", "1", "2"] else [0, 0] for k in env.vehicles.keys()} + act = {k: [1, 1] if k[-1] in ["0", "1", "2"] else [0, 0] for k in env.agents.keys()} for k in act.keys(): ep_len[k] += 1 o, r, tm, tc, i = _act(env, act) @@ -213,7 +213,7 @@ def test_ma_intersection_reset(): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(1000): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if step == 0: assert not any(tm.values()) @@ -224,7 +224,7 @@ def test_ma_intersection_reset(): _check_spaces_after_reset(env, obs) assert set(env.observation_space.spaces.keys()) == set(env.action_space.spaces.keys()) == \ set(env.observations.keys()) == set(obs.keys()) == \ - 
set(env.config["target_vehicle_configs"].keys()) + set(env.config["agent_configs"].keys()) break finally: @@ -244,11 +244,11 @@ def test_ma_intersection_reset(): for step in range(1000): # for _ in range(2): - # act = {k: [1, 1] for k in env.vehicles.keys()} + # act = {k: [1, 1] for k in env.agents.keys()} # o, r, tm, tc, i = _act(env, act) # Force vehicle to success! - for v_id, v in env.vehicles.items(): + for v_id, v in env.agents.items(): loc = v.navigation.final_lane.end # vehicle will stack together to explode! v.set_position(loc, height=int(v_id[5:]) * 2) @@ -266,10 +266,10 @@ def test_ma_intersection_reset(): # print('sss') assert env._is_arrive_destination(v) - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - for v in env.vehicles.values(): + for v in env.agents.values(): assert len(v.navigation.checkpoints) > 2 for kkk, iii in i.items(): @@ -323,9 +323,9 @@ def _no_close_spawn(vehicles): obs, _ = env.reset() _check_spaces_after_reset(env) for _ in range(10): - o, r, tm, tc, i = env.step({k: [0, 0] for k in env.vehicles.keys()}) + o, r, tm, tc, i = env.step({k: [0, 0] for k in env.agents.keys()}) assert not (any(tm.values()) or any(tc.values())) - _no_close_spawn(env.vehicles) + _no_close_spawn(env.agents) # print('Finish {} resets.'.format(num_r)) finally: env.close() @@ -342,7 +342,7 @@ def _test_ma_intersection_reward_done_alignment(): assert env.observation_space.contains(obs) for action in [-1, 1]: for step in range(5000): - act = {k: [action, 1] for k in env.vehicles.keys()} + act = {k: [action, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) for kkk, ddd in tm.items(): if ddd and kkk != "__all__": @@ -382,11 +382,11 @@ def _test_ma_intersection_reward_done_alignment(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) for step in range(5): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - env.vehicles["agent0"].set_position(env.vehicles["agent1"].position, height=1.2) + env.agents["agent0"].set_position(env.agents["agent1"].position, height=1.2) for step in range(5000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if not any(tm.values()): @@ -442,15 +442,15 @@ def _test_ma_intersection_reward_done_alignment(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) for step in range(1): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - for v_id, v in env.vehicles.items(): + for v_id, v in env.agents.items(): if v_id != "agent0": v.set_static(True) for step in range(5000): - act = {k: [0, 1] for k in env.vehicles.keys()} + act = {k: [0, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) for kkk, iii in i.items(): if iii["crash"]: @@ -489,10 +489,10 @@ def _test_ma_intersection_reward_done_alignment(): _check_spaces_before_reset(env) obs, _ = env.reset() _check_spaces_after_reset(env) - env.vehicles["agent0"].set_position(env.vehicles["agent0"].navigation.final_lane.end) + env.agents["agent0"].set_position(env.agents["agent0"].navigation.final_lane.end) assert env.observation_space.contains(obs) for step in range(5000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if tm["__all__"]: break @@ -534,7 +534,7 @@ def _safe_places(self): 
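# [review note] The config-key rename (target_vehicle_configs -> agent_configs)
# is applied to every space check in these tests. Per-agent options are now
# overridden under the new key; a hedged sketch (option values illustrative,
# show_navi_mark appears elsewhere in this diff):
from metadrive.envs.marl_envs.marl_intersection import MultiAgentIntersectionEnv

env = MultiAgentIntersectionEnv({
    "num_agents": 2,
    "agent_configs": {
        "agent0": {"show_navi_mark": False},
        "agent1": {"show_navi_mark": False},
    },
})
try:
    env.reset()
    assert set(env.config["agent_configs"]) >= {"agent0", "agent1"}
finally:
    env.close()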
_check_spaces_after_reset(env) ep_reward = 0.0 for step in range(1000): - act = {k: [0, 1] for k in env.vehicles.keys()} + act = {k: [0, 1] for k in env.agents.keys()} o, r, tm, tc, i = env.step(act) ep_reward += next(iter(r.values())) if any(tm.values()): @@ -597,7 +597,7 @@ def test_ma_intersection_no_short_episode(): tm = {"__all__": False} for step in range(2000): # act = {k: actions[np.random.choice(len(actions))] for k in o.keys()} - act = {k: actions[np.random.choice(len(actions))] for k in env.vehicles.keys()} + act = {k: actions[np.random.choice(len(actions))] for k in env.agents.keys()} o_keys = set(o.keys()).union({"__all__"}) a_keys = set(env.action_space.spaces.keys()).union(set(tm.keys())) assert o_keys == a_keys @@ -634,13 +634,13 @@ def test_ma_intersection_horizon_termination(): should_respawn = set() special_agents = set(["agent0", "agent7"]) for step in range(1, 10000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} for v_id in act.keys(): if v_id in special_agents: act[v_id] = [1, 1] # Add some randomness else: - if v_id in env.vehicles: - env.vehicles[v_id].set_static(True) + if v_id in env.agents: + env.agents[v_id].set_static(True) obs, r, tm, tc, i = _act(env, act) if step == 0 or step == 1: assert not any(tm.values()) @@ -694,12 +694,12 @@ def check_pos(vehicles): assert env.observation_space.contains(obs) for step in range(50): env.reset() - check_pos(list(env.vehicles.values())) - for v_id in list(env.vehicles.keys())[:20]: - env.agent_manager.finish(v_id) - env.step({k: [1, 1] for k in env.vehicles.keys()}) - env.step({k: [1, 1] for k in env.vehicles.keys()}) - env.step({k: [1, 1] for k in env.vehicles.keys()}) + check_pos(list(env.agents.values())) + for v_id in list(env.agents.keys())[:20]: + env.agent_manager._finish(v_id) + env.step({k: [1, 1] for k in env.agents.keys()}) + env.step({k: [1, 1] for k in env.agents.keys()}) + env.step({k: [1, 1] for k in env.agents.keys()}) finally: env.close() @@ -727,8 +727,8 @@ def check_pos(vehicles): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(50): - check_pos(list(env.vehicles.values())) - o, r, tm, tc, i = env.step({k: [0, 1] for k in env.vehicles.keys()}) + check_pos(list(env.agents.values())) + o, r, tm, tc, i = env.step({k: [0, 1] for k in env.agents.keys()}) env.reset() if tm["__all__"]: break @@ -742,11 +742,11 @@ def test_randomize_spawn_place(): try: obs, _ = env.reset() for step in range(100): - act = {k: [1, 1] for k in env.vehicles.keys()} - last_pos = {kkk: v.position for kkk, v in env.vehicles.items()} + act = {k: [1, 1] for k in env.agents.keys()} + last_pos = {kkk: v.position for kkk, v in env.agents.items()} o, r, tm, tc, i = env.step(act) obs, _ = env.reset() - new_pos = {kkk: v.position for kkk, v in env.vehicles.items()} + new_pos = {kkk: v.position for kkk, v in env.agents.items()} for kkk, new_p in new_pos.items(): assert not np.all(new_p == last_pos[kkk]), (new_p, last_pos[kkk], kkk) finally: diff --git a/metadrive/tests/test_env/test_ma_parking_lot.py b/metadrive/tests/test_env/test_ma_parking_lot.py index 4871fdd08..591fc82b5 100644 --- a/metadrive/tests/test_env/test_ma_parking_lot.py +++ b/metadrive/tests/test_env/test_ma_parking_lot.py @@ -9,7 +9,7 @@ def _check_spaces_before_reset(env): - a = set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) assert a == b == c @@ 
-17,7 +17,7 @@ def _check_spaces_before_reset(env): def _check_spaces_after_reset(env, obs=None): - a = set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) assert a == b _check_shape(env) @@ -30,7 +30,7 @@ def _check_spaces_after_reset(env, obs=None): def _check_shape(env): b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) - d = set(env.vehicles.keys()) + d = set(env.agents.keys()) e = set(env.engine.agents.keys()) f = set([k for k in env.observation_space.spaces.keys() if not env.dones[k]]) assert d == e == f, (b, c, d, e, f) @@ -60,7 +60,7 @@ def _act(env, action): obs, reward, terminated, truncated, info = env.step(action) _check_shape(env) if not terminated["__all__"]: - assert len(env.vehicles) > 0 + assert len(env.agents) > 0 if not (set(obs.keys()) == set(reward.keys()) == set(env.observation_space.spaces.keys())): raise ValueError assert env.observation_space.contains(obs) @@ -89,7 +89,7 @@ def test_ma_parking_lot_env(): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(100): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if step == 0: assert not any(tm.values()) @@ -121,11 +121,11 @@ def test_ma_parking_lot_horizon(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) - last_keys = set(env.vehicles.keys()) + last_keys = set(env.agents.keys()) for step in range(1, 1000): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - new_keys = set(env.vehicles.keys()) + new_keys = set(env.agents.keys()) if step == 0: assert not any(tm.values()) assert not any(tc.values()) @@ -165,7 +165,7 @@ def test_ma_parking_lot_reset(): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(1000): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if step == 0: assert not any(tm.values()) @@ -177,7 +177,7 @@ def test_ma_parking_lot_reset(): _check_spaces_after_reset(env, obs) assert set(env.observation_space.spaces.keys()) == set(env.action_space.spaces.keys()) == \ set(env.observations.keys()) == set(obs.keys()) == \ - set(env.config["target_vehicle_configs"].keys()) + set(env.config["agent_configs"].keys()) break finally: @@ -197,11 +197,11 @@ def test_ma_parking_lot_reset(): for step in range(1000): # for _ in range(2): - # act = {k: [1, 1] for k in env.vehicles.keys()} + # act = {k: [1, 1] for k in env.agents.keys()} # o, r, tm, tc, i = _act(env, act) # Force vehicle to success! - for v_id, v in env.vehicles.items(): + for v_id, v in env.agents.items(): loc = v.navigation.final_lane.end # vehicle will stack together to explode! 
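# [review note] AgentManager.finish() becomes the private _finish() in this PR,
# so the despawn checks below switch to the underscored call. Hedged sketch of
# that pattern (num_agents is illustrative; _finish is internal API, used here
# only because the tests exercise it directly):
from metadrive.envs.marl_envs.marl_parking_lot import MultiAgentParkingLotEnv

env = MultiAgentParkingLotEnv({"num_agents": 4})
try:
    env.reset()
    first_agent = next(iter(env.agents))
    env.agent_manager._finish(first_agent)  # despawn; manager handles respawn
    env.step({agent_id: [0.0, 0.0] for agent_id in env.agents})
finally:
    env.close()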
v.set_position(loc, height=int(v_id[5:]) * 2) @@ -219,10 +219,10 @@ def test_ma_parking_lot_reset(): # print('sss') assert env._is_arrive_destination(v) - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - for v in env.vehicles.values(): + for v in env.agents.values(): assert len(v.navigation.checkpoints) > 2 for kkk, iii in i.items(): @@ -271,10 +271,10 @@ def _no_close_spawn(vehicles): obs, _ = env.reset() _check_spaces_after_reset(env) for _ in range(10): - o, r, tm, tc, i = env.step({k: [0, 0] for k in env.vehicles.keys()}) + o, r, tm, tc, i = env.step({k: [0, 0] for k in env.agents.keys()}) assert not any(tm.values()) assert not any(tc.values()) - _no_close_spawn(env.vehicles) + _no_close_spawn(env.agents) # print('Finish {} resets.'.format(num_r)) finally: env.close() @@ -292,7 +292,7 @@ def test_ma_parking_lot_reward_done_alignment(): out_num = 0 for action in [-1, 1]: for step in range(5000): - act = {k: [action, 1] for k in env.vehicles.keys()} + act = {k: [action, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) for kkk, ddd in tm.items(): if ddd and kkk != "__all__": @@ -335,11 +335,11 @@ def test_ma_parking_lot_reward_done_alignment(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) for step in range(5): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - env.vehicles["agent0"].set_position(env.vehicles["agent1"].position, height=1.2) + env.agents["agent0"].set_position(env.agents["agent1"].position, height=1.2) for step in range(5000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if not any(tm.values()): @@ -399,15 +399,15 @@ def test_ma_parking_lot_reward_done_alignment(): obs, _ = env.reset(seed=0) _check_spaces_after_reset(env, obs) for step in range(1): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - for v_id, v in env.vehicles.items(): + for v_id, v in env.agents.items(): if v_id != "agent2": v.set_static(True) out_num = 0 for step in range(5000): - act = {k: [0, 1] for k in env.vehicles.keys()} + act = {k: [0, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) for kkk, iii in i.items(): if iii["crash"] and not iii["crash_sidewalk"]: @@ -449,10 +449,10 @@ def test_ma_parking_lot_reward_done_alignment(): _check_spaces_before_reset(env) obs, _ = env.reset() _check_spaces_after_reset(env) - env.vehicles["agent0"].set_position(env.vehicles["agent0"].navigation.final_lane.end) + env.agents["agent0"].set_position(env.agents["agent0"].navigation.final_lane.end) assert env.observation_space.contains(obs) for step in range(5000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if tm["__all__"]: break @@ -518,7 +518,7 @@ def test_ma_parking_lot_no_short_episode(): tm = {"__all__": False} for step in range(2000): # act = {k: actions[np.random.choice(len(actions))] for k in o.keys()} - act = {k: actions[np.random.choice(len(actions))] for k in env.vehicles.keys()} + act = {k: actions[np.random.choice(len(actions))] for k in env.agents.keys()} o_keys = set(o.keys()).union({"__all__"}) a_keys = set(env.action_space.spaces.keys()).union(set(tm.keys())) assert o_keys == a_keys @@ -554,9 +554,9 @@ def test_ma_parking_lot_horizon_termination(): assert 
env.observation_space.contains(obs) should_respawn = set() for step in range(1, 10000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} for v_id in act.keys(): - env.vehicles[v_id].set_static(True) + env.agents[v_id].set_static(True) obs, r, tm, tc, i = _act(env, act) # env.render("top_down", camera_position=(42.5, 0), film_size=(500, 500)) if step == 0 or step == 1: @@ -612,12 +612,12 @@ def check_pos(vehicles): assert env.observation_space.contains(obs) for step in range(50): env.reset() - check_pos(list(env.vehicles.values())) - for v_id in list(env.vehicles.keys())[:20]: - env.agent_manager.finish(v_id) - env.step({k: [1, 1] for k in env.vehicles.keys()}) - env.step({k: [1, 1] for k in env.vehicles.keys()}) - env.step({k: [1, 1] for k in env.vehicles.keys()}) + check_pos(list(env.agents.values())) + for v_id in list(env.agents.keys())[:20]: + env.agent_manager._finish(v_id) + env.step({k: [1, 1] for k in env.agents.keys()}) + env.step({k: [1, 1] for k in env.agents.keys()}) + env.step({k: [1, 1] for k in env.agents.keys()}) finally: env.close() @@ -645,8 +645,8 @@ def check_pos(vehicles): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(50): - check_pos(list(env.vehicles.values())) - o, r, tm, tc, i = env.step({k: [0, 1] for k in env.vehicles.keys()}) + check_pos(list(env.agents.values())) + o, r, tm, tc, i = env.step({k: [0, 1] for k in env.agents.keys()}) env.reset() if tm["__all__"]: break @@ -660,11 +660,11 @@ def test_randomize_spawn_place(): try: obs, _ = env.reset() for step in range(100): - act = {k: [1, 1] for k in env.vehicles.keys()} - last_pos = {kkk: v.position for kkk, v in env.vehicles.items()} + act = {k: [1, 1] for k in env.agents.keys()} + last_pos = {kkk: v.position for kkk, v in env.agents.items()} o, r, tm, tc, i = env.step(act) obs, _ = env.reset() - new_pos = {kkk: v.position for kkk, v in env.vehicles.items()} + new_pos = {kkk: v.position for kkk, v in env.agents.items()} for kkk, new_p in new_pos.items(): assert not np.all(new_p == last_pos[kkk]), (new_p, last_pos[kkk], kkk) finally: diff --git a/metadrive/tests/test_env/test_ma_racing.py b/metadrive/tests/test_env/test_ma_racing.py index 3d25a414b..a40b70bf6 100644 --- a/metadrive/tests/test_env/test_ma_racing.py +++ b/metadrive/tests/test_env/test_ma_racing.py @@ -9,7 +9,7 @@ def _check_spaces_before_reset(env): - a = set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) assert a == b == c @@ -17,7 +17,7 @@ def _check_spaces_before_reset(env): def _check_spaces_after_reset(env, obs=None): - a = set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) assert a == b _check_shape(env) @@ -30,7 +30,7 @@ def _check_spaces_after_reset(env, obs=None): def _check_shape(env): b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) - d = set(env.vehicles.keys()) + d = set(env.agents.keys()) e = set(env.engine.agents.keys()) f = set([k for k in env.observation_space.spaces.keys() if not env.dones[k]]) assert d == e == f, (b, c, d, e, f) @@ -60,7 +60,7 @@ def _act(env, action): obs, reward, terminated, truncated, info = env.step(action) _check_shape(env) if not terminated["__all__"]: - assert len(env.vehicles) > 0 + assert len(env.agents) > 0 if not (set(obs.keys()) == set(reward.keys()) == 
set(env.observation_space.spaces.keys())): raise ValueError assert env.observation_space.contains(obs) @@ -93,7 +93,7 @@ def test_ma_racing_env_with_IDM(num_agents): assert env.observation_space.contains(obs) episode_reward_record = defaultdict(float) for step in range(3_000): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) # env.render(mode="topdown") if step == 0: @@ -149,7 +149,7 @@ def test_guardrail_collision_detection(num_agents, render=False): assert env.observation_space.contains(obs) episode_reward_record = defaultdict(float) for step in range(3_000): - act = {k: [0, 1] for k in env.vehicles.keys()} + act = {k: [0, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) print(i) if render: diff --git a/metadrive/tests/test_env/test_ma_roundabout_env.py b/metadrive/tests/test_env/test_ma_roundabout_env.py index b59532f65..416d44535 100644 --- a/metadrive/tests/test_env/test_ma_roundabout_env.py +++ b/metadrive/tests/test_env/test_ma_roundabout_env.py @@ -10,7 +10,7 @@ def _check_spaces_before_reset(env): - a = set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) assert a == b == c @@ -18,7 +18,7 @@ def _check_spaces_before_reset(env): def _check_spaces_after_reset(env, obs=None): - a = set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) assert a == b _check_shape(env) @@ -31,7 +31,7 @@ def _check_spaces_after_reset(env, obs=None): def _check_shape(env): b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) - d = set(env.vehicles.keys()) + d = set(env.agents.keys()) e = set(env.engine.agents.keys()) f = set([k for k in env.observation_space.spaces.keys() if not env.dones[k]]) assert d == e == f, (b, c, d, e, f) @@ -61,7 +61,7 @@ def _act(env, action): obs, reward, terminated, truncated, info = env.step(action) _check_shape(env) if not terminated["__all__"]: - assert len(env.vehicles) > 0 + assert len(env.agents) > 0 if not (set(obs.keys()) == set(reward.keys()) == set(env.observation_space.spaces.keys())): raise ValueError assert env.observation_space.contains(obs) @@ -90,7 +90,7 @@ def test_ma_roundabout_env(): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(100): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if step == 0: assert not any(tm.values()) @@ -121,11 +121,11 @@ def test_ma_roundabout_horizon(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) - last_keys = set(env.vehicles.keys()) + last_keys = set(env.agents.keys()) for step in range(1, 1000): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - new_keys = set(env.vehicles.keys()) + new_keys = set(env.agents.keys()) if step == 0: assert not any(tm.values()) assert not any(tc.values()) @@ -166,7 +166,7 @@ def test_ma_roundabout_reset(): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(1000): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if step == 0: assert not any(tm.values()) @@ -177,7 +177,7 @@ def 
test_ma_roundabout_reset(): _check_spaces_after_reset(env, obs) assert set(env.observation_space.spaces.keys()) == set(env.action_space.spaces.keys()) == \ set(env.observations.keys()) == set(obs.keys()) == \ - set(env.config["target_vehicle_configs"].keys()) + set(env.config["agent_configs"].keys()) break finally: @@ -197,11 +197,11 @@ def test_ma_roundabout_reset(): for step in range(1000): # # for _ in range(2): - # act = {k: [1, 1] for k in env.vehicles.keys()} + # act = {k: [1, 1] for k in env.agents.keys()} # o, r, tm, tc, i = _act(env, act) # Force vehicle to success! - for v_id, v in env.vehicles.items(): + for v_id, v in env.agents.items(): loc = v.navigation.final_lane.end # vehicle will stack together to explode! v.set_position(loc, height=int(v_id[5:]) * 2) @@ -218,10 +218,10 @@ def test_ma_roundabout_reset(): # print('sss') assert env._is_arrive_destination(v) - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - for v in env.vehicles.values(): + for v in env.agents.values(): assert len(v.navigation.checkpoints) > 2 for kkk, iii in i.items(): @@ -266,10 +266,10 @@ def _no_close_spawn(vehicles): obs, _ = env.reset() _check_spaces_after_reset(env) for _ in range(10): - o, r, tm, tc, i = env.step({k: [0, 0] for k in env.vehicles.keys()}) + o, r, tm, tc, i = env.step({k: [0, 0] for k in env.agents.keys()}) assert not any(tm.values()) assert not any(tc.values()) - _no_close_spawn(env.vehicles) + _no_close_spawn(env.agents) # print('Finish {} resets.'.format(num_r)) finally: env.close() @@ -286,7 +286,7 @@ def test_ma_roundabout_reward_done_alignment(): assert env.observation_space.contains(obs) for action in [-1, 1]: for step in range(5000): - act = {k: [action, 1] for k in env.vehicles.keys()} + act = {k: [action, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) for kkk, ddd in tm.items(): if ddd and kkk != "__all__": @@ -328,11 +328,11 @@ def test_ma_roundabout_reward_done_alignment_1(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) for step in range(5): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - env.vehicles["agent0"].set_position(env.vehicles["agent1"].position, height=1.2) + env.agents["agent0"].set_position(env.agents["agent1"].position, height=1.2) for step in range(5000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if not any(tm.values()): @@ -388,15 +388,15 @@ def test_ma_roundabout_reward_done_alignment_1(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) for step in range(1): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - for v_id, v in env.vehicles.items(): + for v_id, v in env.agents.items(): if v_id != "agent0": v.set_static(True) for step in range(5000): - act = {k: [0, 1] for k in env.vehicles.keys()} + act = {k: [0, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) for kkk, iii in i.items(): if iii[TerminationState.CRASH]: @@ -430,10 +430,10 @@ def test_ma_roundabout_reward_done_alignment_1(): _check_spaces_before_reset(env) obs, _ = env.reset() _check_spaces_after_reset(env) - env.vehicles["agent0"].set_position(env.vehicles["agent0"].navigation.final_lane.end) + env.agents["agent0"].set_position(env.agents["agent0"].navigation.final_lane.end) assert env.observation_space.contains(obs) 
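# agent0 was teleported onto its final lane end above, so idling with a zero action should align the arrival reward with the done flag within a few steps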
for step in range(5000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if tm["__all__"]: break @@ -474,7 +474,7 @@ def _safe_places(self): _check_spaces_after_reset(env) ep_reward = 0.0 for step in range(1000): - act = {k: [0, 1] for k in env.vehicles.keys()} + act = {k: [0, 1] for k in env.agents.keys()} o, r, tm, tc, i = env.step(act) ep_reward += next(iter(r.values())) if any(tm.values()): @@ -537,7 +537,7 @@ def test_ma_roundabout_no_short_episode(): tm = {"__all__": False} for step in range(2000): # act = {k: actions[np.random.choice(len(actions))] for k in o.keys()} - act = {k: actions[np.random.choice(len(actions))] for k in env.vehicles.keys()} + act = {k: actions[np.random.choice(len(actions))] for k in env.agents.keys()} o_keys = set(o.keys()).union({"__all__"}) a_keys = set(env.action_space.spaces.keys()).union(set(tm.keys())) assert o_keys == a_keys @@ -574,13 +574,13 @@ def test_ma_roundabout_horizon_termination(): should_respawn = set() special_agents = set(["agent0", "agent7"]) for step in range(1, 10000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} for v_id in act.keys(): if v_id in special_agents: act[v_id] = [1, 1] # Add some randomness else: - if v_id in env.vehicles: - env.vehicles[v_id].set_static(True) + if v_id in env.agents: + env.agents[v_id].set_static(True) obs, r, tm, tc, i = _act(env, act) if step == 0 or step == 1: assert not any(tm.values()) @@ -635,12 +635,12 @@ def check_pos(vehicles): assert env.observation_space.contains(obs) for step in range(50): env.reset() - check_pos(list(env.vehicles.values())) - for v_id in list(env.vehicles.keys())[:20]: - env.agent_manager.finish(v_id) - env.step({k: [1, 1] for k in env.vehicles.keys()}) - env.step({k: [1, 1] for k in env.vehicles.keys()}) - env.step({k: [1, 1] for k in env.vehicles.keys()}) + check_pos(list(env.agents.values())) + for v_id in list(env.agents.keys())[:20]: + env.agent_manager._finish(v_id) + env.step({k: [1, 1] for k in env.agents.keys()}) + env.step({k: [1, 1] for k in env.agents.keys()}) + env.step({k: [1, 1] for k in env.agents.keys()}) finally: env.close() @@ -666,8 +666,8 @@ def check_pos(vehicles): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(300): - check_pos(list(env.vehicles.values())) - o, r, tm, tc, i = env.step({k: [0, 1] for k in env.vehicles.keys()}) + check_pos(list(env.agents.values())) + o, r, tm, tc, i = env.step({k: [0, 1] for k in env.agents.keys()}) if tm["__all__"]: break finally: @@ -680,11 +680,11 @@ def test_randomize_spawn_place(): try: obs, _ = env.reset() for step in range(100): - act = {k: [1, 1] for k in env.vehicles.keys()} - last_pos = {kkk: v.position for kkk, v in env.vehicles.items()} + act = {k: [1, 1] for k in env.agents.keys()} + last_pos = {kkk: v.position for kkk, v in env.agents.items()} o, r, tm, tc, i = env.step(act) obs, _ = env.reset() - new_pos = {kkk: v.position for kkk, v in env.vehicles.items()} + new_pos = {kkk: v.position for kkk, v in env.agents.items()} for kkk, new_p in new_pos.items(): assert not np.all(new_p == last_pos[kkk]), (new_p, last_pos[kkk], kkk) finally: diff --git a/metadrive/tests/test_env/test_ma_tollgate.py b/metadrive/tests/test_env/test_ma_tollgate.py index d2f4be89b..0f9677cb9 100644 --- a/metadrive/tests/test_env/test_ma_tollgate.py +++ b/metadrive/tests/test_env/test_ma_tollgate.py @@ -10,7 +10,7 @@ def _check_spaces_before_reset(env): - a 
= set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) assert a == b == c @@ -18,7 +18,7 @@ def _check_spaces_before_reset(env): def _check_spaces_after_reset(env, obs=None): - a = set(env.config["target_vehicle_configs"].keys()) + a = set(env.config["agent_configs"].keys()) b = set(env.observation_space.spaces.keys()) assert a == b _check_shape(env) @@ -31,7 +31,7 @@ def _check_spaces_after_reset(env, obs=None): def _check_shape(env): b = set(env.observation_space.spaces.keys()) c = set(env.action_space.spaces.keys()) - d = set(env.vehicles.keys()) + d = set(env.agents.keys()) e = set(env.engine.agents.keys()) f = set([k for k in env.observation_space.spaces.keys() if not env.dones[k]]) assert d == e == f, (b, c, d, e, f) @@ -61,7 +61,7 @@ def _act(env, action): obs, reward, terminated, truncated, info = env.step(action) _check_shape(env) if not terminated["__all__"]: - assert len(env.vehicles) > 0 + assert len(env.agents) > 0 if not (set(obs.keys()) == set(reward.keys()) == set(env.observation_space.spaces.keys())): raise ValueError assert env.observation_space.contains(obs) @@ -88,7 +88,7 @@ def test_ma_toll_env(): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(100): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if step == 0: assert not any(tm.values()) @@ -120,11 +120,11 @@ def test_ma_toll_horizon(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) - last_keys = set(env.vehicles.keys()) + last_keys = set(env.agents.keys()) for step in range(1, 1000): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - new_keys = set(env.vehicles.keys()) + new_keys = set(env.agents.keys()) if step == 0: assert not any(tm.values()) if any(tm.values()): @@ -163,7 +163,7 @@ def test_ma_toll_reset(): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(1000): - act = {k: [1, 1] for k in env.vehicles.keys()} + act = {k: [1, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if step == 0: assert not any(tm.values()) @@ -175,7 +175,7 @@ def test_ma_toll_reset(): _check_spaces_after_reset(env, obs) assert set(env.observation_space.spaces.keys()) == set(env.action_space.spaces.keys()) == \ set(env.observations.keys()) == set(obs.keys()) == \ - set(env.config["target_vehicle_configs"].keys()) + set(env.config["agent_configs"].keys()) break finally: @@ -195,11 +195,11 @@ def test_ma_toll_reset(): for step in range(1000): # for _ in range(2): - # act = {k: [1, 1] for k in env.vehicles.keys()} + # act = {k: [1, 1] for k in env.agents.keys()} # o, r, tm, tc, i = _act(env, act) # Force vehicle to success! - for v_id, v in env.vehicles.items(): + for v_id, v in env.agents.items(): loc = v.navigation.final_lane.end # vehicle will stack together to explode! 
v.set_position(loc, height=int(v_id[5:]) * 2) @@ -217,10 +217,10 @@ def test_ma_toll_reset(): # print('sss') assert env._is_arrive_destination(v) - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - for v in env.vehicles.values(): + for v in env.agents.values(): assert len(v.navigation.checkpoints) > 2 for kkk, iii in i.items(): @@ -274,9 +274,9 @@ def _no_close_spawn(vehicles): obs, _ = env.reset() _check_spaces_after_reset(env) for _ in range(10): - o, r, tm, tc, i = env.step({k: [0, 0] for k in env.vehicles.keys()}) + o, r, tm, tc, i = env.step({k: [0, 0] for k in env.agents.keys()}) assert not any(tm.values()) - _no_close_spawn(env.vehicles) + _no_close_spawn(env.agents) # print('Finish {} resets.'.format(num_r)) finally: env.close() @@ -293,7 +293,7 @@ def test_ma_toll_reward_done_alignment_1(): assert env.observation_space.contains(obs) for action in [-1, 1]: for step in range(5000): - act = {k: [action, 1] for k in env.vehicles.keys()} + act = {k: [action, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) for kkk, ddd in tm.items(): if ddd and kkk != "__all__" and not tm["__all__"] and not i[kkk]["max_step"]: @@ -335,11 +335,11 @@ def test_ma_toll_reward_done_alignment_1(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) for step in range(5): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - env.vehicles["agent0"].set_position(env.vehicles["agent1"].position, height=1.2) + env.agents["agent0"].set_position(env.agents["agent1"].position, height=1.2) for step in range(5000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if not any(tm.values()): @@ -397,15 +397,15 @@ def test_ma_toll_reward_done_alignment_2(): obs, _ = env.reset() _check_spaces_after_reset(env, obs) for step in range(1): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) - for v_id, v in env.vehicles.items(): + for v_id, v in env.agents.items(): if v_id != "agent0": v.set_static(True) for step in range(5000): - act = {k: [0, 1] for k in env.vehicles.keys()} + act = {k: [0, 1] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) for kkk, iii in i.items(): if iii["crash_vehicle"]: @@ -442,10 +442,10 @@ def test_ma_toll_reward_done_alignment_2(): _check_spaces_before_reset(env) obs, _ = env.reset() _check_spaces_after_reset(env) - env.vehicles["agent0"].set_position(env.vehicles["agent0"].navigation.final_lane.end) + env.agents["agent0"].set_position(env.agents["agent0"].navigation.final_lane.end) assert env.observation_space.contains(obs) for step in range(5000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} o, r, tm, tc, i = _act(env, act) if tm["__all__"]: break @@ -486,7 +486,7 @@ def _safe_places(self): _check_spaces_after_reset(env) ep_reward = 0.0 for step in range(1000): - act = {k: [0, 1] for k in env.vehicles.keys()} + act = {k: [0, 1] for k in env.agents.keys()} o, r, tm, tc, i = env.step(act) ep_reward += next(iter(r.values())) if any(tm.values()): @@ -549,7 +549,7 @@ def test_ma_toll_no_short_episode(): tm = {"__all__": False} for step in range(2000): # act = {k: actions[np.random.choice(len(actions))] for k in o.keys()} - act = {k: actions[np.random.choice(len(actions))] for k in env.vehicles.keys()} + act = {k: 
actions[np.random.choice(len(actions))] for k in env.agents.keys()} o_keys = set(o.keys()).union({"__all__"}) a_keys = set(env.action_space.spaces.keys()).union(set(tm.keys())) assert o_keys == a_keys @@ -601,13 +601,13 @@ def test_ma_toll_horizon_termination(vis=False): should_respawn = set() special_agents = set(["agent0", "agent7"]) for step in range(1, 10000): - act = {k: [0, 0] for k in env.vehicles.keys()} + act = {k: [0, 0] for k in env.agents.keys()} for v_id in act.keys(): if v_id in special_agents: act[v_id] = [1, 1] # Add some randomness else: - if v_id in env.vehicles: - env.vehicles[v_id].set_static(True) + if v_id in env.agents: + env.agents[v_id].set_static(True) obs, r, tm, tc, i = _act(env, act) # if vis: @@ -669,12 +669,12 @@ def check_pos(vehicles): assert env.observation_space.contains(obs) for step in range(50): env.reset() - check_pos(list(env.vehicles.values())) - for v_id in list(env.vehicles.keys())[:20]: - env.agent_manager.finish(v_id) - env.step({k: [1, 1] for k in env.vehicles.keys()}) - env.step({k: [1, 1] for k in env.vehicles.keys()}) - env.step({k: [1, 1] for k in env.vehicles.keys()}) + check_pos(list(env.agents.values())) + for v_id in list(env.agents.keys())[:20]: + env.agent_manager._finish(v_id) + env.step({k: [1, 1] for k in env.agents.keys()}) + env.step({k: [1, 1] for k in env.agents.keys()}) + env.step({k: [1, 1] for k in env.agents.keys()}) finally: env.close() @@ -702,8 +702,8 @@ def check_pos(vehicles): _check_spaces_after_reset(env, obs) assert env.observation_space.contains(obs) for step in range(50): - check_pos(list(env.vehicles.values())) - o, r, tm, tc, i = env.step({k: [0, 1] for k in env.vehicles.keys()}) + check_pos(list(env.agents.values())) + o, r, tm, tc, i = env.step({k: [0, 1] for k in env.agents.keys()}) env.reset() if tm["__all__"]: break @@ -717,11 +717,11 @@ def test_randomize_spawn_place(): try: obs, _ = env.reset() for step in range(100): - act = {k: [1, 1] for k in env.vehicles.keys()} - last_pos = {kkk: v.position for kkk, v in env.vehicles.items()} + act = {k: [1, 1] for k in env.agents.keys()} + last_pos = {kkk: v.position for kkk, v in env.agents.items()} o, r, tm, tc, i = env.step(act) obs, _ = env.reset() - new_pos = {kkk: v.position for kkk, v in env.vehicles.items()} + new_pos = {kkk: v.position for kkk, v in env.agents.items()} for kkk, new_p in new_pos.items(): assert not np.all(new_p == last_pos[kkk]), (new_p, last_pos[kkk], kkk) finally: diff --git a/metadrive/tests/test_env/test_metadrive_env.py b/metadrive/tests/test_env/test_metadrive_env.py index 0df34f533..0a140753d 100644 --- a/metadrive/tests/test_env/test_metadrive_env.py +++ b/metadrive/tests/test_env/test_metadrive_env.py @@ -60,7 +60,7 @@ def test_pgdrive_env_blackbox(config): env = MetaDriveEnv(config=cfg) try: obs, _ = env.reset() - assert env.vehicle.panda_color == sns.color_palette("colorblind")[2] + assert env.agent.panda_color == sns.color_palette("colorblind")[2] assert env.observation_space.contains(obs) _act(env, env.action_space.sample()) for x in [-1, 0, 1]: diff --git a/metadrive/tests/test_env/test_naive_multi_agent.py b/metadrive/tests/test_env/test_naive_multi_agent.py index 951b9f231..a2fba1580 100644 --- a/metadrive/tests/test_env/test_naive_multi_agent.py +++ b/metadrive/tests/test_env/test_naive_multi_agent.py @@ -31,10 +31,10 @@ def test_naive_multi_agent_metadrive(): config={ "map": "SSS", "num_agents": 4, - "target_vehicle_configs": {"agent{}".format(i): { + "agent_configs": {"agent{}".format(i): { "spawn_longitude": i * 5 } - for 
i in range(4)} + for i in range(4)} } ) try: @@ -50,7 +50,7 @@ def test_naive_multi_agent_metadrive(): obses = list(o.values()) assert not np.isclose(obses[0], obses[1], rtol=1e-3, atol=1e-3).all() - pos_z_list = [v.chassis.getNode(0).transform.pos[2] for v in env.vehicles.values()] + pos_z_list = [v.chassis.getNode(0).transform.pos[2] for v in env.agents.values()] for p in pos_z_list: assert p < 5.0 or step <= 10 diff --git a/metadrive/tests/test_env/test_varying_dynamics_env.py b/metadrive/tests/test_env/test_varying_dynamics_env.py index 878298f9b..c93c100ea 100644 --- a/metadrive/tests/test_env/test_varying_dynamics_env.py +++ b/metadrive/tests/test_env/test_varying_dynamics_env.py @@ -11,7 +11,7 @@ def test_varying_dynamics_env(): env.reset(seed=seed) for _ in range(10): env.step(env.action_space.sample()) - dy = env.vehicle.get_dynamics_parameters() + dy = env.agent.get_dynamics_parameters() print("Dynamics: ", dy) dy = np.array(list(dy.values())) if len(dys) > 0: diff --git a/metadrive/tests/test_env/test_waymo_env.py b/metadrive/tests/test_env/test_waymo_env.py index bfa1fa29c..b8b2dd455 100644 --- a/metadrive/tests/test_env/test_waymo_env.py +++ b/metadrive/tests/test_env/test_waymo_env.py @@ -36,7 +36,7 @@ def test_waymo_env(policy, render=False, num_scenarios=3): if i == 999: raise ValueError("Can not arrive dest") - assert env.vehicle.panda_color == sns.color_palette("colorblind")[2] + assert env.agent.panda_color == sns.color_palette("colorblind")[2] finally: env.close() diff --git a/metadrive/tests/test_export_record_scenario/test_save_replay_episode.py b/metadrive/tests/test_export_record_scenario/test_save_replay_episode.py index 827354066..273cc21d8 100644 --- a/metadrive/tests/test_export_record_scenario/test_save_replay_episode.py +++ b/metadrive/tests/test_export_record_scenario/test_save_replay_episode.py @@ -69,7 +69,7 @@ def test_save_episode(vis=False): assert np.isclose(np.array([pos[0], pos[1]]), np.array(step_info[i][old_id][0]), 1e-2, 1e-2).all() assert abs(wrap_to_pi(heading - np.array(step_info[i][old_id][1]))) < 1e-2 - # assert abs(env.vehicle.get_z() - record_pos[-1]) < 1e-3 + # assert abs(env.agent.get_z() - record_pos[-1]) < 1e-3 o, r, tm, tc, info = env.step([0, 1]) if vis: env.render() @@ -105,7 +105,7 @@ def test_save_episode_marl(vis=False): env.engine.spawn_manager.seed(tt) o, _ = env.reset() for i in range(1, 100000 if vis else 600): - o, r, tm, tc, info = env.step({agent_id: [0, .2] for agent_id in env.vehicles.keys()}) + o, r, tm, tc, info = env.step({agent_id: [0, .2] for agent_id in env.agents.keys()}) if vis: env.render() if tm["__all__"]: @@ -136,7 +136,7 @@ def test_save_episode_marl(vis=False): assert np.isclose(np.array([pos[0], pos[1], obj.get_z()]), np.array(record_pos)).all() assert abs(wrap_to_pi(heading - record_heading)) < 1e-2 # print("Replay MARL step: {}".format(i)) - o, r, tm, tc, info = env.step({agent_id: [0, 0.1] for agent_id in env.vehicles.keys()}) + o, r, tm, tc, info = env.step({agent_id: [0, 0.1] for agent_id in env.agents.keys()}) if vis: env.render() if tm["__all__"]: diff --git a/metadrive/tests/test_functionality/test_collision.py b/metadrive/tests/test_functionality/test_collision.py index 59396f33e..fa467a2a4 100644 --- a/metadrive/tests/test_functionality/test_collision.py +++ b/metadrive/tests/test_functionality/test_collision.py @@ -11,7 +11,7 @@ def test_collision_with_vehicle(use_render=False): try: for i in range(1, 500): o, r, tm, tc, info = env.step([0, 1]) - if env.vehicle.crash_vehicle: + if 
env.agent.crash_vehicle: pass_test = True break assert pass_test, "Collision function is broken!" @@ -26,7 +26,7 @@ def test_collision_with_sidewalk(): try: for i in range(1, 100): o, r, tm, tc, info = env.step([-0.5, 1]) - if env.vehicle.crash_sidewalk: + if env.agent.crash_sidewalk: pass_test = True break assert pass_test, "Collision function is broken!" @@ -42,8 +42,8 @@ def test_line_contact(): try: for i in range(1, 100): o, r, tm, tc, info = env.step([-0.5, 1]) - on_broken_line = on_broken_line or env.vehicle.on_broken_line - on_continuous_line = on_continuous_line or env.vehicle.on_white_continuous_line + on_broken_line = on_broken_line or env.agent.on_broken_line + on_continuous_line = on_continuous_line or env.agent.on_white_continuous_line assert on_broken_line and on_continuous_line, "Collision function is broken!" finally: env.close() diff --git a/metadrive/tests/test_functionality/test_discrete_action.py b/metadrive/tests/test_functionality/test_discrete_action.py index 5ce2f816e..27491f39e 100644 --- a/metadrive/tests/test_functionality/test_discrete_action.py +++ b/metadrive/tests/test_functionality/test_discrete_action.py @@ -47,7 +47,7 @@ def test_discrete_action(): env.reset() assert isinstance(env.action_space, gym.spaces.Discrete) assert env.action_space.n == 15 - v = env.vehicle + v = env.agent policy = env.engine.get_policy(v.name) assert policy.convert_to_continuous_action(0) == (-1, -1) assert policy.convert_to_continuous_action(1) == (0, -1) @@ -78,7 +78,7 @@ def test_multi_discrete_action(): assert isinstance(env.action_space, gym.spaces.MultiDiscrete) assert env.action_space.shape == (2, ) assert all(env.action_space.nvec == (3, 5)) - v = env.vehicle + v = env.agent policy = env.engine.get_policy(v.name) assert policy.convert_to_continuous_action([0, 0]) == (-1, -1) assert policy.convert_to_continuous_action([1, 0]) == (0, -1) diff --git a/metadrive/tests/test_functionality/test_horizon_termination.py b/metadrive/tests/test_functionality/test_horizon_termination.py index 75a92a71a..241f8df40 100644 --- a/metadrive/tests/test_functionality/test_horizon_termination.py +++ b/metadrive/tests/test_functionality/test_horizon_termination.py @@ -29,7 +29,7 @@ def test_horizon(cfg, use_render=False): test_pass = True if horizon is None else False try: for i in range(1, 500): - action = {k: [0, -1] for k in env.vehicles.keys()} if is_marl else [0, -1] + action = {k: [0, -1] for k in env.agents.keys()} if is_marl else [0, -1] o, r, tms, tcs, infos = env.step(action) if not isinstance(tms, dict): tms = dict(__all__=tms) diff --git a/metadrive/tests/test_functionality/test_marl_infinite_agents.py b/metadrive/tests/test_functionality/test_marl_infinite_agents.py index d655cdd8d..b90a96f83 100644 --- a/metadrive/tests/test_functionality/test_marl_infinite_agents.py +++ b/metadrive/tests/test_functionality/test_marl_infinite_agents.py @@ -17,14 +17,14 @@ def test_infinite_agents(): o, _ = env.reset() env.seed(100) env._DEBUG_RANDOM_SEED = 100 - max_num = old_num_of_vehicles = len(env.vehicles) + max_num = old_num_of_vehicles = len(env.agents) for i in range(1, 1000): - o, r, tm, tc, info = env.step({k: [1, 1] for k in env.vehicles}) + o, r, tm, tc, info = env.step({k: [1, 1] for k in env.agents}) # # print( - # "{} Current active agents: ".format(i), len(env.vehicles), ". Objects: ", + # "{} Current active agents: ".format(i), len(env.agents), ". 
Objects: ", # len(env.agent_manager._object_to_agent) # ) - max_num = max(len(env.vehicles), max_num) + max_num = max(len(env.agents), max_num) # env.render(mode="top_down") for kkk, iii in info.items(): if tm[kkk] or tc[kkk]: @@ -39,12 +39,12 @@ def test_infinite_agents(): env = MultiAgentRoundaboutEnv({"num_agents": -1, "delay_done": 0, "horizon": 50, "debug": True}) try: o, _ = env.reset() - max_num = old_num_of_vehicles = len(env.vehicles) + max_num = old_num_of_vehicles = len(env.agents) for i in range(1, 300): - o, r, tm, tc, info = env.step({k: [0, 1] for k in env.vehicles}) - # # print("Current active agents: ", len(env.vehicles), + o, r, tm, tc, info = env.step({k: [0, 1] for k in env.agents}) + # # print("Current active agents: ", len(env.agents), # ". Objects: ", len(env.agent_manager._object_to_agent)) - max_num = max(len(env.vehicles), max_num) + max_num = max(len(env.agents), max_num) # env.render(mode="top_down") for kkk, iii in info.items(): if tm[kkk] or tc[kkk]: diff --git a/metadrive/tests/test_functionality/test_marl_reborn.py b/metadrive/tests/test_functionality/test_marl_reborn.py index dab51800e..2cae8f5d7 100644 --- a/metadrive/tests/test_functionality/test_marl_reborn.py +++ b/metadrive/tests/test_functionality/test_marl_reborn.py @@ -18,8 +18,8 @@ def test_respawn(): try: assert set(env.action_space.spaces.keys()) == {"agent0", "agent1"} - assert set(env.config["target_vehicle_configs"].keys()) == {"agent0", "agent1"} - assert set(env.vehicles.keys()) == set() # Not initialized yet! + assert set(env.config["agent_configs"].keys()) == {"agent0", "agent1"} + assert set(env.agents.keys()) == set() # Not initialized yet! o, _ = env.reset() @@ -28,8 +28,8 @@ def test_respawn(): assert set(o.keys()) == {"agent0", "agent1"} assert set(env.observations.keys()) == {"agent0", "agent1"} assert set(env.action_space.spaces.keys()) == {"agent0", "agent1"} - assert set(env.config["target_vehicle_configs"].keys()) == {"agent0", "agent1"} - assert set(env.vehicles.keys()) == {"agent0", "agent1"} + assert set(env.config["agent_configs"].keys()) == {"agent0", "agent1"} + assert set(env.agents.keys()) == {"agent0", "agent1"} v_id_0 = "agent0" v_id_1 = "agent1" @@ -65,8 +65,8 @@ def test_respawn(): assert set(o.keys()) == {"agent0", "agent1"} assert set(env.observations.keys()) == {"agent0", "agent1"} assert set(env.action_space.spaces.keys()) == {"agent0", "agent1"} - assert set(env.config["target_vehicle_configs"].keys()) == {"agent0", "agent1"} - assert set(env.vehicles.keys()) == {"agent0", "agent1"} + assert set(env.config["agent_configs"].keys()) == {"agent0", "agent1"} + assert set(env.agents.keys()) == {"agent0", "agent1"} finally: env.close() assert done_count > 0 @@ -80,7 +80,7 @@ def test_delay_done(render=False): { # "use_render": True, # - "target_vehicle_configs": { + "agent_configs": { "agent0": { "spawn_longitude": 12, "spawn_lateral": 0, @@ -105,9 +105,9 @@ def test_delay_done(render=False): o, _ = env.reset() for i in range(1, 300): actions = {"agent0": [1, 1], "agent1": [1, 1]} - if "agent0" not in env.vehicles: + if "agent0" not in env.agents: actions.pop("agent0") - if "agent1" not in env.vehicles: + if "agent1" not in env.agents: actions.pop("agent1") o, r, tm, tc, info = env.step(actions) if agent0_done: @@ -134,10 +134,10 @@ def test_delay_done(render=False): env.reset() dead = set() for _ in range(300): - o, r, tm, tc, i = env.step({k: [1, 1] for k in env.vehicles.keys()}) + o, r, tm, tc, i = env.step({k: [1, 1] for k in env.agents.keys()}) for dead_name in 
dead: assert dead_name not in o - # print("{} there!".format(env.vehicles.keys())) + # print("{} there!".format(env.agents.keys())) # print("{} dead!".format([kkk for kkk, ddd in d.items() if ddd])) for kkk, ddd in tm.items(): if ddd and kkk != "__all__": diff --git a/metadrive/tests/test_functionality/test_navigation.py b/metadrive/tests/test_functionality/test_navigation.py index a5a19130f..546f229a8 100644 --- a/metadrive/tests/test_functionality/test_navigation.py +++ b/metadrive/tests/test_functionality/test_navigation.py @@ -52,7 +52,7 @@ def test_navigation(vis=False): steering_error = o[0] - target.lateral steering = steering_controller.get_result(steering_error) - acc_error = env.vehicles[DEFAULT_AGENT].speed_km_h - target.speed_km_h + acc_error = env.agents[DEFAULT_AGENT].speed_km_h - target.speed_km_h acc = acc_controller.get_result(acc_error) for i in range(1, 1000000 if vis else 2000): o, r, tm, tc, info = env.step([-steering, acc]) @@ -62,7 +62,7 @@ def test_navigation(vis=False): steering = steering_controller.get_result(steering_error) t_speed = target.speed_km_h if abs(o[12] - 0.5) < 0.01 else target.speed_km_h - 10 - acc_error = env.vehicles[DEFAULT_AGENT].speed_km_h - t_speed + acc_error = env.agents[DEFAULT_AGENT].speed_km_h - t_speed acc = acc_controller.get_result(acc_error) if vis: if i < 700: @@ -87,7 +87,7 @@ def test_navigation(vis=False): steering = steering_controller.get_result(steering_error, o[11]) acc_controller.reset() - acc_error = env.vehicles[DEFAULT_AGENT].speed_km_h - target.speed_km_h + acc_error = env.agents[DEFAULT_AGENT].speed_km_h - target.speed_km_h acc = acc_controller.get_result(acc_error) env.close() diff --git a/metadrive/tests/test_functionality/test_out_of_road.py b/metadrive/tests/test_functionality/test_out_of_road.py index d7730314c..4520294f2 100644 --- a/metadrive/tests/test_functionality/test_out_of_road.py +++ b/metadrive/tests/test_functionality/test_out_of_road.py @@ -17,16 +17,16 @@ def test_out_of_road(): ) try: obs, _ = env.reset() - tolerance = math.sqrt(env.vehicle.WIDTH**2 + env.vehicle.LENGTH**2) / distance + tolerance = math.sqrt(env.agent.WIDTH**2 + env.agent.LENGTH**2) / distance for _ in range(100000000): o, r, tm, tc, i = env.step([steering, 1]) if tm or tc: per = env.engine.get_sensor("side_detector").perceive points = per( - env.vehicle, - env.vehicle.engine.physics_world.static_world, - num_lasers=env.vehicle.config["side_detector"]["num_lasers"], - distance=env.vehicle.config["side_detector"]["distance"] + env.agent, + env.agent.engine.physics_world.static_world, + num_lasers=env.agent.config["side_detector"]["num_lasers"], + distance=env.agent.config["side_detector"]["distance"] ).cloud_points assert min(points) < tolerance, (min(points), tolerance) print( @@ -51,7 +51,7 @@ def useless_left_right_distance_printing(): try: for _ in range(100000000): o, r, tm, tc, i = env.step([steering, 1]) - vehicle = env.vehicle + vehicle = env.agent l, r = vehicle.dist_to_left_side, vehicle.dist_to_right_side total_width = float( (vehicle.navigation.get_current_lane_num() + 1) * vehicle.navigation.get_current_lane_width() diff --git a/metadrive/tests/test_functionality/test_pedestrian.py b/metadrive/tests/test_functionality/test_pedestrian.py index bdd2cc757..0d5c188b0 100644 --- a/metadrive/tests/test_functionality/test_pedestrian.py +++ b/metadrive/tests/test_functionality/test_pedestrian.py @@ -39,7 +39,7 @@ def test_pedestrian(render=False): obj_2 = env.engine.spawn_object(Pedestrian, position=[30, 6], heading_theta=0, 
random_seed=1) obj_1.set_velocity([1, 0], 1, in_local_frame=True) obj_2.set_velocity([1, 0], 0, in_local_frame=True) - env.vehicle.set_velocity([5, 0], in_local_frame=False) + env.agent.set_velocity([5, 0], in_local_frame=False) for s in range(1, 1000): o, r, tm, tc, info = env.step([0, 0]) # obj_1.set_velocity([1, 0], 2, in_local_frame=True) diff --git a/metadrive/tests/test_functionality/test_random_engine.py b/metadrive/tests/test_functionality/test_random_engine.py index 6bc778f58..0cadb8f22 100644 --- a/metadrive/tests/test_functionality/test_random_engine.py +++ b/metadrive/tests/test_functionality/test_random_engine.py @@ -136,15 +136,15 @@ def test_random_lane_width(): ) try: o, _ = env.reset(seed=12) - old_config_1 = env.vehicle.lane.width + old_config_1 = env.agent.lane.width env.reset(seed=15) - old_config_2 = env.vehicle.lane.width + old_config_2 = env.agent.lane.width env.reset(seed=13) env.reset(seed=12) - new_config = env.vehicle.lane.width + new_config = env.agent.lane.width assert old_config_1 == new_config env.reset(seed=15) - new_config = env.vehicle.lane.width + new_config = env.agent.lane.width assert old_config_2 == new_config assert old_config_1 != old_config_2 finally: @@ -163,21 +163,21 @@ def test_random_lane_num(): ) try: o, _ = env.reset(seed=12) - old_config_1 = env.vehicle.navigation.get_current_lane_num() + old_config_1 = env.agent.navigation.get_current_lane_num() env.reset(seed=15) - old_config_2 = env.vehicle.navigation.get_current_lane_num() + old_config_2 = env.agent.navigation.get_current_lane_num() env.reset(seed=13) env.reset(seed=12) - new_config = env.vehicle.navigation.get_current_lane_num() + new_config = env.agent.navigation.get_current_lane_num() assert old_config_1 == new_config env.reset(seed=15) - new_config = env.vehicle.navigation.get_current_lane_num() + new_config = env.agent.navigation.get_current_lane_num() assert old_config_2 == new_config env.close() env.reset(seed=12) - assert old_config_1 == env.vehicle.navigation.get_current_lane_num() + assert old_config_1 == env.agent.navigation.get_current_lane_num() env.reset(seed=15) - assert old_config_2 == env.vehicle.navigation.get_current_lane_num() + assert old_config_2 == env.agent.navigation.get_current_lane_num() finally: env.close() @@ -194,15 +194,15 @@ def test_random_vehicle_parameter(): ) try: o, _ = env.reset(seed=12) - old_config_1 = env.vehicle.get_config(True) + old_config_1 = env.agent.get_config(True) env.reset(seed=15) - old_config_2 = env.vehicle.get_config(True) + old_config_2 = env.agent.get_config(True) env.reset(seed=13) env.reset(seed=12) - new_config = env.vehicle.get_config(True) + new_config = env.agent.get_config(True) assert recursive_equal(old_config_1, new_config) env.reset(seed=15) - new_config = env.vehicle.get_config(True) + new_config = env.agent.get_config(True) assert recursive_equal(old_config_2, new_config) finally: env.close() diff --git a/metadrive/tests/test_functionality/test_reborn.py b/metadrive/tests/test_functionality/test_reborn.py index 596d863e6..75d1543e7 100644 --- a/metadrive/tests/test_functionality/test_reborn.py +++ b/metadrive/tests/test_functionality/test_reborn.py @@ -11,8 +11,8 @@ def test_traffic_respawn(vis=False): for i in range(1, 3000): env.step([0, 0]) current_v = set(env.engine.traffic_manager.vehicles) - for v in list(env.engine.traffic_manager.traffic_vehicles) + [env.vehicle]: - if v is env.vehicle: + for v in list(env.engine.traffic_manager.traffic_vehicles) + [env.agent]: + if v is env.agent: current_v.discard(v) else: 
current_v.discard(v) diff --git a/metadrive/tests/test_functionality/test_scenario_randomness.py b/metadrive/tests/test_functionality/test_scenario_randomness.py index c6eb0f030..22da2c287 100644 --- a/metadrive/tests/test_functionality/test_scenario_randomness.py +++ b/metadrive/tests/test_functionality/test_scenario_randomness.py @@ -39,12 +39,10 @@ def test_scenario_randomness(vis=False): try: positions_1 = [] o, _ = env.reset() - positions_1.append([env.vehicle.position] + [v.position for v in env.engine.traffic_manager.traffic_vehicles]) + positions_1.append([env.agent.position] + [v.position for v in env.engine.traffic_manager.traffic_vehicles]) for i in range(1, 100000 if vis else 2000): o, r, tm, tc, info = env.step([0, 1]) - positions_1.append( - [env.vehicle.position] + [v.position for v in env.engine.traffic_manager.traffic_vehicles] - ) + positions_1.append([env.agent.position] + [v.position for v in env.engine.traffic_manager.traffic_vehicles]) if tm or tc: break env.close() @@ -52,12 +50,12 @@ def test_scenario_randomness(vis=False): env = SafeMetaDriveEnv(cfg) o, _ = env.reset() old_position = positions_1.pop() - new_position = [env.vehicle.position] + [v.position for v in env.engine.traffic_manager.traffic_vehicles] + new_position = [env.agent.position] + [v.position for v in env.engine.traffic_manager.traffic_vehicles] assert_equal_pos(old_position, new_position) for i in range(1, 100000 if vis else 2000): o, r, tm, tc, info = env.step([0, 1]) old_position = positions_1.pop() - new_position = [env.vehicle.position] + [v.position for v in env.engine.traffic_manager.traffic_vehicles] + new_position = [env.agent.position] + [v.position for v in env.engine.traffic_manager.traffic_vehicles] assert_equal_pos(old_position, new_position) if tm or tc: break diff --git a/metadrive/tests/test_functionality/test_sensors_config.py b/metadrive/tests/test_functionality/test_sensors_config.py index 8b1b2f870..16653db29 100644 --- a/metadrive/tests/test_functionality/test_sensors_config.py +++ b/metadrive/tests/test_functionality/test_sensors_config.py @@ -27,9 +27,9 @@ def test_sensor_config(): assert 50 in env.engine.get_sensor("lidar").broad_detectors assert len(env.engine.get_sensor("lidar").broad_detectors) == 1 env.engine.get_sensor("lidar_new").perceive( - env.vehicle, + env.agent, physics_world=env.engine.physics_world.dynamic_world, - num_lasers=env.vehicle.config["lidar"]["num_lasers"], + num_lasers=env.agent.config["lidar"]["num_lasers"], distance=100.5, detector_mask=None ) @@ -46,9 +46,9 @@ def test_sensor_config(): assert 50 in env.engine.get_sensor("lidar").broad_detectors assert len(env.engine.get_sensor("lidar").broad_detectors) == 1 env.engine.get_sensor("lidar_new").perceive( - env.vehicle, + env.agent, physics_world=env.engine.physics_world.dynamic_world, - num_lasers=env.vehicle.config["lidar"]["num_lasers"], + num_lasers=env.agent.config["lidar"]["num_lasers"], distance=100.5, detector_mask=None ) diff --git a/metadrive/tests/test_functionality/test_traffic_mode.py b/metadrive/tests/test_functionality/test_traffic_mode.py index 148d08a25..f5c7b0e02 100644 --- a/metadrive/tests/test_functionality/test_traffic_mode.py +++ b/metadrive/tests/test_functionality/test_traffic_mode.py @@ -16,7 +16,7 @@ def test_traffic_mode(render=False): ) o, _ = env.reset() - env.vehicle.set_velocity([1, 0.1], 10) + env.agent.set_velocity([1, 0.1], 10) if mode == "respawn": assert len(env.engine.traffic_manager._traffic_vehicles) != 0 elif mode == "hybrid" or mode == "trigger": diff --git 
a/metadrive/tests/test_installation.py b/metadrive/tests/test_installation.py index 5d436789d..da15e6f30 100644 --- a/metadrive/tests/test_installation.py +++ b/metadrive/tests/test_installation.py @@ -44,7 +44,7 @@ def capture_headless_image(cuda, image_source="main_camera"): ) cam = env.engine.get_sensor(image_source) cam.save_image( - env.vehicle, + env.agent, os.path.join( MetaDrive_PACKAGE_DIR, "examples", "{}_from_buffer{}.png".format(image_source, "_cuda" if cuda else "") ) diff --git a/metadrive/tests/test_policy/test_expert_performance.py b/metadrive/tests/test_policy/test_expert_performance.py index e871ff8af..c2c5afa7c 100644 --- a/metadrive/tests/test_policy/test_expert_performance.py +++ b/metadrive/tests/test_policy/test_expert_performance.py @@ -19,13 +19,13 @@ def _evaluate(env_config, num_episode, has_traffic=True, need_on_same_lane=True) lidar_success = False success_list, reward_list, ep_reward, ep_len, ep_count = [], [], 0, 0, 0 while ep_count < num_episode: - action = expert(env.vehicle, deterministic=True) + action = expert(env.agent, deterministic=True) obs, reward, terminated, truncated, info = env.step(action) if need_on_same_lane: - assert lane_idx_need_to_stay == env.vehicle.lane_index[-1], "Not one the same lane" + assert lane_idx_need_to_stay == env.agent.lane_index[-1], "Not on the same lane" # double check lidar if env.config["use_render"]: - env.render(text={"lane_index": env.vehicle.lane_index, "step": env.episode_step}) + env.render(text={"lane_index": env.agent.lane_index, "step": env.episode_step}) lidar = [True if p == 1.0 else False for p in env.observations[DEFAULT_AGENT].cloud_points] if not all(lidar): lidar_success = True @@ -37,9 +37,7 @@ def _evaluate(env_config, num_episode, has_traffic=True, need_on_same_lane=True) reward_list.append(ep_reward) ep_reward = 0 ep_len = 0 - env.config["target_vehicle_configs"]["default_agent"]["spawn_lane_index"] = ( - ">", ">>", len(reward_list) % 3 - ) + env.config["agent_configs"]["default_agent"]["spawn_lane_index"] = (">", ">>", len(reward_list) % 3) lane_idx_need_to_stay = len(reward_list) % 3 obs, _ = env.reset() if has_traffic: diff --git a/metadrive/tests/test_policy/test_lane_change_policy.py b/metadrive/tests/test_policy/test_lane_change_policy.py index 0cc07ea4b..1a971e223 100644 --- a/metadrive/tests/test_policy/test_lane_change_policy.py +++ b/metadrive/tests/test_policy/test_lane_change_policy.py @@ -76,13 +76,13 @@ def test_lane_change(render=False): o, _ = env.reset() for s in range(1, 60): o, r, tm, tc, info = env.step([2, 3]) - assert env.vehicle.lane.index[-1] == 0 + assert env.agent.lane.index[-1] == 0 for s in range(1, 40): o, r, tm, tc, info = env.step([0, 3]) - assert env.vehicle.lane.index[-1] == 2 + assert env.agent.lane.index[-1] == 2 for s in range(1, 70): o, r, tm, tc, info = env.step([1, 3]) - assert env.vehicle.lane.index[-1] == 2 + assert env.agent.lane.index[-1] == 2 finally: env.close() diff --git a/metadrive/tests/test_sensors/test_main_camera.py b/metadrive/tests/test_sensors/test_main_camera.py index 5be050555..416175633 100644 --- a/metadrive/tests/test_sensors/test_main_camera.py +++ b/metadrive/tests/test_sensors/test_main_camera.py @@ -30,6 +30,9 @@ def test_main_camera(config, render=False): "window_size": (config["width"], config["height"]), "stack_size": config["stack_size"], "vehicle_config": dict(image_source="main_camera"), + "sensors": { + "main_camera": () + }, "interface_panel": [], "image_observation": True, # it is a switch telling metadrive to use rgb as
observation "norm_pixel": config["norm_pixel"], # clip rgb to range(0,1) instead of (0, 255) @@ -42,7 +45,7 @@ def test_main_camera(config, render=False): start = time.time() for i in range(1, 10): o, r, tm, tc, info = env.step([0, 1]) - assert "LANE_SURFACE_STREET" in env.vehicle.contact_results + assert "LANE_SURFACE_STREET" in env.agent.contact_results assert env.observation_space.contains(o) # Reverse assert o["image"].shape == (config["height"], config["width"], 3, config["stack_size"]) diff --git a/metadrive/tests/test_sensors/test_sensor_creation.py b/metadrive/tests/test_sensors/test_sensor_creation.py new file mode 100644 index 000000000..25279f19f --- /dev/null +++ b/metadrive/tests/test_sensors/test_sensor_creation.py @@ -0,0 +1,241 @@ +from metadrive.component.sensors.rgb_camera import RGBCamera +from metadrive.component.sensors.semantic_camera import SemanticCamera +from metadrive.envs.metadrive_env import MetaDriveEnv + + +def test_creation(): + """ + Test sensor creation. + """ + + # image_observation: True, use_render: True, request main_camera + env = MetaDriveEnv( + { + "show_terrain": False, + "window_size": (16, 16), + "sensors": { + "semantic": (SemanticCamera, 400, 300), + "rgb": (RGBCamera, 400, 300), + "main_camera": (), + }, + "vehicle_config": dict(image_source="main_camera"), + "interface_panel": ["rgb", "main_camera", "dashboard"], + "image_observation": True, + "use_render": True, + } + ) + env.reset() + assert env.config["sensors"].keys() == { + "lidar", "side_detector", "lane_line_detector", "semantic", "rgb", "main_camera", "dashboard" + } == env.engine.sensors.keys() + assert env.config["interface_panel"] == ["rgb", "dashboard"] + assert not env.engine.main_window_disabled + env.close() + + # image_observation: True, use_render: False, request main_camera, dashboard + env = MetaDriveEnv( + { + "show_terrain": False, + "window_size": (16, 16), + "sensors": { + "semantic": (SemanticCamera, 400, 300), + "rgb": (RGBCamera, 400, 300), + "main_camera": (), + }, + "vehicle_config": dict(image_source="main_camera"), + "interface_panel": ["rgb", "dashboard"], + "image_observation": True, + "use_render": False, + } + ) + env.reset() + assert env.config["sensors"].keys() == { + "lidar", "side_detector", "lane_line_detector", "semantic", "rgb", "dashboard", "main_camera" + } == env.engine.sensors.keys() + assert env.config["interface_panel"] == ["rgb", "dashboard"] + assert not env.engine.main_window_disabled + env.close() + + # image_observation: False + env = MetaDriveEnv( + { + "show_terrain": False, + "window_size": (16, 16), + "sensors": { + "semantic": (SemanticCamera, 400, 300), + "rgb": (RGBCamera, 400, 300), + }, + "interface_panel": ["rgb", "dashboard"], + "image_observation": False, # it is a switch telling metadrive to use rgb as observation + } + ) + env.reset() + assert env.config["sensors"].keys() == {"lidar", "side_detector", "lane_line_detector"} == env.engine.sensors.keys() + assert env.config["interface_panel"] == [] + assert env.engine.main_window_disabled + env.close() + + # image_observation: False, request main_camera + env = MetaDriveEnv( + { + "show_terrain": False, + "window_size": (16, 16), + "sensors": { + "semantic": (SemanticCamera, 400, 300), + "rgb": (RGBCamera, 400, 300), + "main_camera": (), + }, + "interface_panel": ["rgb", "dashboard"], + "image_observation": False, # it is a switch telling metadrive to use rgb as observation + } + ) + env.reset() + assert env.config["sensors"].keys() == {"lidar", "side_detector", 
"lane_line_detector"} == env.engine.sensors.keys() + assert env.config["interface_panel"] == [] + assert env.engine.main_window_disabled + env.close() + + # image_observation: False, use_render: True + env = MetaDriveEnv( + { + "show_terrain": False, + "window_size": (16, 16), + "sensors": { + "semantic": (SemanticCamera, 400, 300), + "rgb": (RGBCamera, 400, 300), + }, + "interface_panel": ["rgb", "dashboard"], + "image_observation": False, + "use_render": True, + } + ) + env.reset() + assert env.config["sensors"].keys() == { + "lidar", "side_detector", "lane_line_detector", "semantic", "rgb", "main_camera", "dashboard" + } == env.engine.sensors.keys() + assert env.config["interface_panel"] == ["rgb", "dashboard"] + assert not env.engine.main_window_disabled + env.close() + + # image_observation: True, use_render: False + env = MetaDriveEnv( + { + "show_terrain": False, + "window_size": (16, 16), + "sensors": { + "semantic": (SemanticCamera, 400, 300), + "rgb": (RGBCamera, 400, 300), + }, + "vehicle_config": dict(image_source="rgb"), + "interface_panel": ["rgb", "dashboard"], + "image_observation": True, + "use_render": False, + } + ) + env.reset() + assert env.config["sensors"].keys() == {"lidar", "side_detector", "lane_line_detector", "semantic", "rgb" + } == env.engine.sensors.keys() + assert env.config["interface_panel"] == [] + assert env.engine.main_window_disabled + env.close() + + # image_observation: True, use_render: True + env = MetaDriveEnv( + { + "show_terrain": False, + "window_size": (16, 16), + "sensors": { + "semantic": (SemanticCamera, 400, 300), + "rgb": (RGBCamera, 400, 300), + }, + "vehicle_config": dict(image_source="rgb"), + "interface_panel": ["rgb", "dashboard"], + "image_observation": True, + "use_render": True, + } + ) + env.reset() + assert env.config["sensors"].keys() == { + "lidar", "side_detector", "lane_line_detector", "semantic", "rgb", "dashboard", "main_camera" + } == env.engine.sensors.keys() + assert env.config["interface_panel"] == ["rgb", "dashboard"] + assert not env.engine.main_window_disabled + env.close() + + # image_observation: True, use_render: False, request main_camera + env = MetaDriveEnv( + { + "show_terrain": False, + "window_size": (16, 16), + "sensors": { + "semantic": (SemanticCamera, 400, 300), + "rgb": (RGBCamera, 400, 300), + "main_camera": (), + }, + "vehicle_config": dict(image_source="rgb"), + "interface_panel": ["rgb", "dashboard"], + "image_observation": True, + "use_render": False, + } + ) + env.reset() + assert env.config["sensors"].keys() == { + "lidar", "side_detector", "lane_line_detector", "semantic", "rgb", "dashboard", "main_camera" + } == env.engine.sensors.keys() + assert env.config["interface_panel"] == ["rgb", "dashboard"] + assert not env.engine.main_window_disabled + env.close() + + # image_observation: False, use_render: True, request main_camera + env = MetaDriveEnv( + { + "show_terrain": False, + "window_size": (16, 16), + "sensors": { + "semantic": (SemanticCamera, 400, 300), + "rgb": (RGBCamera, 400, 300), + "main_camera": (), + }, + "vehicle_config": dict(image_source="main_camera"), + "interface_panel": ["rgb", "main_camera"], + "image_observation": False, + "use_render": True, + } + ) + env.reset() + assert env.config["sensors"].keys() == { + "lidar", "side_detector", "lane_line_detector", "semantic", "rgb", "main_camera" + } == env.engine.sensors.keys() + assert env.config["interface_panel"] == ["rgb"] + assert not env.engine.main_window_disabled + env.close() + + # image_observation: False, use_render: 
False, request main_camera + env = MetaDriveEnv( + { + "show_terrain": False, + "window_size": (16, 16), + "sensors": { + "semantic": (SemanticCamera, 400, 300), + "rgb": (RGBCamera, 400, 300), + "main_camera": (), + }, + "vehicle_config": dict(image_source="main_camera"), + "interface_panel": ["rgb", "main_camera"], + "image_observation": False, + "use_render": False, + } + ) + env.reset() + assert env.config["sensors"].keys() == { + "lidar", + "side_detector", + "lane_line_detector", + } == env.engine.sensors.keys() + assert env.config["interface_panel"] == [] + assert env.engine.main_window_disabled + env.close() + + +if __name__ == '__main__': + test_creation() diff --git a/metadrive/tests/tools/adjust_collision_model.py b/metadrive/tests/tools/adjust_collision_model.py index 4dbcf3537..9c9f0fd1e 100644 --- a/metadrive/tests/tools/adjust_collision_model.py +++ b/metadrive/tests/tools/adjust_collision_model.py @@ -41,7 +41,7 @@ o, _ = env.reset() def get_v_path(): - return BaseVehicle.model_collection[env.vehicle.path[0]] + return BaseVehicle.model_collection[env.agent.path[0]] def add_x(): model = get_v_path() @@ -72,9 +72,9 @@ def decrease_y(): o, r, tm, tc, i = env.step([0, 0]) env.render( text={ - "heading_diff": env.vehicle.heading_diff(env.vehicle.lane), - "lane_width": env.vehicle.lane.width, - "lateral": env.vehicle.lane.local_coordinates(env.vehicle.position), + "heading_diff": env.agent.heading_diff(env.agent.lane), + "lane_width": env.agent.lane.width, + "lateral": env.agent.lane.local_coordinates(env.agent.position), "current_seed": env.current_seed } ) diff --git a/metadrive/tests/vis_block/vis_block_base.py b/metadrive/tests/vis_block/vis_block_base.py index a28658b5d..8feeb04ff 100644 --- a/metadrive/tests/vis_block/vis_block_base.py +++ b/metadrive/tests/vis_block/vis_block_base.py @@ -44,7 +44,7 @@ def __init__(self, debug=False, window_type="onscreen"): self.accept("4", self.render.analyze) self.accept("f1", self.toggleDebug) self.accept("1", self.toggleDebug) - self.vehicle = None + self.agent = None self.inputs = None def toggleDebug(self): diff --git a/metadrive/tests/vis_block/vis_yy.py b/metadrive/tests/vis_block/vis_yy.py index c755b57b0..fd22c4443 100644 --- a/metadrive/tests/vis_block/vis_yy.py +++ b/metadrive/tests/vis_block/vis_yy.py @@ -44,12 +44,12 @@ print("vehicle num", len(env.engine.traffic_manager.vehicles)) for i in range(1, 100000): o, r, tm, tc, info = env.step([0, 1]) - info["fuel"] = env.vehicle.energy_consumption + info["fuel"] = env.agent.energy_consumption env.render( text={ - "left": env.vehicle.dist_to_left_side, - "right": env.vehicle.dist_to_right_side, - "white_lane_line": env.vehicle.on_white_continuous_line + "left": env.agent.dist_to_left_side, + "right": env.agent.dist_to_right_side, + "white_lane_line": env.agent.on_white_continuous_line } ) if tm or tc: diff --git a/metadrive/tests/vis_env/vis_acc_break.py b/metadrive/tests/vis_env/vis_acc_break.py index b868e6941..c6901de8b 100644 --- a/metadrive/tests/vis_env/vis_acc_break.py +++ b/metadrive/tests/vis_env/vis_acc_break.py @@ -24,11 +24,11 @@ a = [.0, 1.] 
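# action a = [steering, throttle]; the loop below holds full throttle until 100 km/h, then flips to [0, -1] (full brake) to measure the stopping distance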
for s in range(1, 100000): o, r, tm, tc, info = env.step(a) - if env.vehicle.speed_km_h > 100: + if env.agent.speed_km_h > 100: a = [0, -1] # print("0-100 km/h acc use time:{}".format(s * 0.1)) - pre_pos = env.vehicle.position[0] - if a == [0, -1] and env.vehicle.speed_km_h < 1: - # print("0-100 brake use dist:{}".format(env.vehicle.position[0] - pre_pos)) + pre_pos = env.agent.position[0] + if a == [0, -1] and env.agent.speed_km_h < 1: + # print("0-100 brake use dist:{}".format(env.agent.position[0] - pre_pos)) break env.close() diff --git a/metadrive/tests/vis_env/vis_metadrive_env.py b/metadrive/tests/vis_env/vis_metadrive_env.py index 68a1273a6..30ab4a467 100644 --- a/metadrive/tests/vis_env/vis_metadrive_env.py +++ b/metadrive/tests/vis_env/vis_metadrive_env.py @@ -121,14 +121,14 @@ def lift_terrain_x(): env.engine.accept("0", lower_terrain) env.engine.accept("`", env.engine.terrain.reload_terrain_shader) # env.main_camera.set_follow_lane(True) - # env.vehicle.get_camera("rgb_camera").save_image(env.vehicle) + # env.agent.get_camera("rgb_camera").save_image(env.agent) # for line in env.engine.coordinate_line: - # line.reparentTo(env.vehicle.origin) - # env.vehicle.set_velocity([5, 0], in_local_frame=True) + # line.reparentTo(env.agent.origin) + # env.agent.set_velocity([5, 0], in_local_frame=True) print(time.time() - start) # origin = env.engine.terrain.mesh_collision_terrain.getPos() for s in range(1, 100000): - # env.vehicle.set_velocity([1, 0], in_local_frame=True) + # env.agent.set_velocity([1, 0], in_local_frame=True) o, r, tm, tc, info = env.step([0, 0]) # env.render( # text={ @@ -142,19 +142,19 @@ def lift_terrain_x(): # env.reset() # print(time.time() - s) - # env.vehicle.set_pitch(-np.pi/4) + # env.agent.set_pitch(-np.pi/4) # [0.09231533, 0.491018, 0.47076905, 0.7691619, 0.5, 0.5, 1.0, 0.0, 0.48037243, 0.8904728, 0.81229943, 0.7317231, 1.0, 0.85320455, 0.9747932, 0.65675277, 0.0, 0.5, 0.5] # else: # if s % 100 == 0: # env.close() # env.reset() - # info["fuel"] = env.vehicle.energy_consumption + # info["fuel"] = env.agent.energy_consumption # env.render( # text={ - # # "heading_diff": env.vehicle.heading_diff(env.vehicle.lane), - # # "lane_width": env.vehicle.lane.width, - # # "lane_index": env.vehicle.lane_index, - # # "lateral": env.vehicle.lane.local_coordinates(env.vehicle.position), + # # "heading_diff": env.agent.heading_diff(env.agent.lane), + # # "lane_width": env.agent.lane.width, + # # "lane_index": env.agent.lane_index, + # # "lateral": env.agent.lane.local_coordinates(env.agent.position), # "current_seed": env.current_seed # } # ) diff --git a/metadrive/tests/vis_env/vis_multi_agent_env.py b/metadrive/tests/vis_env/vis_multi_agent_env.py index cecec3788..7492e106d 100644 --- a/metadrive/tests/vis_env/vis_multi_agent_env.py +++ b/metadrive/tests/vis_env/vis_multi_agent_env.py @@ -11,10 +11,10 @@ def __init__(self): "num_agents": 4, "force_destroy": True, "manual_control": True, - "target_vehicle_configs": {"agent{}".format(i): { + "agent_configs": {"agent{}".format(i): { "spawn_longitude": i * 5 } - for i in range(4)} + for i in range(4)} } ) diff --git a/metadrive/tests/vis_env/vis_topdown.py b/metadrive/tests/vis_env/vis_topdown.py index 67eb21eab..be924bf84 100644 --- a/metadrive/tests/vis_env/vis_topdown.py +++ b/metadrive/tests/vis_env/vis_topdown.py @@ -45,18 +45,18 @@ # [9.95036221 0.99503618] start = time.time() o, _ = env.reset() - # env.vehicle.set_velocity([1, 0.1], 10) - # print(env.vehicle.speed) + # env.agent.set_velocity([1, 0.1], 10) + # 
print(env.agent.speed) for s in range(1, 10000): o, r, tm, tc, info = env.step([0, 0.]) - # print("heading: {} forward_direction: {}".format(env.vehicle.heading, env.vehicle.velocity_direction)) + # print("heading: {} forward_direction: {}".format(env.agent.heading, env.agent.velocity_direction)) - # env.vehicle.set_velocity([1, 10], 10) - # # print(env.vehicle.velocity) + # env.agent.set_velocity([1, 10], 10) + # # print(env.agent.velocity) # if s % 100 == 0: # env.close() # env.reset() - # info["fuel"] = env.vehicle.energy_consumption + # info["fuel"] = env.agent.energy_consumption env.render(track_target_vehicle=True, mode="top_down", semantic_map=True) diff --git a/metadrive/tests/vis_env/vis_waymo_env.py b/metadrive/tests/vis_env/vis_waymo_env.py index 43543208b..b02be4de5 100644 --- a/metadrive/tests/vis_env/vis_waymo_env.py +++ b/metadrive/tests/vis_env/vis_waymo_env.py @@ -41,7 +41,7 @@ def reset(self, seed=None): for i in range(1, 100000): o, r, tm, tc, info = env.step([1.0, 0.]) - # print(env.vehicle.height) + # print(env.agent.height) env.render(text={"seed": env.current_seed, "reward": r}) if tm or tc: # print(info["arrive_dest"]) diff --git a/metadrive/tests/vis_functionality/profile_rgb_cam.py b/metadrive/tests/vis_functionality/profile_rgb_cam.py index 8f5d0276e..ce2f27cf1 100644 --- a/metadrive/tests/vis_functionality/profile_rgb_cam.py +++ b/metadrive/tests/vis_functionality/profile_rgb_cam.py @@ -21,9 +21,7 @@ ) env.reset() # # print m to capture rgb observation - env.engine.accept( - "m", env.vehicle.get_camera(env.vehicle.config["image_source"]).save_image, extraArgs=[env.vehicle] - ) + env.engine.accept("m", env.agent.get_camera(env.agent.config["image_source"]).save_image, extraArgs=[env.agent]) start = time.time() for i in range(1, 100000): o, r, tm, tc, info = env.step([0, 0]) diff --git a/metadrive/tests/vis_functionality/vis_depth_cam.py b/metadrive/tests/vis_functionality/vis_depth_cam.py index 1136d02b5..71f1a6b6b 100644 --- a/metadrive/tests/vis_functionality/vis_depth_cam.py +++ b/metadrive/tests/vis_functionality/vis_depth_cam.py @@ -28,14 +28,14 @@ env.reset() def get_image(env): - depth_cam = env.vehicle.get_camera(env.vehicle.config["image_source"]) - rgb_cam = env.vehicle.get_camera("rgb_camera") + depth_cam = env.agent.get_camera(env.agent.config["image_source"]) + rgb_cam = env.agent.get_camera("rgb_camera") for h in range(-180, 180, 20): env.engine.graphicsEngine.renderFrame() depth_cam.get_cam().setH(h) rgb_cam.get_cam().setH(h) - depth_cam.save_image(env.vehicle, "depth_{}.jpg".format(h)) - rgb_cam.save_image(env.vehicle, "rgb_{}.jpg".format(h)) + depth_cam.save_image(env.agent, "depth_{}.jpg".format(h)) + rgb_cam.save_image(env.agent, "rgb_{}.jpg".format(h)) env.engine.screenshot() env.engine.accept("m", get_image, extraArgs=[env]) diff --git a/metadrive/tests/vis_functionality/vis_grayscale_cam.py b/metadrive/tests/vis_functionality/vis_grayscale_cam.py index f4d3fb791..8aa46b94d 100644 --- a/metadrive/tests/vis_functionality/vis_grayscale_cam.py +++ b/metadrive/tests/vis_functionality/vis_grayscale_cam.py @@ -21,17 +21,15 @@ ) env.reset() # # print m to capture rgb observation - env.engine.accept( - "m", env.vehicle.get_camera(env.vehicle.config["image_source"]).save_image, extraArgs=[env.vehicle] - ) + env.engine.accept("m", env.agent.get_camera(env.agent.config["image_source"]).save_image, extraArgs=[env.agent]) import cv2 for i in range(1, 100000): o, r, tm, tc, info = env.step([0, 1]) assert env.observation_space.contains(o) # save - rgb_cam = 
env.vehicle.get_camera(env.vehicle.config["image_source"]) - # rgb_cam.save_image(env.vehicle, name="{}.png".format(i)) + rgb_cam = env.agent.get_camera(env.agent.config["image_source"]) + # rgb_cam.save_image(env.agent, name="{}.png".format(i)) cv2.imshow('img', o["image"][..., -1] / 255) cv2.waitKey(0) diff --git a/metadrive/tests/vis_functionality/vis_mini_map.py b/metadrive/tests/vis_functionality/vis_mini_map.py index 7796fbaaf..6748106f9 100644 --- a/metadrive/tests/vis_functionality/vis_mini_map.py +++ b/metadrive/tests/vis_functionality/vis_mini_map.py @@ -14,7 +14,7 @@ } ) env.reset() - env.engine.accept("m", env.vehicle.get_camera([env.config["image_source"]]).save_image) + env.engine.accept("m", env.agent.get_camera([env.config["image_source"]]).save_image) for i in range(1, 100000): o, r, tm, tc, info = env.step([0, 1]) diff --git a/metadrive/tests/vis_functionality/vis_pedestrian.py b/metadrive/tests/vis_functionality/vis_pedestrian.py index ca9801045..86ce95a83 100644 --- a/metadrive/tests/vis_functionality/vis_pedestrian.py +++ b/metadrive/tests/vis_functionality/vis_pedestrian.py @@ -65,7 +65,7 @@ obj_2.set_velocity([1, 0], 2, in_local_frame=True) c_1.set_velocity([3, 0], 2, in_local_frame=True) # obj_2.show_coordinates() - env.vehicle.set_velocity([10, 0], in_local_frame=False) + env.agent.set_velocity([10, 0], in_local_frame=False) for s in range(1, 10000): # print(c_1.heading_theta) o, r, tm, tc, info = env.step(env.action_space.sample()) @@ -91,12 +91,12 @@ # if s % 100 == 0: # env.close() # env.reset() - # info["fuel"] = env.vehicle.energy_consumption + # info["fuel"] = env.agent.energy_consumption env.render( text={ - "heading_diff": env.vehicle.heading_diff(env.vehicle.lane), - "lane_width": env.vehicle.lane.width, - "lateral": env.vehicle.lane.local_coordinates(env.vehicle.position), + "heading_diff": env.agent.heading_diff(env.agent.lane), + "lane_width": env.agent.lane.width, + "lateral": env.agent.lane.local_coordinates(env.agent.position), "current_seed": env.current_seed, "step": s, } diff --git a/metadrive/tests/vis_functionality/vis_render_msg.py b/metadrive/tests/vis_functionality/vis_render_msg.py index f8915d3a9..b30d9995a 100644 --- a/metadrive/tests/vis_functionality/vis_render_msg.py +++ b/metadrive/tests/vis_functionality/vis_render_msg.py @@ -29,5 +29,5 @@ env.reset() for i in range(1, 100000): o, r, tm, tc, info = env.step([0, 1]) - env.render(text={"Frame": i, "Speed": env.vehicle.speed_km_h}) + env.render(text={"Frame": i, "Speed": env.agent.speed_km_h}) env.close() diff --git a/metadrive/tests/vis_functionality/vis_rgb_cam.py b/metadrive/tests/vis_functionality/vis_rgb_cam.py index 261f441fc..0a887eac7 100644 --- a/metadrive/tests/vis_functionality/vis_rgb_cam.py +++ b/metadrive/tests/vis_functionality/vis_rgb_cam.py @@ -34,8 +34,8 @@ o, r, tm, tc, info = env.step([0, 1]) assert env.observation_space.contains(o) # save - rgb_cam = env.engine.get_sensor(env.vehicle.config["image_source"]) - # rgb_cam.save_image(env.vehicle, name="{}.png".format(i)) + rgb_cam = env.engine.get_sensor(env.agent.config["image_source"]) + # rgb_cam.save_image(env.agent, name="{}.png".format(i)) cv2.imshow('img', o["image"][..., -1]) cv2.waitKey(1) diff --git a/metadrive/tests/vis_functionality/vis_rgb_depth_cam.py b/metadrive/tests/vis_functionality/vis_rgb_depth_cam.py index 3dc4c3f07..44ab56df7 100644 --- a/metadrive/tests/vis_functionality/vis_rgb_depth_cam.py +++ b/metadrive/tests/vis_functionality/vis_rgb_depth_cam.py @@ -8,14 +8,14 @@ if __name__ == "__main__": 
def get_image(env): - depth_cam = env.vehicle.get_camera(env.vehicle.config["image_source"]) - rgb_cam = env.vehicle.get_camera("rgb_camera") + depth_cam = env.agent.get_camera(env.agent.config["image_source"]) + rgb_cam = env.agent.get_camera("rgb_camera") for h in range(-180, 180, 20): env.engine.graphicsEngine.renderFrame() depth_cam.get_cam().setH(h) rgb_cam.get_cam().setH(h) - depth_cam.save_image(env.vehicle, "depth_{}.jpg".format(h)) - rgb_cam.save_image(env.vehicle, "rgb_{}.jpg".format(h)) + depth_cam.save_image(env.agent, "depth_{}.jpg".format(h)) + rgb_cam.save_image(env.agent, "rgb_{}.jpg".format(h)) # env.engine.screenshot() env = SafeMetaDriveEnv( diff --git a/metadrive/tests/vis_functionality/vis_semantic_cam.py b/metadrive/tests/vis_functionality/vis_semantic_cam.py index 2c3b8418d..d00a09a1f 100644 --- a/metadrive/tests/vis_functionality/vis_semantic_cam.py +++ b/metadrive/tests/vis_functionality/vis_semantic_cam.py @@ -8,13 +8,13 @@ if __name__ == "__main__": def get_image(env): - semantic_cam = env.engine.get_sensor(env.vehicle.config["image_source"]) + semantic_cam = env.engine.get_sensor(env.agent.config["image_source"]) # for h in range(-180, 180, 20): # env.engine.graphicsEngine.renderFrame() # semantic_cam.get_cam().setH(h) # # rgb_cam.get_cam().setH(h) - semantic_cam.save_image(env.vehicle, "semantic.jpg".format()) - # rgb_cam.save_image(env.vehicle, "rgb_{}.jpg".format(h)) + semantic_cam.save_image(env.agent, "semantic.jpg".format()) + # rgb_cam.save_image(env.agent, "rgb_{}.jpg".format(h)) # env.engine.screenshot() env = ScenarioEnv( @@ -47,8 +47,8 @@ def get_image(env): o, r, tm, tc, info = env.step([0, 1]) assert env.observation_space.contains(o) # save - # rgb_cam = env.engine.get_sensor(env.vehicle.config["image_source"]) - # # rgb_cam.save_image(env.vehicle, name="{}.png".format(i)) + # rgb_cam = env.engine.get_sensor(env.agent.config["image_source"]) + # # rgb_cam.save_image(env.agent, name="{}.png".format(i)) # cv2.imshow('img', o["image"][..., -1]) # cv2.waitKey(1) diff --git a/metadrive/tests/vis_functionality/vis_two_speed_retrieve.py b/metadrive/tests/vis_functionality/vis_two_speed_retrieve.py index 8a2380328..4a6a25bf9 100644 --- a/metadrive/tests/vis_functionality/vis_two_speed_retrieve.py +++ b/metadrive/tests/vis_functionality/vis_two_speed_retrieve.py @@ -27,8 +27,8 @@ o, r, tm, tc, info = env.step(acc) # print( # "new:{}, old:{}, diff:{}".format( - # env.vehicle.speed_km_h, env.vehicle.system.get_current_speed_km_hour(), - # env.vehicle.speed_km_h - env.vehicle.system.get_current_speed_km_hour() + # env.agent.speed_km_h, env.agent.system.get_current_speed_km_hour(), + # env.agent.speed_km_h - env.agent.system.get_current_speed_km_hour() # ) # ) env.render("Test: {}".format(i)) diff --git a/metadrive/utils/pg/utils.py b/metadrive/utils/pg/utils.py index 5d53b1a98..f4fbee8f3 100644 --- a/metadrive/utils/pg/utils.py +++ b/metadrive/utils/pg/utils.py @@ -11,7 +11,6 @@ from metadrive.component.lane.pg_lane import PGLane from metadrive.constants import CollisionGroup from metadrive.constants import Decoration, MetaDriveType -from metadrive.engine.core.engine_core import EngineCore from metadrive.engine.physics_node import BaseRigidBodyNode, BaseGhostBodyNode from metadrive.utils.coordinates_shift import panda_heading from metadrive.utils.coordinates_shift import panda_vector @@ -152,7 +151,7 @@ def get_all_lanes(roadnet: "NodeRoadNetwork"): def ray_localization( heading: tuple, position: tuple, - engine: EngineCore, + engine, use_heading_filter=True, 
     return_on_lane=False,
 ) -> Union[List[Tuple], Tuple]:
@@ -212,7 +211,7 @@ def ray_localization(
 
 
 def rect_region_detection(
-    engine: EngineCore,
+    engine,
     position: Tuple,
     heading: float,
     heading_direction_length: float,
@@ -258,7 +257,7 @@ def rect_region_detection(
 
 
 def circle_region_detection(
-    engine: EngineCore, position: Tuple, radius: float, detection_group: int, height=10, in_static_world=False
+    engine, position: Tuple, radius: float, detection_group: int, height=10, in_static_world=False
 ):
     """
     :param engine: BaseEngine class
diff --git a/metadrive/utils/utils.py b/metadrive/utils/utils.py
index 72f90470a..f53d15a0c 100644
--- a/metadrive/utils/utils.py
+++ b/metadrive/utils/utils.py
@@ -231,6 +231,27 @@ def _wrapper(*args, **kwargs):
     return _wrapper
 
 
+def time_me_with_prefix(prefix):
+    """
+    Decorator factory that times a function call and prints the elapsed time, prepended with a prefix
+    Args:
+        prefix: a string prepended to the printed timing message
+
+    Returns: a decorator that wraps the function and prints its execution time
+
+    """
+    def decorator(fn):
+        def _wrapper(*args, **kwargs):
+            start = time.time()
+            ret = fn(*args, **kwargs)
+            print(prefix, "function: %s cost %s second" % (fn.__name__, time.time() - start))
+            return ret
+
+        return _wrapper
+
+    return decorator
+
+
 def create_rectangle_from_midpoints(p1, p2, width, length_factor=1.0):
     """
     Create the vertices of a rectangle given two midpoints on opposite sides, the width of the rectangle,
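Two minimal usage sketches of the functionality this patch adds. First, the new
`time_me_with_prefix` decorator from metadrive/utils/utils.py; the profiled function and the
prefix string below are hypothetical placeholders:

    from metadrive.utils.utils import time_me_with_prefix

    @time_me_with_prefix("[sensor-profiling]")
    def expensive_step():
        # stand-in workload for something like env.step()
        return sum(i * i for i in range(1_000_000))

    expensive_step()  # -> [sensor-profiling] function: expensive_step cost <t> second

Note that `_wrapper` does not apply functools.wraps, so the decorated function's __name__
becomes "_wrapper" under introspection; the printed name is still correct because it is read
from the captured `fn`.

Second, a sketch of the sensor-creation behavior exercised by the new
metadrive/tests/test_sensors/test_sensor_creation.py, assuming the standard import paths for
MetaDriveEnv and RGBCamera:

    from metadrive.component.sensors.rgb_camera import RGBCamera
    from metadrive.envs.metadrive_env import MetaDriveEnv

    # With image_observation=True and use_render=False, only the sensors the
    # observation needs are instantiated and the main window stays disabled,
    # mirroring the assertions in the test above.
    env = MetaDriveEnv(
        {
            "sensors": {"rgb": (RGBCamera, 400, 300)},
            "vehicle_config": dict(image_source="rgb"),
            "image_observation": True,
            "use_render": False,
        }
    )
    obs, _ = env.reset()
    assert "rgb" in env.engine.sensors and env.engine.main_window_disabled
    env.close()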