From e232df893fb564efd19ef99e239e6cbc53712780 Mon Sep 17 00:00:00 2001
From: Aidan Curtis
Date: Thu, 30 Jan 2025 21:08:28 -0500
Subject: [PATCH] updates

---
 output.txt                                     |   3 +
 .../llm_planning_prompts/zero_shot.txt         |   4 +-
 predicators/envs/mock_spot_env.py              |   2 +-
 scripts/mock_experiments.py                    | 110 +++++++++---------
 4 files changed, 62 insertions(+), 57 deletions(-)
 create mode 100644 output.txt

diff --git a/output.txt b/output.txt
new file mode 100644
index 000000000..76f14af2a
--- /dev/null
+++ b/output.txt
@@ -0,0 +1,3 @@
+['python', 'predicators/main.py', '--env', 'mock_spot_drawer_cleaning', '--seed', '0', '--num_train_tasks', '0', '--num_test_tasks', '1', '--bilevel_plan_without_sim', 'True', '--horizon', '20', '--load_approach', '--approach', 'vlm_open_loop', '--perceiver', 'mock_spot_perceiver', '--vlm_model_name', 'gpt-4o-mini', '--llm_temperature', '0.2', '--execution_monitor', 'mpc']
+['python', 'predicators/main.py', '--env', 'mock_spot_drawer_cleaning', '--seed', '0', '--num_train_tasks', '0', '--num_test_tasks', '1', '--bilevel_plan_without_sim', 'True', '--horizon', '20', '--load_approach', '--approach', 'vlm_open_loop', '--perceiver', 'mock_spot_perceiver', '--vlm_model_name', 'gpt-4o-mini', '--llm_temperature', '0.2', '--execution_monitor', 'mpc']
+pybullet build time: Nov 28 2023 23:52:03

diff --git a/predicators/approaches/llm_planning_prompts/zero_shot.txt b/predicators/approaches/llm_planning_prompts/zero_shot.txt
index e1f955b30..14c6a80f3 100644
--- a/predicators/approaches/llm_planning_prompts/zero_shot.txt
+++ b/predicators/approaches/llm_planning_prompts/zero_shot.txt
@@ -4,11 +4,12 @@ as well as continuous arguments (indicated by `params_space` field, which is for
 
 {options}
 
+Preconditions indicate the properties of the scene that must be true for you to execute an action. The effects are what will happen to the scene when you execute the actions.
 You are only allowed to use the provided skills. It's essential to stick to the format of these basic skills. When creating a plan, replace the arguments of each skill with specific items or continuous parameters. You can first describe the provided scene and what it indicates about the provided task objects to help you come up with a plan.
 
-Here is a list of objects present in this scene for this task, along with their type (formatted as <object_name>: <type_name>):
+Here is a list of objects present in this scene for this task, along with their type (formatted as <object_name>:<type_name>):
 {typed_objects}
 
 And here are the available types (formatted in PDDL style as `<type_1> <type_2> ... - <parent_type>`). You can infer a hierarchy of types via this:
@@ -39,6 +40,7 @@ MoveToObject(robot:robot, cup:movable_object)[]
 PickObject(robot:robot, cup:movable_object)[]
 MoveToLocation(robot:robot, table:surface)[]
 PlaceObject(robot:robot, cup:movable_object, table:surface)[]
+OpenDrawer(robot:robot, drawer:container)[]
 
 Do not include any numbers, bullet points, code blocks, or other formatting. Just write the plan exactly as shown above.
 ...
\ No newline at end of file

diff --git a/predicators/envs/mock_spot_env.py b/predicators/envs/mock_spot_env.py
index 3a2a7c4d4..d5a1f9a49 100644
--- a/predicators/envs/mock_spot_env.py
+++ b/predicators/envs/mock_spot_env.py
@@ -900,7 +900,7 @@ class MockSpotDrawerCleaningEnv(MockSpotEnv):
     # Set the preset data directory
     # preset_data_dir = os.path.join("mock_env_data", "MockSpotDrawerCleaningEnv")
     # preset_data_dir = os.path.join("mock_env_data", "saved_task_phone_drawer_cleaning")
-    preset_data_dir = os.path.join("mock_env_data", "test_mock_task", "MockSpotDrawerCleaningEnv")
+    preset_data_dir = os.path.join("mock_env_data", "MockSpotDrawerCleaningEnv")
 
     @classmethod
     def get_name(cls) -> str:

diff --git a/scripts/mock_experiments.py b/scripts/mock_experiments.py
index c3eecc7b3..76e8f7a4f 100755
--- a/scripts/mock_experiments.py
+++ b/scripts/mock_experiments.py
@@ -88,31 +88,31 @@ def main(args: argparse.Namespace) -> None:
 
     # Define all planner configurations
     planners = [
-        # {
-        #     "name": "oracle",
-        #     "args": ["--approach",
-        #              "oracle",
-        #              "--perceiver", "mock_spot_perceiver"]
-        # },
-        # {
-        #     "name": "random",
-        #     "args": [
-        #         "--approach", "random_options",
-        #         "--random_options_max_tries", "1000",
-        #         "--max_num_steps_option_rollout", "100",
-        #         "--perceiver", "mock_spot_perceiver",
-        #         "--timeout", "60",
-        #     ]
-        # },
-        # {
-        #     "name": "llm_open_loop",
-        #     "args": [
-        #         "--approach", "llm_open_loop",
-        #         "--perceiver", "mock_spot_perceiver",
-        #         "--llm_model_name", "gpt-4o",
-        #         "--llm_temperature", "0.2"
-        #     ]
-        # },
+        {
+            "name": "oracle",
+            "args": ["--approach",
+                     "oracle",
+                     "--perceiver", "mock_spot_perceiver"]
+        },
+        {
+            "name": "random",
+            "args": [
+                "--approach", "random_options",
+                "--random_options_max_tries", "1000",
+                "--max_num_steps_option_rollout", "100",
+                "--perceiver", "mock_spot_perceiver",
+                "--timeout", "60",
+            ]
+        },
+        {
+            "name": "llm_open_loop",
+            "args": [
+                "--approach", "llm_open_loop",
+                "--perceiver", "mock_spot_perceiver",
+                "--llm_model_name", "gpt-4o",
+                "--llm_temperature", "0.2"
+            ]
+        },
         {
             "name": "llm_closed_loop",
             "args": [
@@ -124,36 +124,36 @@ def main(args: argparse.Namespace) -> None:
                 # "--execution_monitor", "expected_atoms"
             ]
         },
-        # {
-        #     "name": "vlm_open_loop",
-        #     "args": [
-        #         "--approach", "vlm_open_loop",
-        #         "--perceiver", "mock_spot_perceiver",
-        #         "--vlm_model_name", "gpt-4o",
-        #         "--llm_temperature", "0.2"
-        #     ]
-        # },
-        # {
-        #     "name": "vlm_closed_loop",
-        #     "args": [
-        #         "--approach", "vlm_open_loop",
-        #         "--perceiver", "mock_spot_perceiver",
-        #         "--vlm_model_name", "gpt-4o",
-        #         "--llm_temperature", "0.2",
-        #         "--execution_monitor", "mpc"
-        #         # "--execution_monitor", "expected_atoms"
-        #     ]
-        # },
-        # {
-        #     "name": "vlm_captioning",
-        #     "args": [
-        #         "--approach", "vlm_captioning",
-        #         "--perceiver", "vlm_perceiver",
-        #         "--vlm_model_name", "gpt-4o",
-        #         "--vlm_temperature", "0.2",
-        #         "--execution_monitor", "mpc"
-        #     ]
-        # }
+        {
+            "name": "vlm_open_loop",
+            "args": [
+                "--approach", "vlm_open_loop",
+                "--perceiver", "mock_spot_perceiver",
+                "--vlm_model_name", "gpt-4o",
+                "--llm_temperature", "0.2"
+            ]
+        },
+        {
+            "name": "vlm_closed_loop",
+            "args": [
+                "--approach", "vlm_open_loop",
+                "--perceiver", "mock_spot_perceiver",
+                "--vlm_model_name", "gpt-4o",
+                "--llm_temperature", "0.2",
+                "--execution_monitor", "mpc"
+                # "--execution_monitor", "expected_atoms"
+            ]
+        },
+        {
+            "name": "vlm_captioning",
+            "args": [
+                "--approach", "vlm_captioning",
+                "--perceiver", "vlm_perceiver",
+                "--vlm_model_name", "gpt-4o",
"--vlm_temperature", "0.2", + "--execution_monitor", "mpc" + ] + } ] # Run each planner