Skip to content

Commit

Permalink
Merge branch 'lis-spot/update-fm-baselines-experiments' of https://github.com/bdaiinstitute/predicators into lis-spot/update-fm-baselines-experiments
Browse files Browse the repository at this point in the history
  • Loading branch information
lf-zhao committed Jan 31, 2025
2 parents a841411 + e232df8 commit c854a14
Show file tree
Hide file tree
Showing 4 changed files with 62 additions and 57 deletions.
3 changes: 3 additions & 0 deletions output.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
['python', 'predicators/main.py', '--env', 'mock_spot_drawer_cleaning', '--seed', '0', '--num_train_tasks', '0', '--num_test_tasks', '1', '--bilevel_plan_without_sim', 'True', '--horizon', '20', '--load_approach', '--approach', 'vlm_open_loop', '--perceiver', 'mock_spot_perceiver', '--vlm_model_name', 'gpt-4o-mini', '--llm_temperature', '0.2', '--execution_monitor', 'mpc']
['python', 'predicators/main.py', '--env', 'mock_spot_drawer_cleaning', '--seed', '0', '--num_train_tasks', '0', '--num_test_tasks', '1', '--bilevel_plan_without_sim', 'True', '--horizon', '20', '--load_approach', '--approach', 'vlm_open_loop', '--perceiver', 'mock_spot_perceiver', '--vlm_model_name', 'gpt-4o-mini', '--llm_temperature', '0.2', '--execution_monitor', 'mpc']
pybullet build time: Nov 28 2023 23:52:03
4 changes: 3 additions & 1 deletion predicators/approaches/llm_planning_prompts/zero_shot.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,12 @@ as well as continuous arguments (indicated by `params_space` field, which is for

{options}

Preconditions indicate the properties of the scene that must be true for you to execute an action. The effects are what will happen to the scene when you execute the actions.
You are only allowed to use the provided skills. It's essential to stick to the format of these basic skills. When creating a plan, replace
the arguments of each skill with specific items or continuous parameters. You can first describe the provided scene and what it indicates about the provided
task objects to help you come up with a plan.

Here is a list of objects present in this scene for this task, along with their type (formatted as <object_name>: <type_name>):
Here is a list of objects present in this scene for this task, along with their type (formatted as <object_name>:<type_name>):
{typed_objects}

And here are the available types (formatted in PDDL style as `<type_name1> <type_name2>... - <parent_type_name>`). You can infer a hierarchy of types via this:
Expand Down Expand Up @@ -39,6 +40,7 @@ MoveToObject(robot:robot, cup:movable_object)[]
PickObject(robot:robot, cup:movable_object)[]
MoveToLocation(robot:robot, table:surface)[]
PlaceObject(robot:robot, cup:movable_object, table:surface)[]
OpenDrawer(robot:robot, drawer:container)[]

Do not include any numbers, bullet points, code blocks, or other formatting. Just write the plan exactly as shown above.
...
2 changes: 1 addition & 1 deletion predicators/envs/mock_spot_env.py
Original file line number Diff line number Diff line change
Expand Up @@ -900,7 +900,7 @@ class MockSpotDrawerCleaningEnv(MockSpotEnv):
# Set the preset data directory
# preset_data_dir = os.path.join("mock_env_data", "MockSpotDrawerCleaningEnv")
# preset_data_dir = os.path.join("mock_env_data", "saved_task_phone_drawer_cleaning")
preset_data_dir = os.path.join("mock_env_data", "test_mock_task", "MockSpotDrawerCleaningEnv")
preset_data_dir = os.path.join("mock_env_data", "MockSpotDrawerCleaningEnv")

@classmethod
def get_name(cls) -> str:
Expand Down
110 changes: 55 additions & 55 deletions scripts/mock_experiments.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,31 +88,31 @@ def main(args: argparse.Namespace) -> None:

# Define all planner configurations
planners = [
# {
# "name": "oracle",
# "args": ["--approach",
# "oracle",
# "--perceiver", "mock_spot_perceiver"]
# },
# {
# "name": "random",
# "args": [
# "--approach", "random_options",
# "--random_options_max_tries", "1000",
# "--max_num_steps_option_rollout", "100",
# "--perceiver", "mock_spot_perceiver",
# "--timeout", "60",
# ]
# },
# {
# "name": "llm_open_loop",
# "args": [
# "--approach", "llm_open_loop",
# "--perceiver", "mock_spot_perceiver",
# "--llm_model_name", "gpt-4o",
# "--llm_temperature", "0.2"
# ]
# },
{
"name": "oracle",
"args": ["--approach",
"oracle",
"--perceiver", "mock_spot_perceiver"]
},
{
"name": "random",
"args": [
"--approach", "random_options",
"--random_options_max_tries", "1000",
"--max_num_steps_option_rollout", "100",
"--perceiver", "mock_spot_perceiver",
"--timeout", "60",
]
},
{
"name": "llm_open_loop",
"args": [
"--approach", "llm_open_loop",
"--perceiver", "mock_spot_perceiver",
"--llm_model_name", "gpt-4o",
"--llm_temperature", "0.2"
]
},
{
"name": "llm_closed_loop",
"args": [
Expand All @@ -124,36 +124,36 @@ def main(args: argparse.Namespace) -> None:
# "--execution_monitor", "expected_atoms"
]
},
# {
# "name": "vlm_open_loop",
# "args": [
# "--approach", "vlm_open_loop",
# "--perceiver", "mock_spot_perceiver",
# "--vlm_model_name", "gpt-4o",
# "--llm_temperature", "0.2"
# ]
# },
# {
# "name": "vlm_closed_loop",
# "args": [
# "--approach", "vlm_open_loop",
# "--perceiver", "mock_spot_perceiver",
# "--vlm_model_name", "gpt-4o",
# "--llm_temperature", "0.2",
# "--execution_monitor", "mpc"
# # "--execution_monitor", "expected_atoms"
# ]
# },
# {
# "name": "vlm_captioning",
# "args": [
# "--approach", "vlm_captioning",
# "--perceiver", "vlm_perceiver",
# "--vlm_model_name", "gpt-4o",
# "--vlm_temperature", "0.2",
# "--execution_monitor", "mpc"
# ]
# }
{
"name": "vlm_open_loop",
"args": [
"--approach", "vlm_open_loop",
"--perceiver", "mock_spot_perceiver",
"--vlm_model_name", "gpt-4o",
"--llm_temperature", "0.2"
]
},
{
"name": "vlm_closed_loop",
"args": [
"--approach", "vlm_open_loop",
"--perceiver", "mock_spot_perceiver",
"--vlm_model_name", "gpt-4o",
"--llm_temperature", "0.2",
"--execution_monitor", "mpc"
# "--execution_monitor", "expected_atoms"
]
},
{
"name": "vlm_captioning",
"args": [
"--approach", "vlm_captioning",
"--perceiver", "vlm_perceiver",
"--vlm_model_name", "gpt-4o",
"--vlm_temperature", "0.2",
"--execution_monitor", "mpc"
]
}
]

# Run each planner
Expand Down

0 comments on commit c854a14

Please sign in to comment.