From 84e3cf59b817f4d3d186c9d2252cd9a671fad200 Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Mon, 19 Jun 2023 22:53:04 -0400 Subject: [PATCH 01/17] 20 task yaml update --- predicators/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/predicators/main.py b/predicators/main.py index de27151760..e7d14c626e 100644 --- a/predicators/main.py +++ b/predicators/main.py @@ -123,7 +123,7 @@ def main() -> None: # Run the full pipeline. _run_pipeline(env, approach, stripped_train_tasks, offline_dataset) script_time = time.perf_counter() - script_start - if CFG.env == "behavior": # pragma: no cover + if CFG.env == "behavior" and CFG.behavior_save_video: # pragma: no cover assert isinstance(env, BehaviorEnv) task_name = str(CFG.behavior_task_list)[2:-2] env.igibson_behavior_env.simulator.viewer.make_video( From 60bd2be916605a6373bcc6860007bd5dd30e2c42 Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Mon, 19 Jun 2023 23:01:58 -0400 Subject: [PATCH 02/17] updated yaml --- scripts/configs/behavior_20_evaluation.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/configs/behavior_20_evaluation.yaml b/scripts/configs/behavior_20_evaluation.yaml index d57d536fc5..281785313e 100644 --- a/scripts/configs/behavior_20_evaluation.yaml +++ b/scripts/configs/behavior_20_evaluation.yaml @@ -153,8 +153,9 @@ FLAGS: # general flags timeout: 1000.0 horizon: 1000 behavior_override_learned_samplers: True + behavior_ignore_discover_failures: True num_train_tasks: 0 num_test_tasks: 5 -START_SEED: 0 +START_SEED: 465 NUM_SEEDS: 1 USE_GPU: False From 74c3704e9b906b547e2cdffe9a646e3d30f1fdd1 Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Mon, 19 Jun 2023 23:19:22 -0400 Subject: [PATCH 03/17] updated to 10 seeds --- scripts/configs/behavior_20_evaluation.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/configs/behavior_20_evaluation.yaml b/scripts/configs/behavior_20_evaluation.yaml index 281785313e..bde57f275b 100644 --- a/scripts/configs/behavior_20_evaluation.yaml +++ b/scripts/configs/behavior_20_evaluation.yaml @@ -157,5 +157,5 @@ FLAGS: # general flags num_train_tasks: 0 num_test_tasks: 5 START_SEED: 465 -NUM_SEEDS: 1 +NUM_SEEDS: 10 USE_GPU: False From 84cd9666a254c4f5d2cd8602a64065b11e67da85 Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Tue, 19 Sep 2023 17:16:07 -0400 Subject: [PATCH 04/17] always create dataset --- predicators/main.py | 12 +++++------- scripts/configs/behavior_20_evaluation.yaml | 3 ++- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/predicators/main.py b/predicators/main.py index e7d14c626e..72ef8dda49 100644 --- a/predicators/main.py +++ b/predicators/main.py @@ -113,13 +113,11 @@ def main() -> None: # Create the agent (approach). approach = create_approach(CFG.approach, preds, options, env.types, env.action_space, stripped_train_tasks) - if approach.is_learning_based: - # Create the offline dataset. Note that this needs to be done using - # the non-stripped train tasks because dataset generation may need - # to use the oracle predicates (e.g. demo data generation). - offline_dataset = create_dataset(env, train_tasks, options) - else: - offline_dataset = None + # Create the offline dataset. Note that this needs to be done using + # the non-stripped train tasks because dataset generation may need + # to use the oracle predicates (e.g. demo data generation). + offline_dataset = create_dataset(env, train_tasks, options) + # Run the full pipeline. 
_run_pipeline(env, approach, stripped_train_tasks, offline_dataset)
     script_time = time.perf_counter() - script_start

diff --git a/scripts/configs/behavior_20_evaluation.yaml b/scripts/configs/behavior_20_evaluation.yaml
index bde57f275b..2049399daf 100644
--- a/scripts/configs/behavior_20_evaluation.yaml
+++ b/scripts/configs/behavior_20_evaluation.yaml
@@ -154,7 +154,8 @@ FLAGS: # general flags
   horizon: 1000
   behavior_override_learned_samplers: True
   behavior_ignore_discover_failures: True
-  num_train_tasks: 0
+  create_training_dataset: True
+  num_train_tasks: 5
   num_test_tasks: 5
 START_SEED: 465
 NUM_SEEDS: 10
 USE_GPU: False

From f94d4e9a875a1275a8ac43ed71d29168dc1a1788 Mon Sep 17 00:00:00 2001
From: Willie McClinton
Date: Thu, 21 Sep 2023 09:43:31 -0400
Subject: [PATCH 05/17] script to run 8 tasks

---
 .../configs/behavior_last_8_evaluation.yaml | 78 +++++++++++++++++++
 1 file changed, 78 insertions(+)
 create mode 100644 scripts/configs/behavior_last_8_evaluation.yaml

diff --git a/scripts/configs/behavior_last_8_evaluation.yaml b/scripts/configs/behavior_last_8_evaluation.yaml
new file mode 100644
index 0000000000..757f3824e1
--- /dev/null
+++ b/scripts/configs/behavior_last_8_evaluation.yaml
@@ -0,0 +1,78 @@
+# A configuration file for the last 8 of the 20 BEHAVIOR tasks.
+---
+APPROACHES:
+  my-oracle:
+    NAME: "oracle"
+    FLAGS:
+      plan_only_eval: True
+      sesame_task_planner: fdopt
+ENVS:
+  picking_up_trash-Wainscott_0_int:
+    NAME: "behavior"
+    FLAGS:
+      behavior_train_scene_name: Wainscott_0_int
+      behavior_test_scene_name: Wainscott_0_int
+      behavior_task_list: "[picking_up_trash]"
+      behavior_option_model_eval: True
+  polishing_furniture-Rs_int:
+    NAME: "behavior"
+    FLAGS:
+      behavior_train_scene_name: Rs_int
+      behavior_test_scene_name: Rs_int
+      behavior_task_list: "[polishing_furniture]"
+      behavior_option_model_eval: True
+  polishing_silver-Wainscott_0_int:
+    NAME: "behavior"
+    FLAGS:
+      behavior_train_scene_name: Wainscott_0_int
+      behavior_test_scene_name: Wainscott_0_int
+      behavior_task_list: "[polishing_silver]"
+      behavior_option_model_eval: True
+  putting_leftovers_away-Ihlen_1_int:
+    NAME: "behavior"
+    FLAGS:
+      behavior_train_scene_name: Ihlen_1_int
+      behavior_test_scene_name: Ihlen_1_int
+      behavior_task_list: "[putting_leftovers_away]"
+      behavior_option_model_eval: True
+  re-shelving_library_books-Pomaria_1_int:
+    NAME: "behavior"
+    FLAGS:
+      behavior_train_scene_name: Pomaria_1_int
+      behavior_test_scene_name: Pomaria_1_int
+      behavior_task_list: "[re-shelving_library_books]"
+      behavior_option_model_eval: True
+  sorting_books-Pomaria_1_int:
+    NAME: "behavior"
+    FLAGS:
+      behavior_train_scene_name: Pomaria_1_int
+      behavior_test_scene_name: Pomaria_1_int
+      behavior_task_list: "[sorting_books]"
+      behavior_option_model_eval: True
+  throwing_away_leftovers-Ihlen_1_int:
+    NAME: "behavior"
+    FLAGS:
+      behavior_train_scene_name: Ihlen_1_int
+      behavior_test_scene_name: Ihlen_1_int
+      behavior_task_list: "[throwing_away_leftovers]"
+      behavior_option_model_eval: True
+  unpacking_suitcase-Ihlen_1_int:
+    NAME: "behavior"
+    FLAGS:
+      behavior_train_scene_name: Ihlen_1_int
+      behavior_test_scene_name: Ihlen_1_int
+      behavior_task_list: "[unpacking_suitcase]"
+      behavior_option_model_eval: True
+ARGS: {}
+FLAGS: # general flags
+  offline_data_planning_timeout: 1000.0
+  timeout: 1000.0
+  horizon: 1000
+  behavior_override_learned_samplers: True
+  behavior_ignore_discover_failures: True
+  create_training_dataset: True
+  num_train_tasks: 5
+  num_test_tasks: 5
+START_SEED: 465
+NUM_SEEDS: 10
+USE_GPU: False

From
e249e0918209e9b11172f9c5ab0aec243320a8a6 Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Wed, 4 Oct 2023 15:52:52 -0400 Subject: [PATCH 06/17] added extra 5 tasks for eval --- .../configs/behavior_20_evaluation_extra.yaml | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 scripts/configs/behavior_20_evaluation_extra.yaml diff --git a/scripts/configs/behavior_20_evaluation_extra.yaml b/scripts/configs/behavior_20_evaluation_extra.yaml new file mode 100644 index 0000000000..2cec26ec63 --- /dev/null +++ b/scripts/configs/behavior_20_evaluation_extra.yaml @@ -0,0 +1,57 @@ +# A configuration file for 20 BEHAVIOR tasks. +--- +APPROACHES: + my-oracle: + NAME: "oracle" + FLAGS: + plan_only_eval: True + sesame_task_planner: fdopt +ENVS: + setting_mousetraps-Benevolence_2_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Benevolence_2_int + behavior_test_scene_name: Benevolence_2_int + behavior_task_list: "[setting_mousetraps]" + behavior_option_model_eval: True + cleaning_a_car-Ihlen_0_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Ihlen_0_int + behavior_test_scene_name: Ihlen_0_int + behavior_task_list: "[cleaning_a_car]" + behavior_option_model_eval: True + installing_a_fax_machine-Pomaria_0_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Pomaria_0_int + behavior_test_scene_name: Pomaria_0_int + behavior_task_list: "[installing_a_fax_machine]" + behavior_option_model_eval: True + loading_the_dishwasher-Pomaria_1_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Pomaria_1_int + behavior_test_scene_name: Pomaria_1_int + behavior_task_list: "[loading_the_dishwasher]" + behavior_option_model_eval: True + re-shelving_library_books-Pomaria_1_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Pomaria_1_int + behavior_test_scene_name: Pomaria_1_int + behavior_task_list: "[re-shelving_library_books]" + behavior_option_model_eval: True +ARGS: {} +FLAGS: # general flags + offline_data_planning_timeout: 1000.0 + timeout: 1000.0 + horizon: 1000 + behavior_override_learned_samplers: True + behavior_ignore_discover_failures: True + create_training_dataset: True + num_train_tasks: 5 + num_test_tasks: 5 +START_SEED: 465 +NUM_SEEDS: 10 +USE_GPU: False From 04be48947c6c39a52b9fd501b78e6bf60a0e5e02 Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Fri, 6 Oct 2023 00:14:38 -0400 Subject: [PATCH 07/17] still broken --- predicators/behavior_utils/behavior_utils.py | 2 +- .../behavior_utils/motion_planner_fns.py | 1 + .../behavior_utils/option_model_fns.py | 10 ++++++--- predicators/envs/behavior.py | 2 +- predicators/main.py | 21 ++++++++++++++++++- predicators/planning.py | 3 ++- predicators/utils.py | 5 ++++- 7 files changed, 36 insertions(+), 8 deletions(-) diff --git a/predicators/behavior_utils/behavior_utils.py b/predicators/behavior_utils/behavior_utils.py index bc847a5fae..5af4fdbf0f 100644 --- a/predicators/behavior_utils/behavior_utils.py +++ b/predicators/behavior_utils/behavior_utils.py @@ -1076,7 +1076,7 @@ def sample_navigation_params(igibson_behavior_env: "BehaviorEnv", Implemented in a separate method to enable code reuse in option_model_fns. 
""" - closeness_limit = 2.00 + closeness_limit = 1.00 #2.00 nearness_limit = 0.15 distance = nearness_limit + ( (closeness_limit - nearness_limit) * rng.random()) diff --git a/predicators/behavior_utils/motion_planner_fns.py b/predicators/behavior_utils/motion_planner_fns.py index 633e9b60f1..eff490ef19 100644 --- a/predicators/behavior_utils/motion_planner_fns.py +++ b/predicators/behavior_utils/motion_planner_fns.py @@ -135,6 +135,7 @@ def sample_fn(env: "BehaviorEnv", obstacles=obstacles, override_sample_fn=lambda: sample_fn(env, rng), rng=rng, + max_distance=0.01, ) p.restoreState(state) else: diff --git a/predicators/behavior_utils/option_model_fns.py b/predicators/behavior_utils/option_model_fns.py index 82eedfbc02..979fe57d83 100644 --- a/predicators/behavior_utils/option_model_fns.py +++ b/predicators/behavior_utils/option_model_fns.py @@ -123,7 +123,11 @@ def graspObjectOptionModel(_state: State, env: "BehaviorEnv") -> None: rh_final_grasp_postion, p.getQuaternionFromEuler(rh_final_grasp_orn)) - # 3. Close hand and simulate grasp + # 1.1 step the environment a few timesteps to update location. + for _ in range(3): + env.step(np.zeros(env.action_space.shape)) + + # 2. Close hand and simulate grasp. a = np.zeros(env.action_space.shape, dtype=float) a[16] = 1.0 assisted_grasp_action = np.zeros(28, dtype=float) @@ -141,7 +145,7 @@ def graspObjectOptionModel(_state: State, env: "BehaviorEnv") -> None: env.robots[0].parts["right_hand"].handle_assisted_grasping( assisted_grasp_action, override_ag_data=(grasp_obj_body_id, -1), - bypass_force_check=True) + bypass_force_check=False) # 3.2 step the environment a few timesteps to complete grasp for _ in range(5): env.step(a) @@ -216,7 +220,7 @@ def placeOntopObjectOptionModel(_init_state: State, rh_orig_grasp_position, rh_orig_grasp_orn) # this is running a series of zero action to step simulator # to let the object fall into its place - for _ in range(15): + for _ in range(25): env.step(np.zeros(env.action_space.shape)) # Check whether object is ontop of not a target object objs_under = set() diff --git a/predicators/envs/behavior.py b/predicators/envs/behavior.py index 82ef068d54..ae60280ead 100644 --- a/predicators/envs/behavior.py +++ b/predicators/envs/behavior.py @@ -775,7 +775,7 @@ def _reachable_classifier(self, return False return (np.linalg.norm( # type: ignore np.array(robot_obj.get_position()) - - np.array(ig_obj.get_position())) < 2) + np.array(ig_obj.get_position())) < 1.0) def _reachable_nothing_classifier( self, diff --git a/predicators/main.py b/predicators/main.py index 72ef8dda49..02478e3399 100644 --- a/predicators/main.py +++ b/predicators/main.py @@ -34,6 +34,8 @@ --seed 0 --excluded_predicates all """ +import curses +import numpy as np import logging import os import sys @@ -51,7 +53,7 @@ BilevelPlanningApproach from predicators.approaches.gnn_approach import GNNApproach from predicators.datasets import create_dataset -from predicators.envs import BaseEnv, create_new_env +from predicators.envs import BaseEnv, create_new_env, get_or_create_env from predicators.envs.behavior import BehaviorEnv from predicators.planning import _run_plan_with_option_model from predicators.settings import CFG @@ -340,6 +342,23 @@ def _run_testing(env: BaseEnv, approach: BaseApproach) -> Metrics: last_plan = approach.get_last_plan() last_traj = approach.get_last_traj() option_model_start_time = time.time() + if CFG.env == "behavior" and \ + CFG.behavior_mode == 'iggui': # pragma: no cover + env = get_or_create_env('behavior') + assert 
isinstance(env, BehaviorEnv) + win = curses.initscr() + win.nodelay(True) + win.addstr( + 0, 0, + "VIDEO CREATION MODE: You have time to position the iggui window \ + to the location you want for recording. Type 'q' to indicate you \ + have finished positioning: ") + flag = win.getch() + while flag == -1 or chr(flag) != 'q': + env.igibson_behavior_env.step(np.zeros(env.action_space.shape)) + flag = win.getch() + curses.endwin() + logging.info("VIDEO CREATION MODE: Starting planning.") traj, solved = _run_plan_with_option_model( task, test_task_idx, approach.get_option_model(), last_plan, last_traj) diff --git a/predicators/planning.py b/predicators/planning.py index 20a7773607..05ce01d359 100644 --- a/predicators/planning.py +++ b/predicators/planning.py @@ -687,6 +687,7 @@ def _run_plan_with_option_model( lambda s, m, o, p: True).ground(option.objects, option.params) action_option.memory = option.memory actions[idx].set_option(action_option) + time.sleep(2) # Since we're not checking the expected_atoms, we need to # explicitly check if the goal is achieved. if task.goal_holds(traj[-1]): @@ -1011,7 +1012,7 @@ def _sesame_plan_with_fast_downward( potentially add effects to null operators, but this ability is not implemented here. """ - init_atoms = utils.abstract(task.init, predicates) + init_atoms = utils.abstract(task.init, predicates, skip_allclose_check=True) objects = list(task.init) start_time = time.perf_counter() # Create the domain and problem strings, then write them to tempfiles. diff --git a/predicators/utils.py b/predicators/utils.py index e62855b853..9ea513ecda 100644 --- a/predicators/utils.py +++ b/predicators/utils.py @@ -2542,7 +2542,10 @@ class VideoMonitor(Monitor): def observe(self, state: State, action: Optional[Action]) -> None: del state # unused - self._video.extend(self._render_fn(action, None)) + render_state = self._render_fn(action, None) + self._video.extend(render_state) + self._video.extend(render_state) + self._video.extend(render_state) def get_video(self) -> Video: """Return the video.""" From fe29d45d7b6df0307541486f1b3c1e41b75e7d76 Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Fri, 6 Oct 2023 12:26:36 -0400 Subject: [PATCH 08/17] added yaml file --- predicators/utils.py | 2 +- scripts/configs/behavior_20_videos.yaml | 164 ++++++++++++++++++++++++ 2 files changed, 165 insertions(+), 1 deletion(-) create mode 100644 scripts/configs/behavior_20_videos.yaml diff --git a/predicators/utils.py b/predicators/utils.py index 9ea513ecda..df57f3208f 100644 --- a/predicators/utils.py +++ b/predicators/utils.py @@ -2622,7 +2622,7 @@ def save_video(outfile: str, video: Video) -> None: """Save the video to video_dir/outfile.""" outdir = CFG.video_dir os.makedirs(outdir, exist_ok=True) - outpath = os.path.join(outdir, outfile) + outpath = os.path.join(outdir, str(CFG.seed) + "_" + outfile) imageio.mimwrite(outpath, video, fps=CFG.video_fps) # type: ignore logging.info(f"Wrote out to {outpath}") diff --git a/scripts/configs/behavior_20_videos.yaml b/scripts/configs/behavior_20_videos.yaml new file mode 100644 index 0000000000..0e27ab6bfb --- /dev/null +++ b/scripts/configs/behavior_20_videos.yaml @@ -0,0 +1,164 @@ +# A configuration file for 20 BEHAVIOR tasks. 
+--- +APPROACHES: + my-oracle: + NAME: "oracle" + FLAGS: + sesame_task_planner: fdopt +ENVS: + # installing_a_fax_machine-Pomaria_0_int: + # NAME: "behavior" + # FLAGS: + # behavior_train_scene_name: Pomaria_0_int + # behavior_test_scene_name: Pomaria_0_int + # behavior_task_list: "[installing_a_fax_machine]" + # behavior_option_model_eval: True + # locking_every_door-Pomaria_0_int: + # NAME: "behavior" + # FLAGS: + # behavior_train_scene_name: Pomaria_0_int + # behavior_test_scene_name: Pomaria_0_int + # behavior_task_list: "[locking_every_door]" + # behavior_option_model_eval: True + # locking_every_window-Rs_int: + # NAME: "behavior" + # FLAGS: + # behavior_train_scene_name: Rs_int + # behavior_test_scene_name: Rs_int + # behavior_task_list: "[locking_every_window]" + # behavior_option_model_eval: True + opening_packages-Pomaria_2_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Pomaria_2_int + behavior_test_scene_name: Pomaria_2_int + behavior_task_list: "[opening_packages]" + behavior_option_model_eval: True + opening_presents-Pomaria_2_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Pomaria_2_int + behavior_test_scene_name: Pomaria_2_int + behavior_task_list: "[opening_presents]" + behavior_option_model_eval: True + boxing_books_up_for_storage-Benevolence_1_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Benevolence_1_int + behavior_test_scene_name: Benevolence_1_int + behavior_task_list: "[boxing_books_up_for_storage]" + behavior_option_model_eval: True + collect_misplaced_items-Merom_1_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Merom_1_int + behavior_test_scene_name: Merom_1_int + behavior_task_list: "[collect_misplaced_items]" + behavior_option_model_eval: True + collecting_aluminum_cans-Benevolence_2_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Benevolence_2_int + behavior_test_scene_name: Benevolence_2_int + behavior_task_list: "[collecting_aluminum_cans]" + behavior_option_model_eval: True + organizing_file_cabinet-Pomaria_0_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Pomaria_0_int + behavior_test_scene_name: Pomaria_0_int + behavior_task_list: "[organizing_file_cabinet]" + behavior_option_model_eval: True + picking_up_trash-Wainscott_0_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Wainscott_0_int + behavior_test_scene_name: Wainscott_0_int + behavior_task_list: "[picking_up_trash]" + behavior_option_model_eval: True + polishing_furniture-Rs_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Rs_int + behavior_test_scene_name: Rs_int + behavior_task_list: "[polishing_furniture]" + behavior_option_model_eval: True + polishing_silver-Wainscott_0_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Wainscott_0_int + behavior_test_scene_name: Wainscott_0_int + behavior_task_list: "[polishing_silver]" + behavior_option_model_eval: True + putting_leftovers_away-Ihlen_1_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Ihlen_1_int + behavior_test_scene_name: Ihlen_1_int + behavior_task_list: "[putting_leftovers_away]" + behavior_option_model_eval: True + re-shelving_library_books-Pomaria_1_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Pomaria_1_int + behavior_test_scene_name: Pomaria_1_int + behavior_task_list: "[re-shelving_library_books]" + behavior_option_model_eval: True + sorting_books-Pomaria_1_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Pomaria_1_int + behavior_test_scene_name: Pomaria_1_int + 
behavior_task_list: "[sorting_books]" + behavior_option_model_eval: True + throwing_away_leftovers-Ihlen_1_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Ihlen_1_int + behavior_test_scene_name: Ihlen_1_int + behavior_task_list: "[throwing_away_leftovers]" + behavior_option_model_eval: True + unpacking_suitcase-Ihlen_1_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Ihlen_1_int + behavior_test_scene_name: Ihlen_1_int + behavior_task_list: "[unpacking_suitcase]" + behavior_option_model_eval: True + setting_mousetraps-Benevolence_2_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Benevolence_2_int + behavior_test_scene_name: Benevolence_2_int + behavior_task_list: "[setting_mousetraps]" + behavior_option_model_eval: True + cleaning_a_car-Ihlen_0_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Ihlen_0_int + behavior_test_scene_name: Ihlen_0_int + behavior_task_list: "[cleaning_a_car]" + behavior_option_model_eval: True + loading_the_dishwasher-Pomaria_1_int: + NAME: "behavior" + FLAGS: + behavior_train_scene_name: Pomaria_1_int + behavior_test_scene_name: Pomaria_1_int + behavior_task_list: "[loading_the_dishwasher]" + behavior_option_model_eval: True +ARGS: {} +FLAGS: # general flags + offline_data_planning_timeout: 1000.0 + timeout: 1000.0 + horizon: 1000 + behavior_override_learned_samplers: True + behavior_ignore_discover_failures: True + create_training_dataset: False + num_train_tasks: 0 + num_test_tasks: 1 + behavior_mode: simple + behavior_save_video: True + behavior_option_model_rrt: True +START_SEED: 0 +NUM_SEEDS: 10 +USE_GPU: False From 5637f5244c04976c285e771ef0dbefec2c0d2cd3 Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Mon, 30 Oct 2023 18:25:22 -0400 Subject: [PATCH 09/17] added hand rrt --- .../behavior_utils/motion_planner_fns.py | 120 +++++++++--------- 1 file changed, 60 insertions(+), 60 deletions(-) diff --git a/predicators/behavior_utils/motion_planner_fns.py b/predicators/behavior_utils/motion_planner_fns.py index eff490ef19..5bc318a572 100644 --- a/predicators/behavior_utils/motion_planner_fns.py +++ b/predicators/behavior_utils/motion_planner_fns.py @@ -24,7 +24,7 @@ RoomFloor # pylint: disable=unused-import from igibson.objects.articulated_object import URDFObject from igibson.utils.behavior_robot_planning_utils import \ - plan_base_motion_br # , plan_hand_motion_br + plan_base_motion_br, plan_hand_motion_br except (ImportError, ModuleNotFoundError) as e: pass @@ -222,13 +222,13 @@ def make_grasp_plan( x = obj_pos[0] + grasp_offset[0] y = obj_pos[1] + grasp_offset[1] z = obj_pos[2] + grasp_offset[2] - # hand_x, hand_y, hand_z = env.robots[0].parts["right_hand"].get_position() - # minx = min(x, hand_x) - 0.5 - # miny = min(y, hand_y) - 0.5 - # minz = min(z, hand_z) - 0.5 - # maxx = max(x, hand_x) + 0.5 - # maxy = max(y, hand_y) + 0.5 - # maxz = max(z, hand_z) + 0.5 + hand_x, hand_y, hand_z = env.robots[0].parts["right_hand"].get_position() + minx = min(x, hand_x) - 0.5 + miny = min(y, hand_y) - 0.5 + minz = min(z, hand_z) - 0.5 + maxx = max(x, hand_x) + 0.5 + maxy = max(y, hand_y) + 0.5 + maxz = max(z, hand_z) + 0.5 # compute the angle the hand must be in such that it can # grasp the object from its current offset position @@ -276,25 +276,25 @@ def make_grasp_plan( euler_angles[2], ] # For now we are not running rrt with grasp. 
- # if env.use_rrt: - # # plan a motion to the pose [x, y, z, euler_angles[0], - # # euler_angles[1], euler_angles[2]] - # plan = plan_hand_motion_br( - # robot=env.robots[0], - # obj_in_hand=None, - # end_conf=end_conf, - # hand_limits=((minx, miny, minz), (maxx, maxy, maxz)), - # obstacles=get_scene_body_ids(env, - # include_self=True, - # include_right_hand=True), - # rng=rng, - # ) - # p.restoreState(state) - # else: - pos = env.robots[0].parts["right_hand"].get_position() - plan = [[pos[0], pos[1], pos[2]] + list( - p.getEulerFromQuaternion( - env.robots[0].parts["right_hand"].get_orientation())), end_conf] + if CFG.behavior_option_model_rrt: + # plan a motion to the pose [x, y, z, euler_angles[0], + # euler_angles[1], euler_angles[2]] + plan = plan_hand_motion_br( + robot=env.robots[0], + obj_in_hand=None, + end_conf=end_conf, + hand_limits=((minx, miny, minz), (maxx, maxy, maxz)), + obstacles=get_scene_body_ids(env, + include_self=True, + include_right_hand=True), + rng=rng, + ) + p.restoreState(state) + else: + pos = env.robots[0].parts["right_hand"].get_position() + plan = [[pos[0], pos[1], pos[2]] + list( + p.getEulerFromQuaternion( + env.robots[0].parts["right_hand"].get_orientation())), end_conf] # NOTE: This below line is *VERY* important after the # pybullet state is restored. The hands keep an internal @@ -306,11 +306,11 @@ def make_grasp_plan( env.robots[0].parts["left_hand"].set_position( env.robots[0].parts["left_hand"].get_position()) - # # If RRT planning fails, fail and return None - # if plan is None: - # logging.info(f"PRIMITIVE: grasp {obj.name} fail, failed " - # f"to find plan to continuous params {grasp_offset}") - # return None + # If RRT planning fails, fail and return None + if plan is None: + logging.info(f"PRIMITIVE: grasp {obj.name} fail, failed " + f"to find plan to continuous params {grasp_offset}") + return None # Grasping Phase 2: Move along the vector from the # position the hand ends up in to the object and @@ -418,14 +418,14 @@ def make_place_plan( if obj.get_body_id() == obj_in_hand_idx ][0] x, y, z = np.add(place_rel_pos, obj.get_position()) - # hand_x, hand_y, hand_z = env.robots[0].parts["right_hand"].get_position() + hand_x, hand_y, hand_z = env.robots[0].parts["right_hand"].get_position() - # minx = min(x, hand_x) - 1 - # miny = min(y, hand_y) - 1 - # minz = min(z, hand_z) - 0.5 - # maxx = max(x, hand_x) + 1 - # maxy = max(y, hand_y) + 1 - # maxz = max(z, hand_z) + 0.5 + minx = min(x, hand_x) - 1 + miny = min(y, hand_y) - 1 + minz = min(z, hand_z) - 0.5 + maxx = max(x, hand_x) + 1 + maxy = max(y, hand_y) + 1 + maxz = max(z, hand_z) + 0.5 obstacles = get_scene_body_ids(env, include_self=False) obstacles.remove(env.robots[0].parts["right_hand"].object_in_hand) @@ -438,22 +438,22 @@ def make_place_plan( 0, ] # For now we are not running rrt with place. 
-    # if False:
-    #     plan = plan_hand_motion_br(
-    #         robot=env.robots[0],
-    #         obj_in_hand=obj_in_hand,
-    #         end_conf=end_conf,
-    #         hand_limits=((minx, miny, minz), (maxx, maxy, maxz)),
-    #         obstacles=obstacles,
-    #         rng=rng,
-    #     )
-    #     p.restoreState(state)
-    #     p.removeState(state)
-    # else:
-    pos = env.robots[0].parts["right_hand"].get_position()
-    plan = [[pos[0], pos[1], pos[2]] + list(
-        p.getEulerFromQuaternion(
-            env.robots[0].parts["right_hand"].get_orientation())), end_conf]
+    if CFG.behavior_option_model_rrt:
+        plan = plan_hand_motion_br(
+            robot=env.robots[0],
+            obj_in_hand=obj_in_hand,
+            end_conf=end_conf,
+            hand_limits=((minx, miny, minz), (maxx, maxy, maxz)),
+            obstacles=obstacles,
+            rng=rng,
+        )
+        p.restoreState(state)
+        p.removeState(state)
+    else:
+        pos = env.robots[0].parts["right_hand"].get_position()
+        plan = [[pos[0], pos[1], pos[2]] + list(
+            p.getEulerFromQuaternion(
+                env.robots[0].parts["right_hand"].get_orientation())), end_conf]

     # NOTE: This below line is *VERY* important after the
     # pybullet state is restored. The hands keep an internal
@@ -465,11 +465,11 @@ def make_place_plan(
     env.robots[0].parts["left_hand"].set_position(
         env.robots[0].parts["left_hand"].get_position())

-    # # If RRT planning fails, fail and return None
-    # if plan is None:
-    #     logging.info(f"PRIMITIVE: placeOnTop/inside {obj.name} fail, failed "
-    #                  f"to find plan to continuous params {place_rel_pos}")
-    #     return None
+    # If RRT planning fails, fail and return None
+    if plan is None:
+        logging.info(f"PRIMITIVE: placeOnTop/inside {obj.name} fail, failed "
+                     f"to find plan to continuous params {place_rel_pos}")
+        return None

     original_orientation = list(
         p.getEulerFromQuaternion(

From 8a2a071b2abffc4f35784340814fc7f159f3ac0e Mon Sep 17 00:00:00 2001
From: Willie McClinton
Date: Thu, 2 Nov 2023 15:26:09 -0400
Subject: [PATCH 10/17] added grasp and place movement

---
 predicators/behavior_utils/behavior_utils.py | 34 ++++++++
 .../behavior_utils/motion_planner_fns.py     | 13 +--
 .../behavior_utils/option_model_fns.py       | 86 +++++++++++++++++++
 predicators/planning.py                      | 11 +--
 4 files changed, 134 insertions(+), 10 deletions(-)

diff --git a/predicators/behavior_utils/behavior_utils.py b/predicators/behavior_utils/behavior_utils.py
index 5af4fdbf0f..25530e510b 100644
--- a/predicators/behavior_utils/behavior_utils.py
+++ b/predicators/behavior_utils/behavior_utils.py
@@ -855,6 +855,40 @@ def get_scene_body_ids(
     return ids

+def get_relevant_scene_body_ids(
+    env: "BehaviorEnv",
+    include_self: bool = False,
+    include_right_hand: bool = False,
+) -> List[int]:
+    """Function to return list of body_ids for relevant objects in the scene
+    for collision checking depending on whether navigation or
+    grasping/placing is being done."""
+    ids = []
+    for obj in env.scene.get_objects():
+        if isinstance(obj, URDFObject):
+            # We want to exclude the floor since we're always floating and
+            # will never practically collide with it, but if we include it
+            # in collision checking, we always seem to collide.
+            if obj.name != "floors":
+                # Hardcoded list of relevant objects (currently only beds).
+ if "bed" in obj.name: + ids.extend(obj.body_ids) + + if include_self: + ids.append(env.robots[0].parts["left_hand"].get_body_id()) + ids.append(env.robots[0].parts["body"].get_body_id()) + ids.append(env.robots[0].parts["eye"].get_body_id()) + if not include_right_hand: + ids.append(env.robots[0].parts["right_hand"].get_body_id()) + + all_obj_ids = [] + for obj in env.scene.get_objects(): + if isinstance(obj, URDFObject): + all_obj_ids.append([obj.name, obj.body_ids]) + print("ALL OBJ IDS:", all_obj_ids) + + return ids + def detect_collision(bodyA: int, object_in_hand: Optional[int] = None) -> bool: """Detects collisions between bodyA in the scene (except for the object in diff --git a/predicators/behavior_utils/motion_planner_fns.py b/predicators/behavior_utils/motion_planner_fns.py index 5bc318a572..2e20298dc4 100644 --- a/predicators/behavior_utils/motion_planner_fns.py +++ b/predicators/behavior_utils/motion_planner_fns.py @@ -10,7 +10,7 @@ from numpy.random._generator import Generator from predicators.behavior_utils.behavior_utils import check_nav_end_pose, \ - get_aabb_volume, get_closest_point_on_aabb, get_scene_body_ids, \ + get_aabb_volume, get_closest_point_on_aabb, get_relevant_scene_body_ids, \ reset_and_release_hand from predicators.settings import CFG from predicators.structs import Array @@ -124,8 +124,8 @@ def sample_fn(env: "BehaviorEnv", valid_position[0][1], valid_position[1][2], ] - if env.use_rrt: - obstacles = get_scene_body_ids(env) + if CFG.behavior_option_model_rrt: + obstacles = get_relevant_scene_body_ids(env) if env.robots[0].parts["right_hand"].object_in_hand is not None: obstacles.remove(env.robots[0].parts["right_hand"].object_in_hand) plan = plan_base_motion_br( @@ -148,6 +148,7 @@ def sample_fn(env: "BehaviorEnv", logging.info(f"PRIMITIVE: navigate to {obj.name} with params " f"{pos_offset} failed; birrt failed to sample a plan!") return None + plan = plan + [end_conf] p.restoreState(state) p.removeState(state) @@ -284,7 +285,7 @@ def make_grasp_plan( obj_in_hand=None, end_conf=end_conf, hand_limits=((minx, miny, minz), (maxx, maxy, maxz)), - obstacles=get_scene_body_ids(env, + obstacles=get_relevant_scene_body_ids(env, include_self=True, include_right_hand=True), rng=rng, @@ -311,6 +312,7 @@ def make_grasp_plan( logging.info(f"PRIMITIVE: grasp {obj.name} fail, failed " f"to find plan to continuous params {grasp_offset}") return None + plan = plan + [end_conf] # Grasping Phase 2: Move along the vector from the # position the hand ends up in to the object and @@ -427,7 +429,7 @@ def make_place_plan( maxy = max(y, hand_y) + 1 maxz = max(z, hand_z) + 0.5 - obstacles = get_scene_body_ids(env, include_self=False) + obstacles = get_relevant_scene_body_ids(env, include_self=False) obstacles.remove(env.robots[0].parts["right_hand"].object_in_hand) end_conf = [ x, @@ -470,6 +472,7 @@ def make_place_plan( logging.info(f"PRIMITIVE: placeOnTop/inside {obj.name} fail, failed " f"to find plan to continuous params {place_rel_pos}") return None + plan = plan + [end_conf] original_orientation = list( p.getEulerFromQuaternion( diff --git a/predicators/behavior_utils/option_model_fns.py b/predicators/behavior_utils/option_model_fns.py index 979fe57d83..2aff6d3a72 100644 --- a/predicators/behavior_utils/option_model_fns.py +++ b/predicators/behavior_utils/option_model_fns.py @@ -118,6 +118,14 @@ def graspObjectOptionModel(_state: State, env: "BehaviorEnv") -> None: "right_hand"].get_position() rh_orig_grasp_orn = env.robots[0].parts["right_hand"].get_orientation() + # TODO 0 
Simulate Arm Movement + for step in plan: + env.robots[0].parts["right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) + # 1 Teleport Hand to Grasp offset location env.robots[0].parts["right_hand"].set_position_orientation( rh_final_grasp_postion, @@ -150,6 +158,14 @@ def graspObjectOptionModel(_state: State, env: "BehaviorEnv") -> None: for _ in range(5): env.step(a) + # TODO 0 Simulate Arm Movement (Backwards) + for step in reversed(plan): + env.robots[0].parts["right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) + # 4 Move Hand to Original Location env.robots[0].parts["right_hand"].set_position_orientation( rh_orig_grasp_position, rh_orig_grasp_orn) @@ -201,6 +217,14 @@ def placeOntopObjectOptionModel(_init_state: State, f"place ontop {obj_to_place_onto.name} with " f"params {target_pos}") + # TODO 0 Simulate Arm Movement + for step in plan: + env.robots[0].parts["right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) + env.robots[0].parts["right_hand"].set_position_orientation( target_pos, p.getQuaternionFromEuler(target_orn)) env.robots[0].parts["right_hand"].force_release_obj() @@ -216,6 +240,15 @@ def placeOntopObjectOptionModel(_init_state: State, linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0], ) + + # TODO 0 Simulate Arm Movement (Backwards) + for step in reversed(plan): + env.robots[0].parts["right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) + env.robots[0].parts["right_hand"].set_position_orientation( rh_orig_grasp_position, rh_orig_grasp_orn) # this is running a series of zero action to step simulator @@ -383,6 +416,15 @@ def placeInsideObjectOptionModel(_init_state: State, f", {plan[-1][3:6]}) and attempting to " + f"place inside to {obj_to_place_into.name} with " f"params {target_pos}") + + # TODO 0 Simulate Arm Movement + for step in plan: + env.robots[0].parts["right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) + env.robots[0].parts["right_hand"].force_release_obj() obj_to_place_into.force_wakeup() obj_in_hand.set_position_orientation( @@ -397,6 +439,14 @@ def placeInsideObjectOptionModel(_init_state: State, linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0], ) + # TODO 0 Simulate Arm Movement (Backwards) + for step in reversed(plan): + env.robots[0].parts["right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) + env.robots[0].parts["right_hand"].set_position_orientation( rh_orig_grasp_position, rh_orig_grasp_orn) # this is running a series of zero action to step @@ -476,6 +526,15 @@ def placeUnderObjectOptionModel(_init_state: State, f", {plan[-1][3:6]}) and attempting to " + f"place under to {obj_to_place_under.name} with " f"params {target_pos}") + + # TODO 0 Simulate Arm Movement + for step in plan: + env.robots[0].parts["right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) + env.robots[0].parts["right_hand"].force_release_obj() 
obj_to_place_under.force_wakeup()
         obj_in_hand.set_position_orientation(
             linearVelocity=[0, 0, 0],
             angularVelocity=[0, 0, 0],
         )
+
+        # TODO 0 Simulate Arm Movement (Backwards)
+        for step in reversed(plan):
+            env.robots[0].parts["right_hand"].set_position_orientation(
+                step[0:3],
+                p.getQuaternionFromEuler(step[3:6]))
+            for _ in range(1):
+                env.step(np.zeros(env.action_space.shape))
+
         env.robots[0].parts["right_hand"].set_position_orientation(
             rh_orig_grasp_position, rh_orig_grasp_orn)
         # this is running a series of zero action to step
@@ -599,6 +667,15 @@ def placeNextToObjectOptionModel(_init_state: State,
             f", {plan[-1][3:6]}) and attempting to " +
             f"place next to {obj_to_place_nextto.name} with "
             f"params {target_pos}")
+
+        # TODO 0 Simulate Arm Movement
+        for step in plan:
+            env.robots[0].parts["right_hand"].set_position_orientation(
+                step[0:3],
+                p.getQuaternionFromEuler(step[3:6]))
+            for _ in range(1):
+                env.step(np.zeros(env.action_space.shape))
+
         env.robots[0].parts["right_hand"].force_release_obj()
         obj_to_place_nextto.force_wakeup()
         obj_in_hand.set_position_orientation(
             linearVelocity=[0, 0, 0],
             angularVelocity=[0, 0, 0],
         )
+
+        # TODO 0 Simulate Arm Movement (Backwards)
+        for step in reversed(plan):
+            env.robots[0].parts["right_hand"].set_position_orientation(
+                step[0:3],
+                p.getQuaternionFromEuler(step[3:6]))
+            for _ in range(1):
+                env.step(np.zeros(env.action_space.shape))
+
         env.robots[0].parts["right_hand"].set_position_orientation(
             rh_orig_grasp_position, rh_orig_grasp_orn)
         # this is running a series of zero action to step
diff --git a/predicators/planning.py b/predicators/planning.py
index 05ce01d359..a9b3d3e8f0 100644
--- a/predicators/planning.py
+++ b/predicators/planning.py
@@ -77,7 +77,8 @@ def sesame_plan(
     """
     if CFG.env == "behavior" and \
-        CFG.behavior_mode == 'iggui':  # pragma: no cover
+        CFG.behavior_mode == 'iggui' and \
+        CFG.plan_only_eval:  # pragma: no cover
         env = get_or_create_env('behavior')
         assert isinstance(env, BehaviorEnv)
         win = curses.initscr()
@@ -581,10 +582,10 @@ def run_low_level_search(
                 if cur_idx == len(skeleton):
                     return plan, True, traj  # success!
             else:
-                # logging.info("Failure: Expected Atoms Check Failed.")
-                # for a in expected_atoms:
-                #     if not a.holds(traj[cur_idx]):
-                #         logging.info(a)
+                logging.info("Failure: Expected Atoms Check Failed.")
+                for a in expected_atoms:
+                    if not a.holds(traj[cur_idx]):
+                        logging.info(a)
                 can_continue_on = False
         else:
             # If we're not checking expected_atoms, we need to

From 943c4f489f9236bb2714ba67a29b20ad4edffbd4 Mon Sep 17 00:00:00 2001
From: Willie McClinton
Date: Thu, 2 Nov 2023 15:31:19 -0400
Subject: [PATCH 11/17] fixed unnecessary changes

---
 predicators/planning.py | 1 -
 predicators/utils.py    | 7 ++-----
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/predicators/planning.py b/predicators/planning.py
index a9b3d3e8f0..02f6e535d4 100644
--- a/predicators/planning.py
+++ b/predicators/planning.py
@@ -688,7 +688,6 @@ def _run_plan_with_option_model(
             lambda s, m, o, p: True).ground(option.objects, option.params)
         action_option.memory = option.memory
         actions[idx].set_option(action_option)
-        time.sleep(2)
     # Since we're not checking the expected_atoms, we need to
     # explicitly check if the goal is achieved.
if task.goal_holds(traj[-1]): diff --git a/predicators/utils.py b/predicators/utils.py index df57f3208f..e62855b853 100644 --- a/predicators/utils.py +++ b/predicators/utils.py @@ -2542,10 +2542,7 @@ class VideoMonitor(Monitor): def observe(self, state: State, action: Optional[Action]) -> None: del state # unused - render_state = self._render_fn(action, None) - self._video.extend(render_state) - self._video.extend(render_state) - self._video.extend(render_state) + self._video.extend(self._render_fn(action, None)) def get_video(self) -> Video: """Return the video.""" @@ -2622,7 +2619,7 @@ def save_video(outfile: str, video: Video) -> None: """Save the video to video_dir/outfile.""" outdir = CFG.video_dir os.makedirs(outdir, exist_ok=True) - outpath = os.path.join(outdir, str(CFG.seed) + "_" + outfile) + outpath = os.path.join(outdir, outfile) imageio.mimwrite(outpath, video, fps=CFG.video_fps) # type: ignore logging.info(f"Wrote out to {outpath}") From 70f7f8bcf8ad807f2535df013d3bb8cc53e2c3eb Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Thu, 2 Nov 2023 15:36:20 -0400 Subject: [PATCH 12/17] reverted --- .../behavior_utils/motion_planner_fns.py | 6 +- scripts/configs/behavior_20_evaluation.yaml | 8 +- .../configs/behavior_20_evaluation_extra.yaml | 57 ------ scripts/configs/behavior_20_videos.yaml | 164 ------------------ .../configs/behavior_last_8_evaluation.yaml | 78 --------- 5 files changed, 7 insertions(+), 306 deletions(-) delete mode 100644 scripts/configs/behavior_20_evaluation_extra.yaml delete mode 100644 scripts/configs/behavior_20_videos.yaml delete mode 100644 scripts/configs/behavior_last_8_evaluation.yaml diff --git a/predicators/behavior_utils/motion_planner_fns.py b/predicators/behavior_utils/motion_planner_fns.py index 2e20298dc4..732d0bd1f6 100644 --- a/predicators/behavior_utils/motion_planner_fns.py +++ b/predicators/behavior_utils/motion_planner_fns.py @@ -127,7 +127,8 @@ def sample_fn(env: "BehaviorEnv", if CFG.behavior_option_model_rrt: obstacles = get_relevant_scene_body_ids(env) if env.robots[0].parts["right_hand"].object_in_hand is not None: - obstacles.remove(env.robots[0].parts["right_hand"].object_in_hand) + if env.robots[0].parts["right_hand"].object_in_hand in obstacles: + obstacles.remove(env.robots[0].parts["right_hand"].object_in_hand) plan = plan_base_motion_br( robot=env.robots[0], end_conf=end_conf, @@ -430,7 +431,8 @@ def make_place_plan( maxz = max(z, hand_z) + 0.5 obstacles = get_relevant_scene_body_ids(env, include_self=False) - obstacles.remove(env.robots[0].parts["right_hand"].object_in_hand) + if env.robots[0].parts["right_hand"].object_in_hand in obstacles: + obstacles.remove(env.robots[0].parts["right_hand"].object_in_hand) end_conf = [ x, y, diff --git a/scripts/configs/behavior_20_evaluation.yaml b/scripts/configs/behavior_20_evaluation.yaml index 2049399daf..d57d536fc5 100644 --- a/scripts/configs/behavior_20_evaluation.yaml +++ b/scripts/configs/behavior_20_evaluation.yaml @@ -153,10 +153,8 @@ FLAGS: # general flags timeout: 1000.0 horizon: 1000 behavior_override_learned_samplers: True - behavior_ignore_discover_failures: True - create_training_dataset: True - num_train_tasks: 5 + num_train_tasks: 0 num_test_tasks: 5 -START_SEED: 465 -NUM_SEEDS: 10 +START_SEED: 0 +NUM_SEEDS: 1 USE_GPU: False diff --git a/scripts/configs/behavior_20_evaluation_extra.yaml b/scripts/configs/behavior_20_evaluation_extra.yaml deleted file mode 100644 index 2cec26ec63..0000000000 --- a/scripts/configs/behavior_20_evaluation_extra.yaml +++ /dev/null @@ 
-1,57 +0,0 @@ -# A configuration file for 20 BEHAVIOR tasks. ---- -APPROACHES: - my-oracle: - NAME: "oracle" - FLAGS: - plan_only_eval: True - sesame_task_planner: fdopt -ENVS: - setting_mousetraps-Benevolence_2_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Benevolence_2_int - behavior_test_scene_name: Benevolence_2_int - behavior_task_list: "[setting_mousetraps]" - behavior_option_model_eval: True - cleaning_a_car-Ihlen_0_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Ihlen_0_int - behavior_test_scene_name: Ihlen_0_int - behavior_task_list: "[cleaning_a_car]" - behavior_option_model_eval: True - installing_a_fax_machine-Pomaria_0_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Pomaria_0_int - behavior_test_scene_name: Pomaria_0_int - behavior_task_list: "[installing_a_fax_machine]" - behavior_option_model_eval: True - loading_the_dishwasher-Pomaria_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Pomaria_1_int - behavior_test_scene_name: Pomaria_1_int - behavior_task_list: "[loading_the_dishwasher]" - behavior_option_model_eval: True - re-shelving_library_books-Pomaria_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Pomaria_1_int - behavior_test_scene_name: Pomaria_1_int - behavior_task_list: "[re-shelving_library_books]" - behavior_option_model_eval: True -ARGS: {} -FLAGS: # general flags - offline_data_planning_timeout: 1000.0 - timeout: 1000.0 - horizon: 1000 - behavior_override_learned_samplers: True - behavior_ignore_discover_failures: True - create_training_dataset: True - num_train_tasks: 5 - num_test_tasks: 5 -START_SEED: 465 -NUM_SEEDS: 10 -USE_GPU: False diff --git a/scripts/configs/behavior_20_videos.yaml b/scripts/configs/behavior_20_videos.yaml deleted file mode 100644 index 0e27ab6bfb..0000000000 --- a/scripts/configs/behavior_20_videos.yaml +++ /dev/null @@ -1,164 +0,0 @@ -# A configuration file for 20 BEHAVIOR tasks. 
---- -APPROACHES: - my-oracle: - NAME: "oracle" - FLAGS: - sesame_task_planner: fdopt -ENVS: - # installing_a_fax_machine-Pomaria_0_int: - # NAME: "behavior" - # FLAGS: - # behavior_train_scene_name: Pomaria_0_int - # behavior_test_scene_name: Pomaria_0_int - # behavior_task_list: "[installing_a_fax_machine]" - # behavior_option_model_eval: True - # locking_every_door-Pomaria_0_int: - # NAME: "behavior" - # FLAGS: - # behavior_train_scene_name: Pomaria_0_int - # behavior_test_scene_name: Pomaria_0_int - # behavior_task_list: "[locking_every_door]" - # behavior_option_model_eval: True - # locking_every_window-Rs_int: - # NAME: "behavior" - # FLAGS: - # behavior_train_scene_name: Rs_int - # behavior_test_scene_name: Rs_int - # behavior_task_list: "[locking_every_window]" - # behavior_option_model_eval: True - opening_packages-Pomaria_2_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Pomaria_2_int - behavior_test_scene_name: Pomaria_2_int - behavior_task_list: "[opening_packages]" - behavior_option_model_eval: True - opening_presents-Pomaria_2_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Pomaria_2_int - behavior_test_scene_name: Pomaria_2_int - behavior_task_list: "[opening_presents]" - behavior_option_model_eval: True - boxing_books_up_for_storage-Benevolence_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Benevolence_1_int - behavior_test_scene_name: Benevolence_1_int - behavior_task_list: "[boxing_books_up_for_storage]" - behavior_option_model_eval: True - collect_misplaced_items-Merom_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Merom_1_int - behavior_test_scene_name: Merom_1_int - behavior_task_list: "[collect_misplaced_items]" - behavior_option_model_eval: True - collecting_aluminum_cans-Benevolence_2_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Benevolence_2_int - behavior_test_scene_name: Benevolence_2_int - behavior_task_list: "[collecting_aluminum_cans]" - behavior_option_model_eval: True - organizing_file_cabinet-Pomaria_0_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Pomaria_0_int - behavior_test_scene_name: Pomaria_0_int - behavior_task_list: "[organizing_file_cabinet]" - behavior_option_model_eval: True - picking_up_trash-Wainscott_0_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Wainscott_0_int - behavior_test_scene_name: Wainscott_0_int - behavior_task_list: "[picking_up_trash]" - behavior_option_model_eval: True - polishing_furniture-Rs_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Rs_int - behavior_test_scene_name: Rs_int - behavior_task_list: "[polishing_furniture]" - behavior_option_model_eval: True - polishing_silver-Wainscott_0_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Wainscott_0_int - behavior_test_scene_name: Wainscott_0_int - behavior_task_list: "[polishing_silver]" - behavior_option_model_eval: True - putting_leftovers_away-Ihlen_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Ihlen_1_int - behavior_test_scene_name: Ihlen_1_int - behavior_task_list: "[putting_leftovers_away]" - behavior_option_model_eval: True - re-shelving_library_books-Pomaria_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Pomaria_1_int - behavior_test_scene_name: Pomaria_1_int - behavior_task_list: "[re-shelving_library_books]" - behavior_option_model_eval: True - sorting_books-Pomaria_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Pomaria_1_int - behavior_test_scene_name: Pomaria_1_int - 
behavior_task_list: "[sorting_books]"
-      behavior_option_model_eval: True
-  throwing_away_leftovers-Ihlen_1_int:
-    NAME: "behavior"
-    FLAGS:
-      behavior_train_scene_name: Ihlen_1_int
-      behavior_test_scene_name: Ihlen_1_int
-      behavior_task_list: "[throwing_away_leftovers]"
-      behavior_option_model_eval: True
-  unpacking_suitcase-Ihlen_1_int:
-    NAME: "behavior"
-    FLAGS:
-      behavior_train_scene_name: Ihlen_1_int
-      behavior_test_scene_name: Ihlen_1_int
-      behavior_task_list: "[unpacking_suitcase]"
-      behavior_option_model_eval: True
-  setting_mousetraps-Benevolence_2_int:
-    NAME: "behavior"
-    FLAGS:
-      behavior_train_scene_name: Benevolence_2_int
-      behavior_test_scene_name: Benevolence_2_int
-      behavior_task_list: "[setting_mousetraps]"
-      behavior_option_model_eval: True
-  cleaning_a_car-Ihlen_0_int:
-    NAME: "behavior"
-    FLAGS:
-      behavior_train_scene_name: Ihlen_0_int
-      behavior_test_scene_name: Ihlen_0_int
-      behavior_task_list: "[cleaning_a_car]"
-      behavior_option_model_eval: True
-  loading_the_dishwasher-Pomaria_1_int:
-    NAME: "behavior"
-    FLAGS:
-      behavior_train_scene_name: Pomaria_1_int
-      behavior_test_scene_name: Pomaria_1_int
-      behavior_task_list: "[loading_the_dishwasher]"
-      behavior_option_model_eval: True
-ARGS: {}
-FLAGS: # general flags
-  offline_data_planning_timeout: 1000.0
-  timeout: 1000.0
-  horizon: 1000
-  behavior_override_learned_samplers: True
-  behavior_ignore_discover_failures: True
-  create_training_dataset: False
-  num_train_tasks: 0
-  num_test_tasks: 1
-  behavior_mode: simple
-  behavior_save_video: True
-  behavior_option_model_rrt: True
-START_SEED: 0
-NUM_SEEDS: 10
-USE_GPU: False
diff --git a/scripts/configs/behavior_last_8_evaluation.yaml b/scripts/configs/behavior_last_8_evaluation.yaml
deleted file mode 100644
index 757f3824e1..0000000000
--- a/scripts/configs/behavior_last_8_evaluation.yaml
+++ /dev/null
@@ -1,78 +0,0 @@
-# A configuration file for the last 8 of the 20 BEHAVIOR tasks.
---- -APPROACHES: - my-oracle: - NAME: "oracle" - FLAGS: - plan_only_eval: True - sesame_task_planner: fdopt -ENVS: - picking_up_trash-Wainscott_0_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Wainscott_0_int - behavior_test_scene_name: Wainscott_0_int - behavior_task_list: "[picking_up_trash]" - behavior_option_model_eval: True - polishing_furniture-Rs_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Rs_int - behavior_test_scene_name: Rs_int - behavior_task_list: "[polishing_furniture]" - behavior_option_model_eval: True - polishing_silver-Wainscott_0_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Wainscott_0_int - behavior_test_scene_name: Wainscott_0_int - behavior_task_list: "[polishing_silver]" - behavior_option_model_eval: True - putting_leftovers_away-Ihlen_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Ihlen_1_int - behavior_test_scene_name: Ihlen_1_int - behavior_task_list: "[putting_leftovers_away]" - behavior_option_model_eval: True - re-shelving_library_books-Pomaria_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Pomaria_1_int - behavior_test_scene_name: Pomaria_1_int - behavior_task_list: "[re-shelving_library_books]" - behavior_option_model_eval: True - sorting_books-Pomaria_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Pomaria_1_int - behavior_test_scene_name: Pomaria_1_int - behavior_task_list: "[sorting_books]" - behavior_option_model_eval: True - throwing_away_leftovers-Ihlen_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Ihlen_1_int - behavior_test_scene_name: Ihlen_1_int - behavior_task_list: "[throwing_away_leftovers]" - behavior_option_model_eval: True - unpacking_suitcase-Ihlen_1_int: - NAME: "behavior" - FLAGS: - behavior_train_scene_name: Ihlen_1_int - behavior_test_scene_name: Ihlen_1_int - behavior_task_list: "[unpacking_suitcase]" - behavior_option_model_eval: True -ARGS: {} -FLAGS: # general flags - offline_data_planning_timeout: 1000.0 - timeout: 1000.0 - horizon: 1000 - behavior_override_learned_samplers: True - behavior_ignore_discover_failures: True - create_training_dataset: True - num_train_tasks: 5 - num_test_tasks: 5 -START_SEED: 465 -NUM_SEEDS: 10 -USE_GPU: False From 2eaa29e37a90a5189aeb7279e0fb9ac6b8a834c0 Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Thu, 2 Nov 2023 15:39:03 -0400 Subject: [PATCH 13/17] removed TODOs --- .../behavior_utils/option_model_fns.py | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/predicators/behavior_utils/option_model_fns.py b/predicators/behavior_utils/option_model_fns.py index 2aff6d3a72..d15f42c1d3 100644 --- a/predicators/behavior_utils/option_model_fns.py +++ b/predicators/behavior_utils/option_model_fns.py @@ -118,7 +118,7 @@ def graspObjectOptionModel(_state: State, env: "BehaviorEnv") -> None: "right_hand"].get_position() rh_orig_grasp_orn = env.robots[0].parts["right_hand"].get_orientation() - # TODO 0 Simulate Arm Movement + # Simulate Arm Movement for step in plan: env.robots[0].parts["right_hand"].set_position_orientation( step[0:3], @@ -158,7 +158,7 @@ def graspObjectOptionModel(_state: State, env: "BehaviorEnv") -> None: for _ in range(5): env.step(a) - # TODO 0 Simulate Arm Movement (Backwards) + # Simulate Arm Movement (Backwards) for step in reversed(plan): env.robots[0].parts["right_hand"].set_position_orientation( step[0:3], @@ -217,7 +217,7 @@ def placeOntopObjectOptionModel(_init_state: State, f"place ontop {obj_to_place_onto.name} with " 
f"params {target_pos}") - # TODO 0 Simulate Arm Movement + # Simulate Arm Movement for step in plan: env.robots[0].parts["right_hand"].set_position_orientation( step[0:3], @@ -241,7 +241,7 @@ def placeOntopObjectOptionModel(_init_state: State, angularVelocity=[0, 0, 0], ) - # TODO 0 Simulate Arm Movement (Backwards) + # Simulate Arm Movement (Backwards) for step in reversed(plan): env.robots[0].parts["right_hand"].set_position_orientation( step[0:3], @@ -417,7 +417,7 @@ def placeInsideObjectOptionModel(_init_state: State, f"place inside to {obj_to_place_into.name} with " f"params {target_pos}") - # TODO 0 Simulate Arm Movement + # Simulate Arm Movement for step in plan: env.robots[0].parts["right_hand"].set_position_orientation( step[0:3], @@ -439,7 +439,7 @@ def placeInsideObjectOptionModel(_init_state: State, linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0], ) - # TODO 0 Simulate Arm Movement (Backwards) + # Simulate Arm Movement (Backwards) for step in reversed(plan): env.robots[0].parts["right_hand"].set_position_orientation( step[0:3], @@ -527,7 +527,7 @@ def placeUnderObjectOptionModel(_init_state: State, f"place under to {obj_to_place_under.name} with " f"params {target_pos}") - # TODO 0 Simulate Arm Movement + # Simulate Arm Movement for step in plan: env.robots[0].parts["right_hand"].set_position_orientation( step[0:3], @@ -550,7 +550,7 @@ def placeUnderObjectOptionModel(_init_state: State, angularVelocity=[0, 0, 0], ) - # TODO 0 Simulate Arm Movement (Backwards) + # Simulate Arm Movement (Backwards) for step in reversed(plan): env.robots[0].parts["right_hand"].set_position_orientation( step[0:3], @@ -668,7 +668,7 @@ def placeNextToObjectOptionModel(_init_state: State, f"place next to {obj_to_place_nextto.name} with " f"params {target_pos}") - # TODO 0 Simulate Arm Movement + # Simulate Arm Movement for step in plan: env.robots[0].parts["right_hand"].set_position_orientation( step[0:3], @@ -691,7 +691,7 @@ def placeNextToObjectOptionModel(_init_state: State, angularVelocity=[0, 0, 0], ) - # TODO 0 Simulate Arm Movement (Backwards) + # Simulate Arm Movement (Backwards) for step in reversed(plan): env.robots[0].parts["right_hand"].set_position_orientation( step[0:3], From 177ea606f4c672b04d70556e742e9e759829a105 Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Thu, 2 Nov 2023 16:22:57 -0400 Subject: [PATCH 14/17] ran checks --- predicators/behavior_utils/behavior_utils.py | 3 +- .../behavior_utils/motion_planner_fns.py | 13 +++-- .../behavior_utils/option_model_fns.py | 50 +++++++++---------- predicators/main.py | 5 +- predicators/planning.py | 4 +- 5 files changed, 39 insertions(+), 36 deletions(-) diff --git a/predicators/behavior_utils/behavior_utils.py b/predicators/behavior_utils/behavior_utils.py index 25530e510b..faed1a8773 100644 --- a/predicators/behavior_utils/behavior_utils.py +++ b/predicators/behavior_utils/behavior_utils.py @@ -855,6 +855,7 @@ def get_scene_body_ids( return ids + def get_relevant_scene_body_ids( env: "BehaviorEnv", include_self: bool = False, @@ -1110,7 +1111,7 @@ def sample_navigation_params(igibson_behavior_env: "BehaviorEnv", Implemented in a separate method to enable code reuse in option_model_fns. 
""" - closeness_limit = 1.00 #2.00 + closeness_limit = 1.00 #2.00 nearness_limit = 0.15 distance = nearness_limit + ( (closeness_limit - nearness_limit) * rng.random()) diff --git a/predicators/behavior_utils/motion_planner_fns.py b/predicators/behavior_utils/motion_planner_fns.py index 732d0bd1f6..55701603c1 100644 --- a/predicators/behavior_utils/motion_planner_fns.py +++ b/predicators/behavior_utils/motion_planner_fns.py @@ -128,7 +128,8 @@ def sample_fn(env: "BehaviorEnv", obstacles = get_relevant_scene_body_ids(env) if env.robots[0].parts["right_hand"].object_in_hand is not None: if env.robots[0].parts["right_hand"].object_in_hand in obstacles: - obstacles.remove(env.robots[0].parts["right_hand"].object_in_hand) + obstacles.remove( + env.robots[0].parts["right_hand"].object_in_hand) plan = plan_base_motion_br( robot=env.robots[0], end_conf=end_conf, @@ -287,8 +288,8 @@ def make_grasp_plan( end_conf=end_conf, hand_limits=((minx, miny, minz), (maxx, maxy, maxz)), obstacles=get_relevant_scene_body_ids(env, - include_self=True, - include_right_hand=True), + include_self=True, + include_right_hand=True), rng=rng, ) p.restoreState(state) @@ -296,7 +297,8 @@ def make_grasp_plan( pos = env.robots[0].parts["right_hand"].get_position() plan = [[pos[0], pos[1], pos[2]] + list( p.getEulerFromQuaternion( - env.robots[0].parts["right_hand"].get_orientation())), end_conf] + env.robots[0].parts["right_hand"].get_orientation())), + end_conf] # NOTE: This below line is *VERY* important after the # pybullet state is restored. The hands keep an internal @@ -457,7 +459,8 @@ def make_place_plan( pos = env.robots[0].parts["right_hand"].get_position() plan = [[pos[0], pos[1], pos[2]] + list( p.getEulerFromQuaternion( - env.robots[0].parts["right_hand"].get_orientation())), end_conf] + env.robots[0].parts["right_hand"].get_orientation())), + end_conf] # NOTE: This below line is *VERY* important after the # pybullet state is restored. 
The hands keep an internal diff --git a/predicators/behavior_utils/option_model_fns.py b/predicators/behavior_utils/option_model_fns.py index d15f42c1d3..8d0ba75c62 100644 --- a/predicators/behavior_utils/option_model_fns.py +++ b/predicators/behavior_utils/option_model_fns.py @@ -121,8 +121,7 @@ def graspObjectOptionModel(_state: State, env: "BehaviorEnv") -> None: # Simulate Arm Movement for step in plan: env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], - p.getQuaternionFromEuler(step[3:6])) + step[0:3], p.getQuaternionFromEuler(step[3:6])) for _ in range(1): env.step(np.zeros(env.action_space.shape)) @@ -161,8 +160,7 @@ def graspObjectOptionModel(_state: State, env: "BehaviorEnv") -> None: # Simulate Arm Movement (Backwards) for step in reversed(plan): env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], - p.getQuaternionFromEuler(step[3:6])) + step[0:3], p.getQuaternionFromEuler(step[3:6])) for _ in range(1): env.step(np.zeros(env.action_space.shape)) @@ -220,8 +218,7 @@ def placeOntopObjectOptionModel(_init_state: State, # Simulate Arm Movement for step in plan: env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], - p.getQuaternionFromEuler(step[3:6])) + step[0:3], p.getQuaternionFromEuler(step[3:6])) for _ in range(1): env.step(np.zeros(env.action_space.shape)) @@ -244,8 +241,7 @@ def placeOntopObjectOptionModel(_init_state: State, # Simulate Arm Movement (Backwards) for step in reversed(plan): env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], - p.getQuaternionFromEuler(step[3:6])) + step[0:3], p.getQuaternionFromEuler(step[3:6])) for _ in range(1): env.step(np.zeros(env.action_space.shape)) @@ -419,9 +415,9 @@ def placeInsideObjectOptionModel(_init_state: State, # Simulate Arm Movement for step in plan: - env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], - p.getQuaternionFromEuler(step[3:6])) + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], p.getQuaternionFromEuler(step[3:6])) for _ in range(1): env.step(np.zeros(env.action_space.shape)) @@ -441,9 +437,9 @@ def placeInsideObjectOptionModel(_init_state: State, ) # Simulate Arm Movement (Backwards) for step in reversed(plan): - env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], - p.getQuaternionFromEuler(step[3:6])) + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], p.getQuaternionFromEuler(step[3:6])) for _ in range(1): env.step(np.zeros(env.action_space.shape)) @@ -529,9 +525,9 @@ def placeUnderObjectOptionModel(_init_state: State, # Simulate Arm Movement for step in plan: - env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], - p.getQuaternionFromEuler(step[3:6])) + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], p.getQuaternionFromEuler(step[3:6])) for _ in range(1): env.step(np.zeros(env.action_space.shape)) @@ -552,12 +548,12 @@ def placeUnderObjectOptionModel(_init_state: State, # Simulate Arm Movement (Backwards) for step in reversed(plan): - env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], - p.getQuaternionFromEuler(step[3:6])) + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], p.getQuaternionFromEuler(step[3:6])) for _ in range(1): env.step(np.zeros(env.action_space.shape)) - + env.robots[0].parts["right_hand"].set_position_orientation( rh_orig_grasp_position, rh_orig_grasp_orn) # this is running a series of zero action to step @@ -670,9 
+666,9 @@ def placeNextToObjectOptionModel(_init_state: State, # Simulate Arm Movement for step in plan: - env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], - p.getQuaternionFromEuler(step[3:6])) + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], p.getQuaternionFromEuler(step[3:6])) for _ in range(1): env.step(np.zeros(env.action_space.shape)) @@ -693,9 +689,9 @@ def placeNextToObjectOptionModel(_init_state: State, # Simulate Arm Movement (Backwards) for step in reversed(plan): - env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], - p.getQuaternionFromEuler(step[3:6])) + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], p.getQuaternionFromEuler(step[3:6])) for _ in range(1): env.step(np.zeros(env.action_space.shape)) diff --git a/predicators/main.py b/predicators/main.py index 02478e3399..ba43d8e21f 100644 --- a/predicators/main.py +++ b/predicators/main.py @@ -35,7 +35,6 @@ """ import curses -import numpy as np import logging import os import sys @@ -45,6 +44,7 @@ from typing import List, Optional, Sequence, Tuple import dill as pkl +import numpy as np from predicators import utils from predicators.approaches import ApproachFailure, ApproachTimeout, \ @@ -355,7 +355,8 @@ def _run_testing(env: BaseEnv, approach: BaseApproach) -> Metrics: have finished positioning: ") flag = win.getch() while flag == -1 or chr(flag) != 'q': - env.igibson_behavior_env.step(np.zeros(env.action_space.shape)) + env.igibson_behavior_env.step( + np.zeros(env.action_space.shape)) flag = win.getch() curses.endwin() logging.info("VIDEO CREATION MODE: Starting planning.") diff --git a/predicators/planning.py b/predicators/planning.py index 02f6e535d4..4b4ce478c6 100644 --- a/predicators/planning.py +++ b/predicators/planning.py @@ -1012,7 +1012,9 @@ def _sesame_plan_with_fast_downward( potentially add effects to null operators, but this ability is not implemented here. """ - init_atoms = utils.abstract(task.init, predicates, skip_allclose_check=True) + init_atoms = utils.abstract(task.init, + predicates, + skip_allclose_check=True) objects = list(task.init) start_time = time.perf_counter() # Create the domain and problem strings, then write them to tempfiles. From db10dfca20ee358050cce1f074c2bd694899f24f Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Thu, 2 Nov 2023 16:33:28 -0400 Subject: [PATCH 15/17] fixed lint --- predicators/main.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/predicators/main.py b/predicators/main.py index 6269a4e55e..b6313b7d49 100644 --- a/predicators/main.py +++ b/predicators/main.py @@ -350,9 +350,9 @@ def _run_testing(env: BaseEnv, approach: BaseApproach) -> Metrics: win.nodelay(True) win.addstr( 0, 0, - "VIDEO CREATION MODE: You have time to position the iggui window \ - to the location you want for recording. Type 'q' to indicate you \ - have finished positioning: ") + "VIDEO CREATION MODE: You have time to position the \ + iggui window to the location you want for recording. 
\ + Type 'q' to indicate you have finished positioning: ") flag = win.getch() while flag == -1 or chr(flag) != 'q': env.igibson_behavior_env.step( From f8af1f743fa689a9d9da8b8aee0bd0e3fa950ba7 Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Thu, 2 Nov 2023 16:49:26 -0400 Subject: [PATCH 16/17] fixed lint --- predicators/behavior_utils/behavior_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/predicators/behavior_utils/behavior_utils.py b/predicators/behavior_utils/behavior_utils.py index faed1a8773..6a51276072 100644 --- a/predicators/behavior_utils/behavior_utils.py +++ b/predicators/behavior_utils/behavior_utils.py @@ -1111,7 +1111,7 @@ def sample_navigation_params(igibson_behavior_env: "BehaviorEnv", Implemented in a separate method to enable code reuse in option_model_fns. """ - closeness_limit = 1.00 #2.00 + closeness_limit = CFG.behavior_closeness_limit nearness_limit = 0.15 distance = nearness_limit + ( (closeness_limit - nearness_limit) * rng.random()) From cef7712b0aa26033d0081c3d7863d8b13532361e Mon Sep 17 00:00:00 2001 From: Willie McClinton Date: Thu, 2 Nov 2023 16:50:37 -0400 Subject: [PATCH 17/17] lint --- .../behavior_utils/option_model_fns.py | 128 ++++++++++-------- predicators/envs/behavior.py | 2 +- predicators/settings.py | 1 + 3 files changed, 74 insertions(+), 57 deletions(-) diff --git a/predicators/behavior_utils/option_model_fns.py b/predicators/behavior_utils/option_model_fns.py index 8d0ba75c62..08eefce5ff 100644 --- a/predicators/behavior_utils/option_model_fns.py +++ b/predicators/behavior_utils/option_model_fns.py @@ -119,11 +119,12 @@ def graspObjectOptionModel(_state: State, env: "BehaviorEnv") -> None: rh_orig_grasp_orn = env.robots[0].parts["right_hand"].get_orientation() # Simulate Arm Movement - for step in plan: - env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], p.getQuaternionFromEuler(step[3:6])) - for _ in range(1): - env.step(np.zeros(env.action_space.shape)) + if CFG.behavior_option_model_rrt: + for step in plan: + env.robots[0].parts["right_hand"].set_position_orientation( + step[0:3], p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) # 1 Teleport Hand to Grasp offset location env.robots[0].parts["right_hand"].set_position_orientation( @@ -158,11 +159,12 @@ def graspObjectOptionModel(_state: State, env: "BehaviorEnv") -> None: env.step(a) # Simulate Arm Movement (Backwards) - for step in reversed(plan): - env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], p.getQuaternionFromEuler(step[3:6])) - for _ in range(1): - env.step(np.zeros(env.action_space.shape)) + if CFG.behavior_option_model_rrt: + for step in reversed(plan): + env.robots[0].parts["right_hand"].set_position_orientation( + step[0:3], p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) # 4 Move Hand to Original Location env.robots[0].parts["right_hand"].set_position_orientation( @@ -216,11 +218,12 @@ def placeOntopObjectOptionModel(_init_state: State, f"params {target_pos}") # Simulate Arm Movement - for step in plan: - env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], p.getQuaternionFromEuler(step[3:6])) - for _ in range(1): - env.step(np.zeros(env.action_space.shape)) + if CFG.behavior_option_model_rrt: + for step in plan: + env.robots[0].parts["right_hand"].set_position_orientation( + step[0:3], p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + 
env.step(np.zeros(env.action_space.shape)) env.robots[0].parts["right_hand"].set_position_orientation( target_pos, p.getQuaternionFromEuler(target_orn)) @@ -239,11 +242,12 @@ def placeOntopObjectOptionModel(_init_state: State, ) # Simulate Arm Movement (Backwards) - for step in reversed(plan): - env.robots[0].parts["right_hand"].set_position_orientation( - step[0:3], p.getQuaternionFromEuler(step[3:6])) - for _ in range(1): - env.step(np.zeros(env.action_space.shape)) + if CFG.behavior_option_model_rrt: + for step in reversed(plan): + env.robots[0].parts["right_hand"].set_position_orientation( + step[0:3], p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) env.robots[0].parts["right_hand"].set_position_orientation( rh_orig_grasp_position, rh_orig_grasp_orn) @@ -414,12 +418,14 @@ def placeInsideObjectOptionModel(_init_state: State, f"params {target_pos}") # Simulate Arm Movement - for step in plan: - env.robots[0].parts[ - "right_hand"].set_position_orientation( - step[0:3], p.getQuaternionFromEuler(step[3:6])) - for _ in range(1): - env.step(np.zeros(env.action_space.shape)) + if CFG.behavior_option_model_rrt: + for step in plan: + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) env.robots[0].parts["right_hand"].force_release_obj() obj_to_place_into.force_wakeup() @@ -436,12 +442,14 @@ def placeInsideObjectOptionModel(_init_state: State, angularVelocity=[0, 0, 0], ) # Simulate Arm Movement (Backwards) - for step in reversed(plan): - env.robots[0].parts[ - "right_hand"].set_position_orientation( - step[0:3], p.getQuaternionFromEuler(step[3:6])) - for _ in range(1): - env.step(np.zeros(env.action_space.shape)) + if CFG.behavior_option_model_rrt: + for step in reversed(plan): + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) env.robots[0].parts["right_hand"].set_position_orientation( rh_orig_grasp_position, rh_orig_grasp_orn) @@ -524,12 +532,14 @@ def placeUnderObjectOptionModel(_init_state: State, f"params {target_pos}") # Simulate Arm Movement - for step in plan: - env.robots[0].parts[ - "right_hand"].set_position_orientation( - step[0:3], p.getQuaternionFromEuler(step[3:6])) - for _ in range(1): - env.step(np.zeros(env.action_space.shape)) + if CFG.behavior_option_model_rrt: + for step in plan: + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) env.robots[0].parts["right_hand"].force_release_obj() obj_to_place_under.force_wakeup() @@ -547,12 +557,14 @@ def placeUnderObjectOptionModel(_init_state: State, ) # Simulate Arm Movement (Backwards) - for step in reversed(plan): - env.robots[0].parts[ - "right_hand"].set_position_orientation( - step[0:3], p.getQuaternionFromEuler(step[3:6])) - for _ in range(1): - env.step(np.zeros(env.action_space.shape)) + if CFG.behavior_option_model_rrt: + for step in reversed(plan): + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) env.robots[0].parts["right_hand"].set_position_orientation( rh_orig_grasp_position, rh_orig_grasp_orn) @@ -665,12 +677,14 @@ def 
placeNextToObjectOptionModel(_init_state: State, f"params {target_pos}") # Simulate Arm Movement - for step in plan: - env.robots[0].parts[ - "right_hand"].set_position_orientation( - step[0:3], p.getQuaternionFromEuler(step[3:6])) - for _ in range(1): - env.step(np.zeros(env.action_space.shape)) + if CFG.behavior_option_model_rrt: + for step in plan: + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) env.robots[0].parts["right_hand"].force_release_obj() obj_to_place_nextto.force_wakeup() @@ -688,12 +702,14 @@ def placeNextToObjectOptionModel(_init_state: State, ) # Simulate Arm Movement (Backwards) - for step in reversed(plan): - env.robots[0].parts[ - "right_hand"].set_position_orientation( - step[0:3], p.getQuaternionFromEuler(step[3:6])) - for _ in range(1): - env.step(np.zeros(env.action_space.shape)) + if CFG.behavior_option_model_rrt: + for step in reversed(plan): + env.robots[0].parts[ + "right_hand"].set_position_orientation( + step[0:3], + p.getQuaternionFromEuler(step[3:6])) + for _ in range(1): + env.step(np.zeros(env.action_space.shape)) env.robots[0].parts["right_hand"].set_position_orientation( rh_orig_grasp_position, rh_orig_grasp_orn) diff --git a/predicators/envs/behavior.py b/predicators/envs/behavior.py index ae60280ead..68cdcbb1f5 100644 --- a/predicators/envs/behavior.py +++ b/predicators/envs/behavior.py @@ -775,7 +775,7 @@ def _reachable_classifier(self, return False return (np.linalg.norm( # type: ignore np.array(robot_obj.get_position()) - - np.array(ig_obj.get_position())) < 1.0) + np.array(ig_obj.get_position())) < CFG.behavior_closeness_limit) def _reachable_nothing_classifier( self, diff --git a/predicators/settings.py b/predicators/settings.py index 1c5118e6d3..1869da78c7 100644 --- a/predicators/settings.py +++ b/predicators/settings.py @@ -130,6 +130,7 @@ class GlobalSettings: behavior_option_model_eval = False behavior_option_model_rrt = False behavior_override_learned_samplers = False + behavior_closeness_limit = 1.00 # if this is True, then we will not use discovered failures in behavior. behavior_ignore_discover_failures = True create_training_dataset = False
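Patch 14 is mostly yapf rewrapping, but the motion_planner_fns.py hunk it touches preserves a detail worth highlighting: before base motion planning, the object currently held in the right hand is dropped from the obstacle list, so the planner does not treat the grasped object as a collision with the robot. A schematic version with invented names (the real code operates on pybullet body ids from get_relevant_scene_body_ids):

def obstacles_excluding_held(scene_body_ids, object_in_hand):
    # Copy so the caller's list is untouched; drop the held object's
    # body id, mirroring the guard in motion_planner_fns.sample_fn.
    obstacles = list(scene_body_ids)
    if object_in_hand is not None and object_in_hand in obstacles:
        obstacles.remove(object_in_hand)
    return obstacles

print(obstacles_excluding_held([3, 7, 11], 7))  # -> [3, 11]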
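The planning.py hunk in patch 14 rewraps a call to utils.abstract, which maps a low-level state to the set of ground atoms whose predicate classifiers hold (skip_allclose_check=True bypasses a state-closeness consistency check). The sketch below is a simplified stand-in for that operation, not the repository's implementation: the real Predicate carries argument types, and the real abstraction only enumerates type-compatible object tuples.

from dataclasses import dataclass
from itertools import permutations
from typing import Callable

@dataclass(frozen=True)
class Predicate:
    # Simplified stand-in: the real Predicate also carries types.
    name: str
    arity: int
    holds: Callable

def abstract(state, predicates):
    # Return every ground atom (predicate name, argument tuple)
    # whose classifier holds in the given state.
    objs = list(state)
    return {(pred.name, combo)
            for pred in predicates
            for combo in permutations(objs, pred.arity)
            if pred.holds(state, *combo)}

# Tiny usage example with one unary predicate over object positions.
OnFloor = Predicate("OnFloor", 1, lambda s, o: s[o][2] < 0.05)
print(abstract({"cup": (1.0, 2.0, 0.0), "book": (0.0, 0.0, 1.0)}, [OnFloor]))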
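Patch 15 only rewraps the video-creation prompt string, but the surrounding curses pattern in _run_testing deserves a note: nodelay(True) makes getch() non-blocking, so the loop can keep stepping the simulator with zero-actions until the user presses 'q'. A standalone sketch of that wait loop; step_simulation is a hypothetical stand-in for stepping env.igibson_behavior_env, and the try/finally is an addition here, not in the original.

import curses

def wait_for_q(step_simulation):
    # nodelay(True) makes getch() non-blocking: it returns -1 when no
    # key is pending, so the simulator keeps stepping while we wait.
    win = curses.initscr()
    win.nodelay(True)
    win.addstr(0, 0, "Position the window, then press 'q': ")
    try:
        flag = win.getch()
        while flag == -1 or chr(flag) != 'q':
            step_simulation()
            flag = win.getch()
    finally:
        curses.endwin()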
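Patches 16 and 17 together replace the hard-coded 1.00 closeness threshold with a single CFG.behavior_closeness_limit, read both by the navigation sampler and by the _reachable_classifier in envs/behavior.py (the flag itself lands in settings.py in patch 17). A minimal sketch of the resulting pattern, with a simplified CFG stand-in:

import numpy as np

class CFG:
    # Stand-in for predicators.settings.CFG; the real flag lives in
    # GlobalSettings as behavior_closeness_limit = 1.00.
    behavior_closeness_limit = 1.00

def is_reachable(robot_pos, obj_pos):
    # Same test as _reachable_classifier: Euclidean distance under
    # the configured limit.
    return bool(
        np.linalg.norm(np.array(robot_pos) - np.array(obj_pos))
        < CFG.behavior_closeness_limit)

print(is_reachable((0.0, 0.0, 0.0), (0.5, 0.5, 0.0)))  # True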
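The bulk of patch 17 wraps every "Simulate Arm Movement" loop in option_model_fns.py behind CFG.behavior_option_model_rrt, so the step-by-step waypoint playback only runs when that flag is set; otherwise the option model skips it and relies on the direct teleport to the target pose that follows in the original code. A condensed sketch of the guard, where env and hand stand in for the BehaviorEnv robot-parts API:

import numpy as np
import pybullet as p

def maybe_replay_hand_plan(env, hand, plan, rrt_playback):
    # Guard from patch 17: skip waypoint playback entirely unless the
    # behavior_option_model_rrt flag is on; the caller then teleports
    # the hand straight to the final pose, as in the original code.
    if not rrt_playback:
        return
    for step in plan:  # each step: (x, y, z, roll, pitch, yaw)
        hand.set_position_orientation(
            step[0:3], p.getQuaternionFromEuler(step[3:6]))
        env.step(np.zeros(env.action_space.shape))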