diff --git a/mushr_rhc_ros/config/real_bag.rviz b/mushr_rhc_ros/config/real_bag.rviz new file mode 100644 index 0000000..2354e6f --- /dev/null +++ b/mushr_rhc_ros/config/real_bag.rviz @@ -0,0 +1,588 @@ +Panels: + - Class: rviz/Displays + Help Height: 0 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + - /Status1 + - /Grid1 + - /Map1 + - /Map1/Orientation1 + - /Odometry1 + - /Marker2 + - /Odometry2/Shape1 + - /TF1 + - /TF1/Frames1 + - /Marker7 + - /Marker8 + - /Marker8/Namespaces1 + - /DepthCloud1/Auto Size1 + - /Marker10 + - /Marker11 + - /Marker12 + - /Marker13 + Splitter Ratio: 0.5 + Tree Height: 626 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.5886790156364441 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: LaserScan +Preferences: + PromptSaveOnExit: true +Toolbars: + toolButtonStyle: 2 +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz/Grid + Color: 54; 205; 196 + Enabled: true + Line Style: + Line Width: 0.029999999329447746 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 400 + Reference Frame: + Value: true + - Alpha: 0.30000001192092896 + Class: rviz/Map + Color Scheme: map + Draw Behind: true + Enabled: true + Name: Map + Topic: /map + Unreliable: false + Use Timestamp: false + Value: true + - Alpha: 1 + Class: rviz/RobotModel + Collision Enabled: false + Enabled: true + Links: + All Links Enabled: true + Expand Joint Details: false + Expand Link Details: false + Expand Tree: false + Link Tree Style: Links in Alphabetic Order + Name: RobotModel + Robot Description: car/robot_description + TF Prefix: car + Update Interval: 0 + Value: true + Visual Enabled: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/LaserScan + Color: 255; 22; 144 + Color Transformer: FlatColor + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 22; 144 + Max Intensity: 4096 + Min Color: 255; 22; 144 + Min Intensity: 0 + Name: LaserScan + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.029999999329447746 + Style: Points + Topic: /car/scan + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/Marker + Enabled: true + Marker Topic: /car/rhcontroller/traj_chosen + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: true + - Angle Tolerance: 0.10000000149011612 + Class: rviz/Odometry + Covariance: + Orientation: + Alpha: 0.5 + Color: 255; 255; 127 + Color Style: Unique + Frame: Local + Offset: 1 + Scale: 1 + Value: true + Position: + Alpha: 0.30000001192092896 + Color: 204; 51; 204 + Scale: 1 + Value: true + Value: true + Enabled: true + Keep: 100 + Name: Odometry + Position Tolerance: 0.10000000149011612 + Shape: + Alpha: 1 + Axes Length: 1 + Axes Radius: 0.10000000149011612 + Color: 255; 25; 0 + Head Length: 0.30000001192092896 + Head Radius: 0.10000000149011612 + Shaft Length: 1 + Shaft Radius: 0.05000000074505806 + Value: Arrow + Topic: /car/particle_filter/odom + Unreliable: false + Value: true + - Alpha: 0.5 + Axes Length: 1 + Axes Radius: 0.10000000149011612 + Class: 
rviz/Pose + Color: 255; 25; 0 + Enabled: true + Head Length: 0.30000001192092896 + Head Radius: 0.10000000149011612 + Name: Pose + Shaft Length: 0.5 + Shaft Radius: 0.05000000074505806 + Shape: Arrow + Topic: /car/particle_filter/inferred_pose + Unreliable: false + Value: true + - Alpha: 1 + Arrow Length: 0.30000001192092896 + Axes Length: 0.30000001192092896 + Axes Radius: 0.009999999776482582 + Class: rviz/PoseArray + Color: 255; 25; 0 + Enabled: true + Head Length: 0.07000000029802322 + Head Radius: 0.029999999329447746 + Name: PoseArray + Shaft Length: 0.23000000417232513 + Shaft Radius: 0.009999999776482582 + Shape: Arrow (Flat) + Topic: /car/particle_filter/particles + Unreliable: false + Value: true + - Alpha: 0.699999988079071 + Class: rviz/Map + Color Scheme: map + Draw Behind: false + Enabled: true + Name: Map + Topic: /car/map + Unreliable: false + Use Timestamp: false + Value: true + - Class: rviz/Marker + Enabled: false + Marker Topic: /car/rhcontroller/markers + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: false + - Angle Tolerance: 0.10000000149011612 + Class: rviz/Odometry + Covariance: + Orientation: + Alpha: 0.5 + Color: 255; 255; 127 + Color Style: Unique + Frame: Local + Offset: 1 + Scale: 1 + Value: true + Position: + Alpha: 0.30000001192092896 + Color: 204; 51; 204 + Scale: 1 + Value: true + Value: false + Enabled: false + Keep: 1 + Name: Odometry + Position Tolerance: 0.10000000149011612 + Shape: + Alpha: 1 + Axes Length: 1 + Axes Radius: 0.10000000149011612 + Color: 233; 185; 110 + Head Length: 0.30000001192092896 + Head Radius: 0.10000000149011612 + Shaft Length: 0.5 + Shaft Radius: 0.05000000074505806 + Value: Arrow + Topic: /car/camera/odom/sample + Unreliable: false + Value: false + - Class: rviz/TF + Enabled: true + Frame Timeout: 15 + Frames: + All Enabled: false + car/back_left/wheel_link: + Value: true + car/back_right/wheel_link: + Value: true + car/base_footprint: + Value: true + car/base_link: + Value: true + car/camera_accel_frame: + Value: true + car/camera_accel_optical_frame: + Value: true + car/camera_bottom_screw_frame: + Value: true + car/camera_d465i_color_frame: + Value: true + car/camera_d465i_color_optical_frame: + Value: true + car/camera_d465i_depth_frame: + Value: true + car/camera_d465i_depth_optical_frame: + Value: true + car/camera_d465i_link: + Value: true + car/camera_gyro_frame: + Value: true + car/camera_gyro_optical_frame: + Value: true + car/camera_link: + Value: true + car/camera_odom_frame: + Value: true + car/camera_pose_frame: + Value: true + car/front_left/wheel_link: + Value: true + car/front_left/wheel_steer_link: + Value: true + car/front_right/wheel_link: + Value: true + car/front_right/wheel_steer_link: + Value: true + car/insets_link: + Value: true + car/laser_link: + Value: true + car/odom: + Value: true + car/platform_link: + Value: true + map: + Value: true + Marker Scale: 1 + Name: TF + Show Arrows: true + Show Axes: true + Show Names: true + Tree: + map: + car/camera_odom_frame: + car/camera_pose_frame: + car/camera_link: + car/camera_accel_frame: + car/camera_accel_optical_frame: + {} + car/camera_gyro_frame: + car/camera_gyro_optical_frame: + {} + car/odom: + car/base_footprint: + car/base_link: + car/back_left/wheel_link: + {} + car/back_right/wheel_link: + {} + car/camera_bottom_screw_frame: + {} + car/front_left/wheel_steer_link: + car/front_left/wheel_link: + {} + car/front_right/wheel_steer_link: + car/front_right/wheel_link: + {} + car/insets_link: + {} + car/laser_link: + {} + 
car/platform_link: + {} + Update Interval: 0 + Value: true + - Class: rviz/Marker + Enabled: true + Marker Topic: /car/rhcdebug/current_path + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /car/rhcdebug/debug/viz_rollouts + Name: MarkerArray + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/Marker + Enabled: true + Marker Topic: /car/rhcdebug/goal + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/Marker + Enabled: false + Marker Topic: /car/rhcdebug/markers + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: false + - Class: rviz/Marker + Enabled: true + Marker Topic: /car/rhcdebug/traj_chosen + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/Marker + Enabled: false + Marker Topic: /car/rhcontroller/markers + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: false + - Class: rviz/Marker + Enabled: true + Marker Topic: /car/rhcontroller/markers_zero_cost + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/Marker + Enabled: true + Marker Topic: /car/rhcontroller/goal + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/Image + Enabled: true + Image Topic: /car/camera_d465i/color/image_raw + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Image + Normalize Range: true + Queue Size: 2 + Transport Hint: compressed + Unreliable: false + Value: true + - Class: rviz/Image + Enabled: true + Image Topic: /car/camera_d465i/depth/image_rect_raw + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Image + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false + Value: true + - Alpha: 0.10000000149011612 + Auto Size: + Auto Size Factor: 1 + Value: true + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 3.3612513542175293 + Min Value: -4.5343146324157715 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/DepthCloud + Color: 255; 255; 255 + Color Image Topic: "" + Color Transformer: AxisColor + Color Transport Hint: raw + Decay Time: 10 + Depth Map Topic: /car/camera_d465i/depth/image_rect_raw + Depth Map Transport Hint: raw + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: DepthCloud + Occlusion Compensation: + Occlusion Time-Out: 30 + Value: false + Position Transformer: XYZ + Queue Size: 5 + Selectable: true + Size (Pixels): 3 + Style: Flat Squares + Topic Filter: true + Use Fixed Frame: true + Use rainbow: true + Value: false + - Class: rviz/Marker + Enabled: true + Marker Topic: /map_marker + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/Marker + Enabled: true + Marker Topic: /loc_anchor_pose_marker + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/Marker + Enabled: true + Marker Topic: /loc_current_pose_marker + Name: Marker + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/Marker + Enabled: true + Marker Topic: /model_action_marker + Name: Marker + Namespaces: + "": true + Queue Size: 100 + Value: true + Enabled: true + Global Options: + Background Color: 92; 38; 134 + Default Light: true + Fixed Frame: map + Frame Rate: 30 + Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: 
rviz/SetInitialPose + Theta std deviation: 0.2617993950843811 + Topic: /initialpose + X std deviation: 0.5 + Y std deviation: 0.5 + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/ThirdPersonFollower + Distance: 21.35464859008789 + Enable Stereo Rendering: + Stereo Eye Separation: 0.05999999865889549 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Focal Point: + X: 10.403855323791504 + Y: -9.215852737426758 + Z: -6.15337849012576e-5 + Focal Shape Fixed Size: false + Focal Shape Size: 0.05000000074505806 + Invert Z Axis: false + Name: Current View + Near Clip Distance: 0.009999999776482582 + Pitch: 0.9147966504096985 + Target Frame: map + Value: ThirdPersonFollower (rviz) + Yaw: 0.150452122092247 + Saved: + - Angle: 0 + Class: rviz/TopDownOrtho + Enable Stereo Rendering: + Stereo Eye Separation: 0.05999999865889549 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Invert Z Axis: false + Name: TopDownOrtho + Near Clip Distance: 0.009999999776482582 + Scale: 206.59109497070312 + Target Frame: base_link + Value: TopDownOrtho (rviz) + X: 0 + Y: 0 +Window Geometry: + Displays: + collapsed: false + Height: 1403 + Hide Left Dock: false + Hide Right Dock: false + Image: + collapsed: false + QMainWindow State: 000000ff00000000fd000000040000000000000376000004ddfc0200000010fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f00700065007200740069006500730200000a00000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000002af000000c900fffffffb0000000a0049006d00610067006501000002f2000001130000001600fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d006100670065010000040b0000010f0000001600fffffffb0000000a0049006d0061006700650100000375000001a50000000000000000fb0000000a0049006d00610067006501000003d6000001440000000000000000fb0000000a0049006d00610067006501000003ed0000012d0000000000000000fb0000000a0049006d0061006700650100000443000000d70000000000000000fb0000000a0049006d006100670065010000040c0000010e0000000000000000fb0000000a0049006d00610067006501000003ae0000016c0000000000000000000000010000010f000004ddfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003d000004dd000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e1000001970000000300000a000000003efc0100000002fb0000000800540069006d0065010000000000000a00000002eb00fffffffb0000000800540069006d006501000000000000045000000000000000000000056f000004dd00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: false + Width: 2560 + X: 0 + Y: 700 diff --git 
a/mushr_rhc_ros/launch/real/real_model_deploy_new.launch b/mushr_rhc_ros/launch/real/real_model_deploy_new.launch index a9efcb1..81cc08e 100644 --- a/mushr_rhc_ros/launch/real/real_model_deploy_new.launch +++ b/mushr_rhc_ros/launch/real/real_model_deploy_new.launch @@ -10,8 +10,18 @@ + + + + + + + + + + - + @@ -39,7 +49,8 @@ - + + diff --git a/mushr_rhc_ros/launch/sim/sim_server_eval.launch b/mushr_rhc_ros/launch/sim/sim_server_eval.launch index 269ee74..0b4f587 100644 --- a/mushr_rhc_ros/launch/sim/sim_server_eval.launch +++ b/mushr_rhc_ros/launch/sim/sim_server_eval.launch @@ -10,7 +10,17 @@ - + + + + + + + + + + + @@ -45,6 +55,7 @@ + diff --git a/mushr_rhc_ros/src/mingpt/model_mushr_new2.py b/mushr_rhc_ros/src/mingpt/model_mushr_new2.py index ff5f2b3..df08779 100644 --- a/mushr_rhc_ros/src/mingpt/model_mushr_new2.py +++ b/mushr_rhc_ros/src/mingpt/model_mushr_new2.py @@ -127,6 +127,7 @@ def __init__(self, config, device): self.model_type = config.model_type self.use_pred_state = config.use_pred_state + self.use_pred_action = config.use_pred_action self.map_recon_dim = config.map_recon_dim self.freeze_core = config.freeze_core @@ -248,22 +249,37 @@ def __init__(self, config, device): self.load_pretrained_model_weights(config.pretrained_model_path) def load_pretrained_model_weights(self, model_path): - if model_path: + if len(model_path)>3: # some arbitrary length for model path checkpoint = torch.load(model_path, map_location=self.device) # remove the 'module.' string from dict if the saved model was dataparallel - new_checkpoint = OrderedDict() - for key in checkpoint['state_dict'].keys(): - if key.startswith('module.'): - new_checkpoint[key.split("module.",1)[1]] = checkpoint['state_dict'][key] - else: - new_checkpoint[key] = checkpoint['state_dict'][key] + # new_checkpoint = OrderedDict() + # for key in checkpoint['state_dict'].keys(): + # if key.startswith('module.'): + # new_checkpoint[key.split("module.",1)[1]] = checkpoint['state_dict'][key] + # else: + # new_checkpoint[key] = checkpoint['state_dict'][key] # find the common keys # separate the components from the main transformer, tokenizer, and decoders + # remove the keys that have elements we want from scratch instead of pre-trained + modules_to_avoid = [] + if 'loc' in self.config.decoders_to_reset: + modules_to_avoid.append('predict_pose') + if 'map' in self.config.decoders_to_reset: + modules_to_avoid.append('map_decoder') + new_checkpoint = OrderedDict() + for key in checkpoint['state_dict'].keys(): + if not any(module in key for module in modules_to_avoid): + new_checkpoint[key] = checkpoint['state_dict'][key] + else: + # just for debugging + print("LOAD: avoiding key:" + key) + self.load_state_dict(new_checkpoint, strict=False) + # self.load_state_dict(checkpoint['state_dict'], strict=False) print('Successfully loaded pretrained checkpoint: {}.'.format(model_path)) # for key in ckpt: @@ -328,6 +344,7 @@ def configure_optimizers_frozen(self, train_config): # for p in self.parameters(): # if p.requires_grad: # print(p) + print("FROZEN OPTIMIZER FINALIZED") return optimizer def configure_optimizers(self, train_config): @@ -339,11 +356,13 @@ def configure_optimizers(self, train_config): """ # separate out all parameters to those that will and won't experience regularizing weight decay + decay_blocks = set() decay = set() no_decay = set() # whitelist_weight_modules = (torch.nn.Linear, ) whitelist_weight_modules = (torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.ConvTranspose2d) 
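Aside on the optimizer hunk above: the new decay_blocks set splits the whitelisted weights three ways, so the transformer core (modules with 'blocks' in their path) can take its own weight-decay coefficient, train_config.weight_decay_blocks, while the remaining whitelisted weights keep train_config.weight_decay and biases/norm/embedding weights get none. A minimal standalone sketch of that grouping (illustrative only, not the repository's exact traversal):

    import torch

    def three_way_optim_groups(model, weight_decay, weight_decay_blocks):
        whitelist = (torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d,
                     torch.nn.Conv3d, torch.nn.ConvTranspose2d)
        decay, decay_blocks, no_decay = set(), set(), set()
        for mn, m in model.named_modules():
            for pn, _ in m.named_parameters(recurse=False):
                fpn = '%s.%s' % (mn, pn) if mn else pn
                if pn.endswith('weight') and isinstance(m, whitelist):
                    # transformer-core weights get their own decay coefficient
                    (decay_blocks if 'blocks' in mn else decay).add(fpn)
                else:
                    # biases, LayerNorm/Embedding/BatchNorm weights, and
                    # anything unclassified are left undecayed
                    no_decay.add(fpn)
        params = dict(model.named_parameters())
        return [
            {"params": [params[n] for n in sorted(decay)], "weight_decay": weight_decay},
            {"params": [params[n] for n in sorted(decay_blocks)], "weight_decay": weight_decay_blocks},
            {"params": [params[n] for n in sorted(no_decay)], "weight_decay": 0.0},
        ]

    # usage: torch.optim.AdamW(three_way_optim_groups(model, 0.1, 0.01), lr=6e-4)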
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding, torch.nn.BatchNorm3d, torch.nn.BatchNorm2d, torch.nn.BatchNorm1d) + for mn, m in self.named_modules(): for pn, p in m.named_parameters(): fpn = '%s.%s' % (mn, pn) if mn else pn # full param name @@ -352,7 +371,10 @@ def configure_optimizers(self, train_config): no_decay.add(fpn) elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): # weights of whitelist modules will be weight decayed - decay.add(fpn) + if 'blocks' in mn: + decay_blocks.add(fpn) + else: + decay.add(fpn) elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): # weights of blacklist modules will NOT be weight decayed no_decay.add(fpn) @@ -363,8 +385,8 @@ def configure_optimizers(self, train_config): # validate that we considered every parameter param_dict = {pn: p for pn, p in self.named_parameters()} - inter_params = decay & no_decay - union_params = decay | no_decay + inter_params = (decay & no_decay) | (decay_blocks & decay) | (no_decay & decay_blocks) + union_params = decay | no_decay | decay_blocks assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), ) assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \ % (str(param_dict.keys() - union_params), ) @@ -372,9 +394,11 @@ def configure_optimizers(self, train_config): # create the pytorch optimizer object optim_groups = [ {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay}, + {"params": [param_dict[pn] for pn in sorted(list(decay_blocks))], "weight_decay": train_config.weight_decay_blocks}, {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0}, ] optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas) + print("REGULAR NON FROZEN OPTIMIZER FINALIZED") return optimizer # state, and action @@ -437,9 +461,14 @@ def forward(self, states, actions, targets=None, gt_map=None, timesteps=None, po position_embeddings_local = torch.repeat_interleave(position_embeddings_local, 2, dim=1) # position_embeddings_local = position_embeddings_local.repeat(B,1,1) + # to debug tokenizer representation + # print(token_embeddings[0,30,:]) + x = self.drop(token_embeddings + position_embeddings_global + position_embeddings_local) x = self.blocks(x) x = self.ln_f(x) + + # print(x[0,30,:20]) if self.config.train_mode == 'e2e': # from the tokens of s_t, predict next action a_t (like a policy) @@ -466,7 +495,7 @@ def forward(self, states, actions, targets=None, gt_map=None, timesteps=None, po pose_feat = pose_feat.reshape(B, int(N/2)-1, -1) pose_preds = self.predict_pose_deep(pose_feat) # pose_preds[:,:,2:] = torch.tanh(pose_preds[:,:,2:]) - elif self.config.train_mode == 'joint': + elif self.config.train_mode == 'jointttt': action_preds = self.predict_action(x[:, ::2, :]) percep_feat = x[:, ::2, :] B, N, D = percep_feat.shape @@ -499,7 +528,11 @@ def forward(self, states, actions, targets=None, gt_map=None, timesteps=None, po w_state = self.config.state_loss_weight else: w_state = 0.0 - loss = loss_act + w_state*loss_states + if self.config.use_pred_action: + w_act = self.config.act_loss_weight + else: + w_act = 0.0 + loss = w_act*loss_act + w_state*loss_states return action_preds, loss, loss_act, loss_states elif self.config.train_mode == 'loc': # loss over N timesteps @@ -514,7 +547,7 @@ def forward(self, states, actions, targets=None, gt_map=None, 
timesteps=None, po self.config.loc_y_loss_weight*loss_translation_y + \ self.config.loc_angle_loss_weight*loss_angle return pose_preds, loss, loss_translation_x, loss_translation_y, loss_angle - elif self.config.train_mode == 'joint': + elif self.config.train_mode == 'jointtttt': return action_preds, map_recon, pose_preds diff --git a/mushr_rhc_ros/src/preprocessing_utils.py b/mushr_rhc_ros/src/preprocessing_utils.py index b61d65f..022f004 100644 --- a/mushr_rhc_ros/src/preprocessing_utils.py +++ b/mushr_rhc_ros/src/preprocessing_utils.py @@ -10,11 +10,21 @@ def denorm_angle(angle): # denormalize all actions act_max = 0.38 act_min = -0.38 - return (angle*(act_max-act_min)+act_max+act_min)/2 + denorm_angle = (angle*(act_max-act_min)+act_max+act_min)/2.0 + # denorm_angle = max(min(0.1,denorm_angle),-0.1) + return denorm_angle + +def load_params(data_col, is_real_deployment=True): + + if not is_real_deployment: + # print("num of nans before: {}".format(np.isnan(data_col[1:]).sum())) + # insert artificial drop in lasers + random_indices_to_replace = np.random.choice(np.arange(1,data_col.shape[0]), int(data_col[1:].shape[0]*0.2), replace=False) + data_col[random_indices_to_replace] = np.nan + # print("num of nans after: {}".format(np.isnan(data_col[1:]).sum())) -def load_params(data_col): condition = (data_col[1:]<12.0) & (data_col[1:]>0.5) & (~np.isnan(data_col[1:])) # params for the dataset collection - # condition = (data_col[1:]<4.0) & (data_col[1:]>0.1) & (~np.isnan(data_col[1:])) + # condition = (data_col[1:]<8.0) & (data_col[1:]>0.02) & (~np.isnan(data_col[1:])) ok_R = np.extract(condition, data_col[1:]) num_points = ok_R.shape[0] # angles = np.linspace(0, 2*np.pi, 720)*-1.0 + np.pi # aligned in car coordinate frame (because ydlidar points backwards) diff --git a/mushr_rhc_ros/src/rhcnode_network_pcl_new.py b/mushr_rhc_ros/src/rhcnode_network_pcl_new.py index ee0eb2c..70c0932 100755 --- a/mushr_rhc_ros/src/rhcnode_network_pcl_new.py +++ b/mushr_rhc_ros/src/rhcnode_network_pcl_new.py @@ -84,8 +84,8 @@ def __init__(self, dtype, params, logger, name): self.events = [self.goal_event, self.map_metadata_event, self.ready_event] self.run = True - self.default_speed = 2.5 - # self.default_speed = 1.0 + # self.default_speed = 2.5 + self.default_speed = 1.0 self.default_angle = 0.0 self.nx = None self.ny = None @@ -122,13 +122,14 @@ def __init__(self, dtype, params, logger, name): max_timestep = self.clip_len mconf = GPTConfig(block_size, max_timestep, - n_layer=self.n_layers, n_head=8, n_embd=128, model_type='GPT', use_pred_state=True, - state_tokenizer='pointnet', pretrained_encoder_path='', - loss='MSE', train_mode='e2e', pretrained_model_path='', - map_decoder='deconv', map_recon_dim=64, freeze_core=False, - state_loss_weight=0.1, - loc_x_loss_weight=0.01, loc_y_loss_weight=0.1, loc_angle_loss_weight=10.0, - loc_decoder_type='joint') + n_layer=self.n_layers, n_head=8, n_embd=128, model_type='GPT', use_pred_state=True, use_pred_action=True, + state_tokenizer='pointnet', pretrained_encoder_path='', + loss='MSE', train_mode='e2e', pretrained_model_path='', + map_decoder='deconv', map_recon_dim=64, freeze_core=False, + state_loss_weight=0.1, act_loss_weight=1.0, + loc_x_loss_weight=0.01, loc_y_loss_weight=0.1, loc_angle_loss_weight=10.0, + loc_decoder_type='joint', decoders_to_reset='') + model = GPT(mconf, device) # model=torch.nn.DataParallel(model) @@ -170,7 +171,7 @@ def __init__(self, dtype, params, logger, name): saved_map_model_path = rospy.get_param("~model_path_map", '') mconf_map = 
GPTConfig(block_size, max_timestep, - n_layer=self.n_layers, n_head=8, n_embd=128, model_type='GPT', use_pred_state=True, + n_layer=self.n_layers, n_head=8, n_embd=128, model_type='GPT', use_pred_state=True, use_pred_action=True, state_tokenizer='pointnet', pretrained_encoder_path='', loss='MSE', train_mode='map', pretrained_model_path='', map_decoder='deconv', map_recon_dim=64, freeze_core=False, @@ -293,7 +294,11 @@ def start(self): # publish next action if self.compute_network_action: # don't have to run the network at all times, only when scans change and scans are full - self.last_action = self.apply_network() + network_action = self.apply_network() + self.last_action = network_action + # self.last_action = 0.0 + # if self.last_action<0: + # self.last_action *= 1.2 # rospy.loginfo("Applied network: "+str(self.last_action)) self.compute_network_action = False self.publish_vel_marker() @@ -696,16 +701,17 @@ def process_scan(self, msg): scan = np.zeros((721), dtype=np.float) scan[0] = msg.header.stamp.to_sec() scan[1:] = msg.ranges - original_points, sensor_origins, time_stamps, pc_range, voxel_size, lo_occupied, lo_free = pre.load_params(scan) + original_points, sensor_origins, time_stamps, pc_range, voxel_size, lo_occupied, lo_free = pre.load_params(scan, self.is_real_deployment) - vis_mat, nx, ny = pre.compute_bev_image(original_points, sensor_origins, time_stamps, pc_range, voxel_size) - cv2.imshow('graycsale image',vis_mat) - cv2.waitKey(1) + # vis_mat, nx, ny = pre.compute_bev_image(original_points, sensor_origins, time_stamps, pc_range, voxel_size) + # cv2.imshow('graycsale image',vis_mat) + # cv2.waitKey(1) points_to_save = np.zeros(shape=(720,2)) points_to_save[:original_points.shape[0],:] = original_points[:,:2] if original_points.shape[0]>0: random_indices = np.random.choice(original_points.shape[0], 720-original_points.shape[0], replace=True) + # print(random_indices) points_to_save[original_points.shape[0]:,:] = original_points[random_indices,:2] return points_to_save diff --git a/mushr_rhc_ros/src/rhcnode_network_resnet_new.py b/mushr_rhc_ros/src/rhcnode_network_resnet_new.py new file mode 100755 index 0000000..4099389 --- /dev/null +++ b/mushr_rhc_ros/src/rhcnode_network_resnet_new.py @@ -0,0 +1,902 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2019, The Personal Robotics Lab, The MuSHR Team, The Contributors of MuSHR +# License: BSD 3-Clause. See LICENSE.md file in root directory. 
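A note for reading the node below: steering angles cross the network in a normalized range and are mapped back to the car's physical limits on the way out (pre.denorm_angle in apply_network, pre.norm_angle in prepare_model_inputs). A self-contained sketch of that pair under the ±0.38 rad limits from preprocessing_utils.py; norm_angle is assumed to be the exact inverse of the denorm_angle shown in the diff above:

    ACT_MAX, ACT_MIN = 0.38, -0.38  # steering limits in radians

    def norm_angle(angle):
        # physical steering angle -> normalized [-1, 1]
        return (2.0 * angle - ACT_MAX - ACT_MIN) / (ACT_MAX - ACT_MIN)

    def denorm_angle(angle):
        # normalized [-1, 1] -> physical steering angle
        return (angle * (ACT_MAX - ACT_MIN) + ACT_MAX + ACT_MIN) / 2.0

    assert abs(denorm_angle(norm_angle(0.2)) - 0.2) < 1e-12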
+ +# from torchsummary import summary +import sys +import os +import signal +import threading +import random +import numpy as np +from queue import Queue +import time +from collections import OrderedDict +import math +import copy + +import rospy +from ackermann_msgs.msg import AckermannDriveStamped +from geometry_msgs.msg import Point, PoseStamped, PoseWithCovarianceStamped +from std_msgs.msg import ColorRGBA, Empty, String +from std_srvs.srv import Empty as SrvEmpty +from visualization_msgs.msg import Marker +from sensor_msgs.msg import LaserScan + +import logger +import parameters +import rhcbase +import rhctensor +import utilss +import librhc.utils as utils_other + +import torch +from mingpt.model_resnetdirect import ResnetDirect, ResnetDirectWithActions +# from mingpt.model_musher import GPT, GPTConfig +# from mingpt.model_mushr_rogerio import GPT, GPTConfig +# from mingpt.model_mushr_nips import GPT, GPTConfig +# from mingpt.model_mushr_new import GPT, GPTdiff, GPTConfig +from mingpt.model_mushr_new2 import GPT, GPTConfig +import preprocessing_utils as pre +from visualization_msgs.msg import Marker + +import cv2 + +# import torch_tensorrt + + +class RHCNode(rhcbase.RHCBase): + def __init__(self, dtype, params, logger, name): + rospy.init_node(name, anonymous=True, disable_signals=True) + + super(RHCNode, self).__init__(dtype, params, logger) + + self.small_queue_lock = threading.Lock() # for mapping and action prediction + self.large_queue_lock = threading.Lock() # for localization + self.pos_lock = threading.Lock() # for storing the vehicle pose + + self.curr_pose = None + + self.reset_lock = threading.Lock() + self.inferred_pose_lock = threading.Lock() + self.inferred_pose_lock_prev = threading.Lock() + self._inferred_pose = None + self._inferred_pose_prev = None + self._time_of_inferred_pose = None + self._time_of_inferred_pose_prev = None + + self.hp_zerocost_ids = None + self.hp_map = None + self.hp_world = None + self.time_started_goal = None + self.num_trials = 0 + + self.act_inference_time_sum = 0.0 + self.act_inference_time_count = 0 + + self.cur_rollout = self.cur_rollout_ip = None + self.traj_pub_lock = threading.Lock() + + self.goal_event = threading.Event() + self.map_metadata_event = threading.Event() + self.ready_event = threading.Event() + self.events = [self.goal_event, self.map_metadata_event, self.ready_event] + self.run = True + + self.default_speed = 2.5 + # self.default_speed = 1.0 + self.default_angle = 0.0 + self.nx = None + self.ny = None + + self.points_viz_list = None + self.map_recon = None + self.loc_counter = 0 + self.time_sent_reset = None + self.current_frame = None + + # network loading + print("Starting to load model") + os.environ["CUDA_VISIBLE_DEVICES"]=str(0) + device = torch.device('cuda') + # device = "cpu" + + self.device = device + self.clip_len = 16 + + self.is_real_deployment = rospy.get_param("~is_real_deployment", False) + + self.map_type = rospy.get_param("~deployment_map", 'train') + + self.use_map = rospy.get_param("~use_map", False) + self.use_loc = rospy.get_param("~use_loc", False) + + saved_model_path_action = rospy.get_param("~model_path_act", '') + self.out_path = rospy.get_param("~out_path", 'default_value') + + self.n_layers = rospy.get_param("~n_layers", 12) + + vocab_size = 100 + block_size = self.clip_len * 2 + max_timestep = self.clip_len + + mconf = GPTConfig(block_size, max_timestep, + n_layer=self.n_layers, n_head=8, n_embd=128, model_type='GPT', use_pred_state=True, use_pred_action=True, + state_tokenizer='resnet18', 
pretrained_encoder_path='', + loss='MSE', train_mode='e2e', pretrained_model_path='', + map_decoder='deconv', map_recon_dim=64, freeze_core=False, + state_loss_weight=0.1, act_loss_weight=1.0, + loc_x_loss_weight=0.01, loc_y_loss_weight=0.1, loc_angle_loss_weight=10.0, + loc_decoder_type='joint', decoders_to_reset='') + + model = GPT(mconf, device) + # model=torch.nn.DataParallel(model) + + if len(saved_model_path_action)>3: # crude check: any usable path is longer than this + checkpoint = torch.load(saved_model_path_action, map_location=device) + # old code for loading model + model.load_state_dict(checkpoint['state_dict'], strict=False) + # new code for loading model + # new_checkpoint = OrderedDict() + # for key in checkpoint['state_dict'].keys(): + # new_checkpoint[key.split("module.",1)[1]] = checkpoint['state_dict'][key] + # model.load_state_dict(new_checkpoint) + + # ckpt = torch.load('/home/rb/downloaded_models/epoch30.pth.tar')['state_dict'] + # for key in ckpt: + # print('********',key) + # model.load_state_dict(torch.load('/home/rb/downloaded_models/epoch30.pth.tar')['state_dict'], strict=True) + + model.eval() + # model.half() + model.to(device) + + # inputs = [torch_tensorrt.Input( + # states_shape=[1, self.clip_len, 200*200], + # actions_shape=[1, self.clip_len , 1], + # targets_shape=[1, self.clip_len , 1], + # timesteps_shape=[1, 1, 1], + # dtype=torch.half, + # )] + # enabled_precisions = {torch.float, torch.half} + # trt_ts_module = torch_tensorrt.compile(model, inputs=inputs, enabled_precisions=enabled_precisions) + + self.model = model + print("Finished loading model") + + # mapping model + if self.use_map: + + saved_map_model_path = rospy.get_param("~model_path_map", '') + + mconf_map = GPTConfig(block_size, max_timestep, + n_layer=self.n_layers, n_head=8, n_embd=128, model_type='GPT', use_pred_state=True, use_pred_action=True, + state_tokenizer='pointnet', pretrained_encoder_path='', + loss='MSE', train_mode='map', pretrained_model_path='', + map_decoder='deconv', map_recon_dim=64, freeze_core=False, + state_loss_weight=0.1, + loc_x_loss_weight=0.01, loc_y_loss_weight=0.1, loc_angle_loss_weight=10.0, + loc_decoder_type='joint') + map_model = GPT(mconf_map, device) + # map_model=torch.nn.DataParallel(map_model) + + checkpoint = torch.load(saved_map_model_path, map_location=device) + + # old code for loading model + map_model.load_state_dict(checkpoint['state_dict']) + # new code for loading model + # new_checkpoint = OrderedDict() + # for key in checkpoint['state_dict'].keys(): + # new_checkpoint[key.split("module.",1)[1]] = checkpoint['state_dict'][key] + # map_model.load_state_dict(new_checkpoint) + + map_model.eval() + map_model.to(device) + self.map_model = map_model + rate_map_display = 1.0 + + + # localization model + if self.use_loc: + + saved_loc_model_path = rospy.get_param("~model_path_loc", '') + + mconf_loc = GPTConfig(block_size, max_timestep, + n_layer=self.n_layers, n_head=8, n_embd=128, model_type='GPT', use_pred_state=True, + state_tokenizer='pointnet', pretrained_encoder_path='', + loss='MSE', train_mode='loc', pretrained_model_path='', + map_decoder='deconv', map_recon_dim=64, freeze_core=False, + state_loss_weight=0.1, + loc_x_loss_weight=0.01, loc_y_loss_weight=0.1, loc_angle_loss_weight=10.0, + loc_decoder_type='joint') + loc_model = GPT(mconf_loc, device) + # map_model=torch.nn.DataParallel(map_model) + + checkpoint = torch.load(saved_loc_model_path, map_location=device) + + # old code for loading model + loc_model.load_state_dict(checkpoint['state_dict']) +
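The decoders_to_reset plumbing added to model_mushr_new2.py above boils down to: drop the checkpoint keys belonging to any decoder that should train from scratch, then load the rest non-strictly. A minimal sketch using the same module-name mapping as the diff (load_pretrained_filtered is an illustrative name, not part of the codebase):

    from collections import OrderedDict
    import torch

    def load_pretrained_filtered(model, model_path, device, decoders_to_reset=''):
        checkpoint = torch.load(model_path, map_location=device)
        modules_to_avoid = []
        if 'loc' in decoders_to_reset:
            modules_to_avoid.append('predict_pose')  # localization head
        if 'map' in decoders_to_reset:
            modules_to_avoid.append('map_decoder')   # map-reconstruction head
        filtered = OrderedDict(
            (key, value) for key, value in checkpoint['state_dict'].items()
            if not any(module in key for module in modules_to_avoid))
        # strict=False leaves the skipped decoders at their fresh initialization
        model.load_state_dict(filtered, strict=False)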
# new code for loading model + # new_checkpoint = OrderedDict() + # for key in checkpoint['state_dict'].keys(): + # new_checkpoint[key.split("module.",1)[1]] = checkpoint['state_dict'][key] + # loc_model.load_state_dict(new_checkpoint) + + loc_model.eval() + loc_model.to(device) + self.loc_model = loc_model + rate_loc_display = 20 + + + + self.small_queue = Queue(maxsize = self.clip_len) # stores current scan, action, pose. up to 16 elements + self.large_queue = Queue() # stores current scan, action, pose. no limit on elements + + self.last_action = self.default_angle + self.new_scan_arrived = False + self.compute_network_action = False + self.compute_network_loc = False + self.has_loc_anchor = False + self.did_reset = False + + # parameters for model evaluation + self.reset_counter = 0 + self.last_reset_time = time.time() + self.distance_so_far = 0.0 + self.time_so_far = 0.0 + self.file_name = os.path.join(self.out_path,'info.csv') + + # define timer callbacks: + if self.use_map: + self.map_viz_timer = rospy.Timer(rospy.Duration(1.0 / rate_map_display), self.map_viz_cb) + if self.use_loc: + self.map_viz_loc = rospy.Timer(rospy.Duration(1.0 / rate_loc_display), self.loc_viz_cb) + + + + + + + def start(self): + self.logger.info("Starting RHController") + self.setup_pub_sub() + self.rhctrl = self.load_controller() + self.find_allowable_pts() # gets the allowed halton points from the map + + self.ready_event.set() + + rate_hz = 50 + rate = rospy.Rate(rate_hz) + self.logger.info("Initialized") + + # set the initial pose for the car the very first time, in an allowable region + if self.map_type == 'train': + self.send_initial_pose() + else: + self.send_initial_pose_12f() + + self.time_started = rospy.Time.now() + + # wait until we actually have a car pose + rospy.loginfo("Waiting to receive pose") + while not rospy.is_shutdown() and self.inferred_pose() is None: + pass + rospy.loginfo("Vehicle pose received") + + while not rospy.is_shutdown() and self.run: + + # check if we should reset the vehicle after a crash + if not self.is_real_deployment: + if self.check_reset(rate_hz): + rospy.loginfo("Resetting the car's position") + + # publish next action + if self.compute_network_action: + # don't have to run the network at all times, only when scans change and scans are full + network_action = self.apply_network() + self.last_action = network_action + # self.last_action = 0.0 + # if self.last_action<0: + # self.last_action *= 1.2 + # rospy.loginfo("Applied network: "+str(self.last_action)) + self.compute_network_action = False + self.publish_vel_marker() + + self.publish_traj(self.default_speed, self.last_action) + + # if map is not None: + rate.sleep() + + def map_viz_cb(self, timer): + + # find the middle pose for plotting reference + self.small_queue_lock.acquire() + pos_queue_list = list(self.small_queue.queue) + self.small_queue_lock.release() + pos_size = len(pos_queue_list) + + if pos_size==16: + pose_mid = pos_queue_list[int(pos_size/2) -1][2] + # if not self.is_real_deployment: + # pose_mid = pos_queue_list[int(pos_size/2) -1][2] + # else: + # pose_mid = PoseStamped() + x_imgs, x_act, t = self.prepare_model_inputs(queue_type='small') + start = time.time() + # with torch.set_grad_enabled(False): + with torch.inference_mode(): + self.map_recon, _ = self.map_model(states=x_imgs, actions=x_act, targets=x_act, gt_map=None, timesteps=t, poses=None, compute_loss=False) + finished_map_network = time.time() + rospy.loginfo("map network delay: "+str(finished_map_network-start)) + + # publish the GT pose of
the map center + self.pose_marker_pub.publish(self.create_position_marker(pose_mid)) + # publish the map itself + self.map_marker_pub.publish(self.create_map_marker(pose_mid)) + + def loc_viz_cb(self, timer): + + + if self.compute_network_loc is False or self.time_sent_reset is None: + return + + # create anchor pose for localization + if time.time()-self.time_sent_reset>3.0 and self.did_reset is True: + self.loc_counter = 0 + self.has_loc_anchor = False + self.large_queue_lock.acquire() + self.large_queue = Queue() # reset the queue as well, we can't mix unfinished elements from previous run + self.large_queue_lock.release() + self.did_reset = False + rospy.logwarn("Resetting the loc position") + + self.large_queue_lock.acquire() + large_queue_list = copy.deepcopy(list(self.large_queue.queue)) + self.large_queue_lock.release() + large_queue_len = len(large_queue_list) + + # set the anchor and equal to the first reference when we count 16 scans after reset + if large_queue_len>=self.clip_len and self.has_loc_anchor is False: + rospy.logwarn("Setting the loc anchor position when completed 16 scans") + self.pose_anchor = copy.deepcopy(self.curr_pose) + self.current_frame = copy.deepcopy(self.pose_anchor) + self.has_loc_anchor = True + + if large_queue_len>=self.clip_len and self.has_loc_anchor is True and self.compute_network_loc is True: + x_imgs, x_act, t = self.prepare_model_inputs(queue_type='large') + # remove the oldest element from the large queue + self.large_queue_lock.acquire() + self.large_queue.get() + self.large_queue_lock.release() + + start = time.time() + # with torch.set_grad_enabled(False): + with torch.inference_mode(): + pose_preds, _, _, _, _ = self.loc_model(states=x_imgs, actions=x_act, targets=x_act, gt_map=None, timesteps=t, poses=None, compute_loss=False) + finished_loc_network = time.time() + rospy.loginfo("loc network delay: "+str(finished_loc_network-start)) + # publish anchor pose of the map center + self.loc_anchor_pose_marker_pub.publish(self.create_position_marker(self.pose_anchor, color=[0,0,1,1])) + # publish the current accumulated pose + # delta_pose_pred = pose_preds[0,self.clip_len-1,:].cpu().numpy() + delta_pose_pred = pose_preds[0,-1,:].cpu().numpy() + # calculate the change in coordinates + self.current_frame = self.transform_poses(self.current_frame, delta_pose_pred) + self.loc_current_pose_marker_pub.publish(self.create_position_marker(self.current_frame, color=[1,0,0,1])) + + + def transform_poses(self, current_pose, delta_pose_pred): + # elements of homogeneous matrix expressing point from local frame into world frame coords + current_angle = utilss.rosquaternion_to_angle(current_pose.pose.orientation) + R = np.array([[np.cos(current_angle),-np.sin(current_angle)], + [np.sin(current_angle),np.cos(current_angle)]]) + t = np.array([[current_pose.pose.position.x], + [current_pose.pose.position.y]]) + T = np.array([[R[0,0],R[0,1],t[0,0]], + [R[1,0],R[1,1],t[1,0]], + [0,0,1]]) + # now transform the position of the next point from local to world frame + pose_local = np.array([[delta_pose_pred[0]], + [delta_pose_pred[1]], + [1]]) + pose_world = np.matmul(T, pose_local) + current_pose.pose.position.x = pose_world[0,0] + current_pose.pose.position.y = pose_world[1,0] + current_angle += delta_pose_pred[2] + current_pose.pose.orientation = utilss.angle_to_rosquaternion(current_angle) + return current_pose + + def calc_stamped_poses(self, pos1, pose_f, pose_semi_f): + pos1.pose.position.x = pos1.pose.position.x + pose_f[0] - pose_semi_f[0] + 
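transform_poses above is plain SE(2) composition: rotate the predicted local-frame delta into the world frame, add it to the current position, and accumulate the heading change. The same update without ROS types, as a sketch (pose and delta are (x, y, theta) triples):

    import numpy as np

    def compose_se2(pose, delta):
        # pose in the world frame; delta expressed in pose's local frame
        x, y, theta = pose
        dx, dy, dtheta = delta
        return (x + np.cos(theta) * dx - np.sin(theta) * dy,
                y + np.sin(theta) * dx + np.cos(theta) * dy,
                theta + dtheta)

    # accumulating network-predicted deltas from an anchor, as loc_viz_cb does:
    # current = compose_se2(current, delta_pose_pred)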
pos1.pose.position.y = pos1.pose.position.y + pose_f[1] - pose_semi_f[1] + angle_f = math.atan2(pose_f[3], pose_f[2]) + angle_semi_f = math.atan2(pose_semi_f[3], pose_semi_f[2]) + orig_angle = utilss.rosquaternion_to_angle(pos1.pose.orientation) + pos1.pose.orientation = utilss.angle_to_rosquaternion(orig_angle+angle_f-angle_semi_f) + return pos1 + + def sum_stamped_poses(self, pos1, pose_pred): + pos1.pose.position.x += pose_pred[0] + pos1.pose.position.y += pose_pred[1] + angle = math.atan2(pose_pred[3], pose_pred[2]) + orig_angle = utilss.rosquaternion_to_angle(pos1.pose.orientation) + pos1.pose.orientation = utilss.angle_to_rosquaternion(orig_angle+angle) + return pos1 + + def create_map_marker(self, pose_stamped): + + start = time.time() + marker = Marker() + marker.header.frame_id = "/map" + marker.header.stamp = rospy.Time.now() + marker.type = 8 # points + marker.id = 0 + + # Set the scale of the marker + marker.scale.x = 0.1 + marker.scale.y = 0.1 + + map_recon = self.map_recon.cpu().numpy()[0,0,:] + [w,h] = map_recon.shape + + m_per_px = 12.0/64.0 + + # iterate over all pixels from the image to create the map in points + # only do this for the very first time. then skip because rel pos is the same + if self.points_viz_list is None: + self.points_viz_list = [] + for i in range(w): + for j in range(h): + p = Point() + p.x = +6.0 - i*m_per_px + p.y = +6.0 - j*m_per_px + p.z = 0.0 + self.points_viz_list.append(p) + marker.points = self.points_viz_list + + finished_points = time.time() + rospy.loginfo("points delay: "+str(finished_points-start)) + + # loop to figure out the colors for each point + for i in range(w): + for j in range(h): + cell_val = map_recon[i,j] + if cell_val < 0.2: + alpha = 0.0 + else: + alpha = 0.7 + color = ColorRGBA(r=0.0, g=min(max(cell_val,0.0),1.0), b=0.0, a=alpha) + marker.colors.append(color) + + finished_colors = time.time() + rospy.loginfo("color delay: "+str(finished_colors-finished_points)) + + # Set the pose of the marker + if pose_stamped is not None: + marker.pose = pose_stamped.pose + return marker + + def create_position_marker(self, pose_stamped, color=[0,1,0,1]): + marker = Marker() + marker.header.frame_id = "/map" + marker.header.stamp = rospy.Time.now() + marker.type = 0 # arrow + marker.id = 0 + + # Set the scale of the marker + marker.scale.x = 1 + marker.scale.y = 0.1 + marker.scale.z = 0.1 + + # Set the color + marker.color.r = color[0] + marker.color.g = color[1] + marker.color.b = color[2] + marker.color.a = color[3] + + # Set the pose of the marker + if pose_stamped is not None: + marker.pose = pose_stamped.pose + return marker + + + def publish_vel_marker(self): + marker = Marker() + marker.header.frame_id = "/car/base_link" + marker.header.stamp = rospy.Time.now() + marker.type = 0 # arrow + marker.id = 0 + + # Set the scale of the marker + marker.scale.x = 1 + marker.scale.y = 0.1 + marker.scale.z = 0.1 + + # Set the color + marker.color.r = 1.0 + marker.color.g = 0.0 + marker.color.b = 0.0 + marker.color.a = 1.0 + + # set the first point + # point_start = Point() + # point_start.x = point_start.y = point_start.z = 0.0 + # marker.points.append(point_start) + + # l = 5.0 + # point_end = Point() + # point_end.x = l*np.cos(self.last_action) + # point_end.y = l*np.sin(self.last_action) + # point_end.z = 0.0 + # marker.points.append(point_end) + + # Set the pose of the marker + marker.pose.position.x = 0.32 + marker.pose.position.y = 0 + marker.pose.position.z = 0 + marker.pose.orientation = 
utilss.angle_to_rosquaternion(self.last_action) + # marker.pose.orientation.x = 0.0 + # marker.pose.orientation.y = 0.0 + # marker.pose.orientation.z = 0.0 + # marker.pose.orientation.w = 1.0 + + self.vel_marker_pub.publish(marker) + + + def apply_network(self): + start_zero = time.time() + x_imgs, x_act, t = self.prepare_model_inputs(queue_type='small') + start = time.time() + # with torch.set_grad_enabled(False): + with torch.inference_mode(): + # action_pred = 0.0 + action_pred, _, _, _ = self.model(states=x_imgs, actions=x_act, targets=x_act, gt_map=None, timesteps=t, poses=None, compute_loss=False) + finished_action_network = time.time() + # rospy.loginfo("action network delay: "+str(finished_action_network-start)) + # self.act_inference_time_sum += finished_action_network-start + # self.act_inference_time_count += 1 + rospy.loginfo_throttle(10, "action network delay: "+str(finished_action_network-start)) + # rospy.loginfo_throttle(10, "AVG action network delay: "+str(self.act_inference_time_sum/self.act_inference_time_count)) + action_pred = action_pred[0,-1,0].cpu().flatten().item() + # if self.use_map: + # map_pred, loss = self.map_model(states=x_imgs, actions=x_act, targets=x_act, gt_map=None, timesteps=t, poses=None) + # finished_map_network = time.time() + # rospy.loginfo("map network delay: "+str(finished_map_network-finished_action_network)) + finished_network = time.time() + # rospy.loginfo("network delay total: "+str(finished_network-start_zero)) + # de-normalize + action_pred = pre.denorm_angle(action_pred) + return action_pred + + + def prepare_model_inputs(self, queue_type): + # start = time.time() + # organize the scan input + + x_imgs = torch.zeros(1,self.clip_len,self.nx,self.ny) + + x_act = torch.zeros(1,self.clip_len) + if queue_type=='small': + self.small_queue_lock.acquire() + queue_list = list(self.small_queue.queue) + self.small_queue_lock.release() + elif queue_type=='large': + self.large_queue_lock.acquire() + queue_list = list(self.large_queue.queue)[:self.clip_len] # oldest clip_len elements of the large queue + self.large_queue_lock.release() + for idx, element in enumerate(queue_list): + x_imgs[0,idx,:] = torch.tensor(element[0]) + x_act[0,idx] = torch.tensor(pre.norm_angle(element[1])) + x_imgs = x_imgs.contiguous().view(1, self.clip_len, 200*200) + x_imgs = x_imgs.to(self.device) + x_act = x_act.view(1, self.clip_len , 1) + x_act = x_act.to(self.device) + t = torch.arange(0, self.clip_len).view(1,-1).to(self.device) + t = t.repeat(1,1) + # finish_processing = time.time() + # rospy.loginfo("processing delay: "+str(finish_processing-start)) + return x_imgs, x_act, t + + def check_reset(self, rate_hz): + # condition for when the car gets stuck + if self.inferred_pose_prev() is not None and self.time_started is not None and self._time_of_inferred_pose is not None and self._time_of_inferred_pose_prev is not None: + # calculate distance traveled + delta_dist = np.linalg.norm(np.asarray(self.inferred_pose())-np.asarray(self.inferred_pose_prev())) + v = 2.0 # default value + if delta_dist < 0.5: + delta_time_poses = (self._time_of_inferred_pose-self._time_of_inferred_pose_prev).to_sec() + self.distance_so_far += delta_dist + self.time_so_far += delta_time_poses + # look at speed and termination condition + if delta_time_poses > 0.001: + v = delta_dist / delta_time_poses + # print('v = {}'.format(v)) + if v < 0.05 and rospy.Time.now().to_sec() - self.time_started.to_sec() > 1.0: + # this means that the car was supposed to follow a traj, but velocity is too low because it's stuck + # first we reset the car pose +
self.reset_counter +=1 + if self.reset_counter > 5 : + # save distance data to file and reset distance + delta_time = time.time() - self.last_reset_time + print("Distance: {} | Time: {} | Time so far: {}".format(self.distance_so_far, delta_time, self.time_so_far)) + with open(self.file_name,'a') as fd: + fd.write(str(self.distance_so_far)+','+str(self.time_so_far)+'\n') + if self.map_type == 'train': + self.send_initial_pose() + else: + self.send_initial_pose_12f() + rospy.loginfo("Got stuck, resetting pose of the car to default value") + msg = String() + msg.data = "got stuck" + self.expr_at_goal.publish(msg) + self.reset_counter = 0 + # new_line = np.array([self.distance_so_far, delta_time]) + # self.out_file = open(self.file_name,'ab') + # np.savetxt(self.out_file, new_line, delimiter=',') + # self.out_file.close() + self.distance_so_far = 0.0 + self.time_so_far = 0.0 + self.last_reset_time = time.time() + return True + else: + return False + + def send_initial_pose(self): + # sample an initial pose for the car based on the valid samples + hp_world_valid = self.hp_world[self.hp_zerocost_ids] + new_pos_idx = np.random.randint(0, hp_world_valid.shape[0]) + msg = PoseWithCovarianceStamped() + msg.header.stamp = rospy.Time.now() + msg.header.frame_id = "map" + # msg.pose.pose.position.x = hp_world_valid[new_pos_idx][0] + # msg.pose.pose.position.y = hp_world_valid[new_pos_idx][1] + # msg.pose.pose.position.z = 0.0 + # quat = utilss.angle_to_rosquaternion(hp_world_valid[new_pos_idx][1]) + msg.pose.pose.position.x = 4.12211 + (np.random.rand()-0.5)*2.0*0.5 + msg.pose.pose.position.y = -7.49623 + (np.random.rand()-0.5)*2.0*0.5 + msg.pose.pose.position.z = 0.0 + quat = utilss.angle_to_rosquaternion(np.radians(68 + (np.random.rand()-0.5)*2.0*360)) # 360 instead of zero at the end + msg.pose.pose.orientation = quat + + self.did_reset = True + self.time_sent_reset = time.time() + + # # create anchor pose for localization + # self.pose_anchor = PoseStamped() + # self.pose_anchor.header = msg.header + # self.pose_anchor.pose = msg.pose.pose + # self.current_pose = copy.deepcopy(self.pose_anchor) + # self.has_loc_anchor = True + + self.pose_reset.publish(msg) + + def send_initial_pose_12f(self): + # sample an initial pose for the car based on the valid samples + hp_world_valid = self.hp_world[self.hp_zerocost_ids] + new_pos_idx = np.random.randint(0, hp_world_valid.shape[0]) + msg = PoseWithCovarianceStamped() + msg.header.stamp = rospy.Time.now() + msg.header.frame_id = "map" + # msg.pose.pose.position.x = hp_world_valid[new_pos_idx][0] + # msg.pose.pose.position.y = hp_world_valid[new_pos_idx][1] + # msg.pose.pose.position.z = 0.0 + # quat = utilss.angle_to_rosquaternion(hp_world_valid[new_pos_idx][1]) + msg.pose.pose.position.x = -3.3559 + (np.random.rand()-0.5)*2.0*0.5 + msg.pose.pose.position.y = 4.511 + (np.random.rand()-0.5)*2.0*0.5 + msg.pose.pose.position.z = 0.0 + quat = utilss.angle_to_rosquaternion(np.radians(-83.115 + (np.random.rand()-0.5)*2.0*360)) # 360 instead of zero at the end + msg.pose.pose.orientation = quat + + self.did_reset = True + self.time_sent_reset = time.time() + + self.pose_reset.publish(msg) + + def shutdown(self, signum, frame): + rospy.signal_shutdown("SIGINT received") + self.run = False + for ev in self.events: + ev.set() + + def process_scan(self, msg): + scan = np.zeros((721), dtype=np.float) + scan[0] = msg.header.stamp.to_sec() + scan[1:] = msg.ranges + original_points, sensor_origins, time_stamps, pc_range, voxel_size, lo_occupied, lo_free =
pre.load_params(scan, self.is_real_deployment) + vis_mat, nx, ny = pre.compute_bev_image(original_points, sensor_origins, time_stamps, pc_range, voxel_size) + # cv2.imshow('grayscale image',vis_mat) + # cv2.waitKey(1) + + if self.nx is None: + self.nx = nx + self.ny = ny + + return vis_mat + + def cb_scan(self, msg): + # new lidar scan arrived + + # first process all the information + processed_scan = self.process_scan(msg) + current_action = copy.deepcopy(self.last_action) + self.pos_lock.acquire() + current_pose_gt = copy.deepcopy(self.curr_pose) + self.pos_lock.release() + current_pose_pred = copy.deepcopy(self.current_frame) + queue_element = [processed_scan, current_action, current_pose_gt, current_pose_pred] + + # update the small queue + self.small_queue_lock.acquire() + if self.small_queue.full(): + self.small_queue.get() # remove the oldest element, will be replaced next + self.small_queue.put(queue_element) + self.small_queue_lock.release() + + # update the large queue. won't remove any elements, only append + self.large_queue_lock.acquire() + self.large_queue.put(queue_element) + self.large_queue_lock.release() + + # control flags for other processes are activated now that queues have been updated + self.new_scan_arrived = True + self.compute_network_action = True + self.compute_network_loc = True + self.loc_counter += 1 + + + def setup_pub_sub(self): + rospy.Service("~reset/soft", SrvEmpty, self.srv_reset_soft) + rospy.Service("~reset/hard", SrvEmpty, self.srv_reset_hard) + + car_name = self.params.get_str("car_name", default="car") + + rospy.Subscriber( + "/" + car_name + "/" + 'scan', + LaserScan, + self.cb_scan, + queue_size=10, + ) + + rospy.Subscriber( + "/" + car_name + "/" + rospy.get_param("~inferred_pose_t"), + PoseStamped, + self.cb_pose, + queue_size=10, + ) + + self.rp_ctrls = rospy.Publisher( + "/" + + car_name + + "/" + + self.params.get_str( + "ctrl_topic", default="mux/ackermann_cmd_mux/input/navigation" + ), + AckermannDriveStamped, + queue_size=2, + ) + + self.vel_marker_pub = rospy.Publisher("/model_action_marker", Marker, queue_size = 1) + + # markers for mapping visualization + self.pose_marker_pub = rospy.Publisher("/pose_marker", Marker, queue_size = 1) + self.map_marker_pub = rospy.Publisher("/map_marker", Marker, queue_size = 1) + + # markers for localization visualization + self.loc_anchor_pose_marker_pub = rospy.Publisher("/loc_anchor_pose_marker", Marker, queue_size = 1) + self.loc_current_pose_marker_pub = rospy.Publisher("/loc_current_pose_marker", Marker, queue_size = 1) + + self.pose_reset = rospy.Publisher("/initialpose", PoseWithCovarianceStamped, queue_size=1) + + traj_chosen_t = self.params.get_str("traj_chosen_topic", default="~traj_chosen") + self.traj_chosen_pub = rospy.Publisher(traj_chosen_t, Marker, queue_size=10) + + # For the experiment framework, need indicators to listen on + self.expr_at_goal = rospy.Publisher("experiments/finished", String, queue_size=1) + + # to publish the new goal, for visualization + self.goal_pub = rospy.Publisher("~goal", Marker, queue_size=10) + + def srv_reset_hard(self, msg): + """ + Hard reset does a complete reload of the controller + """ + rospy.loginfo("Start hard reset") + self.reset_lock.acquire() + self.load_controller() + self.goal_event.clear() + self.reset_lock.release() + rospy.loginfo("End hard reset") + return [] + + def srv_reset_soft(self, msg): + """ + Soft reset only resets soft state (like tensors).
No dependencies or maps + are reloaded + """ + rospy.loginfo("Start soft reset") + self.reset_lock.acquire() + self.rhctrl.reset() + self.goal_event.clear() + self.reset_lock.release() + rospy.loginfo("End soft reset") + return [] + + def find_allowable_pts(self): + self.hp_map, self.hp_world = self.rhctrl.cost.value_fn._get_halton_pts() + self.hp_zerocost_ids = np.zeros(self.hp_map.shape[0], dtype=bool) + for i, pts in enumerate(self.hp_map): + pts = pts.astype(np.int) + if int(pts[0])
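A closing note on cb_scan above: the lock-guarded "drop the oldest element when full, then append" dance on small_queue is a rolling-window buffer, which collections.deque expresses with the bound built in. A minimal equivalent sketch (RollingWindow is an illustrative name, not part of this codebase):

    import threading
    from collections import deque

    class RollingWindow:
        # fixed-size history of (scan, action, pose) tuples
        def __init__(self, maxlen=16):
            self._buf = deque(maxlen=maxlen)  # deque discards the oldest item itself
            self._lock = threading.Lock()

        def push(self, item):
            with self._lock:
                self._buf.append(item)

        def snapshot(self):
            # copy under the lock, mirroring list(queue.queue) in the node
            with self._lock:
                return list(self._buf)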