Commit

new eval
rogeriobonatti committed Apr 16, 2022
1 parent 7d44674 commit d80e971
Showing 4 changed files with 1,068 additions and 55 deletions.
12 changes: 10 additions & 2 deletions mushr_rhc_ros/launch/sim/sim_server_eval.launch
@@ -6,12 +6,20 @@
<!-- <arg name="out_path" default="/home/rb/hackathon_data/e2e_eval/model_test" />
<arg name="model_path" default="/home/rb/hackathon_data/aml_outputs/log_output/normal-kingfish/GPTiros_e2e_8gpu_2022-02-17_1645120431.7528405_2022-02-17_1645120431.7528613/model/epoch10.pth.tar" /> -->

<arg name="out_path" default="/home/rb/hackathon_data/e2e_eval/model_test" />
<arg name="model_path" default="/home/rb/hackathon_data/aml_outputs/log_output/mushr_nips/GPTnips_8gpu_relu_e2e_2022-03-31_1648698123.8598492_2022-03-31_1648698123.8598652/model/epoch15.pth.tar" />
<!-- current model! -->
<!-- <arg name="out_path" default="/home/rb/hackathon_data/e2e_eval/model_test" />
<arg name="model_path" default="/home/rb/hackathon_data/aml_outputs/log_output/deep-adder/GPTnips_e2e_2022-04-06_1649220643.529554_2022-04-06_1649220643.5295672/model/epoch21.pth.tar" /> -->
+
+ <!-- <arg name="out_path" default="/home/rb/hackathon_data/e2e_eval/model_test" />
+ <arg name="model_path" default="/home/rb/hackathon_data/aml_outputs/log_output/renewed-colt/GPTnips_8gpu_relu_e2e_slow_bias_2022-04-02_1648869033.3693378_2022-04-02_1648869033.36935/model/epoch22.pth.tar" /> -->
+
+ <!-- <arg name="out_path" default="/home/rb/hackathon_data/e2e_eval/model_test" />
+ <arg name="model_path" default="/home/rb/hackathon_data/aml_outputs/log_output/mushr_nips/GPTnips_8gpu_relu_e2e_slow_2022-03-31_1648698385.6160257_2022-03-31_1648698385.6160388/model/epoch5.pth.tar" /> -->
+
+ <!-- current model! -->
+ <arg name="out_path" default="/home/rb/hackathon_data/e2e_eval/model_test" />
+ <arg name="model_path" default="/home/rb/hackathon_data/aml_outputs/log_output/tough-mongoose/GPTnips_e2e_2022-04-07_1649355173.7379868_2022-04-07_1649355173.7379994/model/epoch22.pth.tar" />

<group if="$(arg map_server)">
<include file="$(find mushr_rhc_ros)/launch/map_server.launch" />
</group>
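Note: the launch file selects the evaluation checkpoint by leaving exactly one out_path/model_path pair uncommented. As an alternative to editing the file for each experiment, roslaunch's standard name:=value override can supply the same arguments from the command line (the checkpoint path below is a hypothetical placeholder, not one of the paths above):

    roslaunch mushr_rhc_ros sim_server_eval.launch \
        out_path:=/home/rb/hackathon_data/e2e_eval/model_test \
        model_path:=/path/to/model/epoch22.pth.tar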
100 changes: 66 additions & 34 deletions mushr_rhc_ros/src/mingpt/model_mushr_nips.py
@@ -10,6 +10,7 @@
from base64 import encode
import math
import logging
+ from collections import OrderedDict

import torch
import torch.nn as nn
@@ -152,29 +153,32 @@ def __init__(self, config, device):
            self.state_encoder = nn.Sequential(nn.Conv2d(1, 32, 8, stride=4, padding=0), nn.ReLU(),
                nn.Conv2d(32, 64, 4, stride=2, padding=0), nn.ReLU(),
                nn.Conv2d(64, 64, 3, stride=1, padding=0), nn.ReLU(),
-               nn.Flatten(), nn.Linear(36864, config.n_embd), nn.Tanh())
+               nn.Flatten(), nn.Linear(36864, config.n_embd), nn.ReLU())
        elif config.state_tokenizer == 'resnet18':
            self.state_encoder = nn.Sequential(resnet18_custom(pretrained=False, clip_len=1), nn.ReLU(),
                nn.Linear(1000, config.n_embd), nn.Tanh())

        self.ret_emb = nn.Sequential(nn.Linear(1, config.n_embd), nn.Tanh())

        # add map decoder
        encoded_feat_dim = (config.n_embd * config.block_size) // 2
        if config.map_decoder == 'mlp':
            #MLP map decoder
-           self.map_decoder = nn.Sequential(nn.Linear(encoded_feat_dim, 1024), nn.Tanh(),
-               nn.Linear(1024, 2048), nn.Tanh(),
-               nn.Linear(2048, 64*64), nn.Tanh())
+           self.map_decoder = nn.Sequential(nn.Linear(encoded_feat_dim, 1024),
+               nn.ReLU(),
+               nn.Linear(1024, 2048),
+               nn.ReLU(),
+               nn.Linear(2048, 64*64),
+               nn.Tanh())
        elif config.map_decoder == 'deconv':
            if self.map_recon_dim == 64:
                # conv2d map decoder - original
-               self.map_decoder = nn.Sequential(nn.Linear(encoded_feat_dim, 4096), nn.Tanh(),
+               self.map_decoder = nn.Sequential(nn.Linear(encoded_feat_dim, 4096),
+                   nn.ReLU(),
                    Reshape(16, 16, 16),
                    MapDecoder_2x_Deconv(16))
            elif self.map_recon_dim == 128:
                # conv2d map decoder - new trial
-               self.map_decoder = nn.Sequential(nn.Linear(encoded_feat_dim, 4096), nn.Tanh(),
+               self.map_decoder = nn.Sequential(nn.Linear(encoded_feat_dim, 4096),
+                   nn.ReLU(),
                    Reshape(16, 16, 16),
                    MapDecoder_4x_Deconv128px(16))
            else:
@@ -205,38 +209,50 @@ def __init__(self, config, device):
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
-           nn.Linear(32, 4)
+           nn.Linear(32, 3)
        )

        self.predict_state = nn.Sequential(
            *([nn.Linear(config.n_embd, config.n_embd)] + ([nn.Tanh()] if action_tanh else []))
        )

        criterion = torch.nn.MSELoss(reduction='mean')
-       # self.criterion = criterion.cuda(device)
+       self.criterion = criterion.cuda(device)

        self.load_pretrained_model_weights(config.pretrained_model_path)


    def load_pretrained_model_weights(self, model_path):
        if model_path:
-           ckpt = torch.load(model_path)['state_dict'] # COMPASS checkpoint format.
-           ckpt2 = {}
-           ckpt3 = {}
-           ckpt4 = {}
-           for key in ckpt:
-               print(key)
-               if key.startswith('blocks'):
-                   ckpt2[key.replace('blocks.', '')] = ckpt[key]
-               if key.startswith('state_encoder'):
-                   ckpt3[key.replace('state_encoder.', '')] = ckpt[key]
-               if key.startswith('action_embeddings'):
-                   ckpt4[key.replace('action_embeddings.', '')] = ckpt[key]
-
-           self.blocks.load_state_dict(ckpt2)
-           self.state_encoder.load_state_dict(ckpt3)
-           self.action_embeddings.load_state_dict(ckpt4)
+           checkpoint = torch.load(model_path, map_location=self.device)
+           new_checkpoint = OrderedDict()
+           for key in checkpoint['state_dict'].keys():
+               new_checkpoint[key.split("module.",1)[1]] = checkpoint['state_dict'][key]
+           self.load_state_dict(new_checkpoint)
+           print('Successfully loaded pretrained checkpoint: {}.'.format(model_path))
+           # for key in ckpt:
+           #     print(key)

+           # for param in self.parameters():
+           #     print(param)

+           # ckpt = torch.load(model_path)['state_dict'] # COMPASS checkpoint format.
+           # ckpt2 = {}
+           # ckpt3 = {}
+           # ckpt4 = {}
+           # for key in ckpt:
+           #     print(key)
+           #     if key.startswith('blocks'):
+           #         ckpt2[key.replace('blocks.', '')] = ckpt[key]
+           #     if key.startswith('state_encoder'):
+           #         ckpt3[key.replace('state_encoder.', '')] = ckpt[key]
+           #     if key.startswith('action_embeddings'):
+           #         ckpt4[key.replace('action_embeddings.', '')] = ckpt[key]

+           # self.blocks.load_state_dict(ckpt)
+           # self.state_encoder.load_state_dict(ckpt3)
+           # self.action_embeddings.load_state_dict(ckpt4)
+           # print('Successfully loaded pretrained checkpoint: {}.'.format(model_path))
        else:
            print('Train from scratch.')

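Note: the rewritten loader assumes a checkpoint saved from a torch.nn.DataParallel-wrapped model, where every state_dict key carries a "module." prefix; stripping that prefix lets the weights load into the unwrapped model, and map_location keeps the load device-agnostic. A minimal self-contained sketch of the same pattern (function name and checkpoint path are illustrative, not part of the commit):

    from collections import OrderedDict

    import torch

    def strip_module_prefix(state_dict):
        """Drop the 'module.' prefix that DataParallel prepends to every key."""
        stripped = OrderedDict()
        for key, value in state_dict.items():
            # e.g. 'module.blocks.0.attn.weight' -> 'blocks.0.attn.weight'
            stripped[key.split("module.", 1)[1]] = value
        return stripped

    # Usage (path hypothetical):
    # checkpoint = torch.load('/path/to/epoch22.pth.tar', map_location='cpu')
    # model.load_state_dict(strip_module_prefix(checkpoint['state_dict']))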
@@ -314,7 +330,7 @@ def configure_optimizers(self, train_config):
        return optimizer

    # state, and action
-   def forward(self, states, actions, targets=None, gt_map=None, timesteps=None, poses=None):
+   def forward(self, states, actions, targets=None, gt_map=None, timesteps=None, poses=None, compute_loss=True):
        # states: (batch, block_size, 4*84*84)
        # actions: (batch, block_size, 1)
        # targets: (batch, block_size, 1)
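Note: the added compute_loss flag lets evaluation code run the forward pass without computing a loss; when it is False, the loss slot of the returned tuple stays None. Illustrative call sites (model and input tensors assumed to exist, shapes as in the comments above):

    # Training step: predictions plus an MSE loss tensor.
    action_preds, loss = model(states, actions, targets=targets)

    # Evaluation rollout: skip the loss computation entirely.
    action_preds, loss = model(states, actions, targets=targets, compute_loss=False)
    assert loss is None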
@@ -363,24 +379,40 @@ def forward(self, states, actions, targets=None, gt_map=None, timesteps=None, poses=None, compute_loss=True):
            map_recon = self.map_decoder(feat)
        elif self.config.train_mode == 'loc':
            pose_preds = self.predict_pose(x[:, ::2, :])
-           pose_preds[:,:,2:] = torch.tanh(pose_preds[:,:,2:])
+           # pose_preds[:,:,2:] = torch.tanh(pose_preds[:,:,2:])
+       elif self.config.train_mode == 'joint':
+           action_preds = self.predict_action(x[:, ::2, :])
+           percep_feat = x[:, ::2, :]
+           B, N, D = percep_feat.shape
+           feat = percep_feat.reshape(B, -1) # reshape to a vector
+           map_recon = self.map_decoder(feat)
+           pose_preds = self.predict_pose(x[:, ::2, :])
+           # pose_preds[:,:,2:] = torch.tanh(pose_preds[:,:,2:])
        else:
            print('Not support!')


        loss = None
        if targets is not None:
            if self.config.train_mode == 'map':
-               # loss = self.criterion(map_recon.reshape(-1, self.map_recon_dim, self.map_recon_dim), gt_map)
-               return map_recon, 0.0
+               if compute_loss:
+                   loss = self.criterion(map_recon.reshape(-1, self.map_recon_dim, self.map_recon_dim), gt_map)
+               return map_recon, loss
            elif self.config.train_mode == 'e2e':
                # loss over N timesteps
-               # loss = self.criterion(actions, action_preds)
-               return action_preds, 0.0
+               if compute_loss:
+                   loss = self.criterion(actions, action_preds)
+               return action_preds, loss
            elif self.config.train_mode == 'loc':
                # loss over N timesteps
-               # loss = self.criterion(poses, pose_preds)
-               return pose_preds, 0.0
+               if compute_loss:
+                   loss_translation = self.criterion(poses[:,:,:2],pose_preds[:,:,:2])
+                   loss_angle = self.criterion(poses[:,:,2],pose_preds[:,:,2])
+                   # scale angle loss, similar to DeepVO paper (they use factor of 100, but car is moving faster)
+                   loss = loss_translation + 10.0*loss_angle
+               return pose_preds, loss
+           elif self.config.train_mode == 'joint':
+               return action_preds, map_recon, pose_preds

class MapDecoder_4x_Deconv(nn.Module):
    def __init__(self, in_channels=384):
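Note: the 'loc' branch replaces the old placeholder 0.0 loss with a weighted pose-regression loss: MSE over the (x, y) translation plus 10x the MSE over the heading angle, a DeepVO-style weighting per the in-code comment. A runnable toy version of just that computation (random stand-in tensors with the (batch, timesteps, 3) layout implied by the new 3-dimensional predict_pose head):

    import torch

    criterion = torch.nn.MSELoss(reduction='mean')
    poses = torch.randn(2, 8, 3)       # ground truth (x, y, theta) per timestep
    pose_preds = torch.randn(2, 8, 3)  # network predictions

    loss_translation = criterion(poses[:, :, :2], pose_preds[:, :, :2])
    loss_angle = criterion(poses[:, :, 2], pose_preds[:, :, 2])
    loss = loss_translation + 10.0 * loss_angle  # angle term scaled 10x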