# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
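
"""Entry point for training and evaluating the pose imitator.

Registers the simulation task as an rl_games vectorized environment
('rlgpu' / 'RLGPU'), plugs the Imitator agent, player, model, and network
builders into an rl_games Runner, and runs training or testing according
to the parsed command-line arguments.
"""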

import sys

sys.path.append('./')
sys.path.append('./poselib')

from utils.config import set_np_formatting, get_args, parse_sim_params, load_cfg
from utils.parse_task import parse_task

from rl_games.algos_torch import torch_ext
from rl_games.common import env_configurations, vecenv
from rl_games.common import wrappers  # provides FrameStack, used in create_rlgpu_env
from rl_games.common.algo_observer import AlgoObserver
from rl_games.torch_runner import Runner

from models.im_models import ImitatorModel
from models.im_network_builder import ImitatorBuilder
from agents.im_agent import ImitatorAgent
from players.im_player import ImitatorPlayer


# Filled in by main(); read by create_rlgpu_env and RLGPUAlgoObserver.
args = None
cfg = None
cfg_train = None


def create_rlgpu_env(**kwargs):
    # Multi-GPU training via Horovod: offset the seed per rank and pin each
    # process to its own CUDA device.
    use_horovod = cfg_train['params']['config'].get('multi_gpu', False)
    if use_horovod:
        import horovod.torch as hvd

        rank = hvd.rank()
        print("Horovod rank: ", rank)

        cfg_train['params']['seed'] = cfg_train['params']['seed'] + rank

        args.device = 'cuda'
        args.device_id = rank
        args.rl_device = 'cuda:' + str(rank)

        cfg['rank'] = rank
        cfg['rl_device'] = 'cuda:' + str(rank)

    sim_params = parse_sim_params(args, cfg, cfg_train)
    task, env = parse_task(args, cfg, cfg_train, sim_params)

    print(env.num_envs)
    print(env.num_actions)
    print(env.num_obs)
    print(env.num_states)

    # Optionally stack the last `frames` observations.
    frames = kwargs.pop('frames', 1)
    if frames > 1:
        env = wrappers.FrameStack(env, frames, False)
    return env


class RLGPUAlgoObserver(AlgoObserver):
    """Tracks (consecutive) successes reported by the task and logs their mean."""

    def __init__(self, use_successes=True):
        self.use_successes = use_successes
        return

    def after_init(self, algo):
        self.algo = algo
        self.consecutive_successes = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
        self.writer = self.algo.writer
        return

    def process_infos(self, infos, done_indices):
        if isinstance(infos, dict):
            if (not self.use_successes) and 'consecutive_successes' in infos:
                cons_successes = infos['consecutive_successes'].clone()
                self.consecutive_successes.update(cons_successes.to(self.algo.ppo_device))
            if self.use_successes and 'successes' in infos:
                successes = infos['successes'].clone()
                self.consecutive_successes.update(successes[done_indices].to(self.algo.ppo_device))
        return

    def after_clear_stats(self):
        # Reset the success meter between stat windows.
        self.consecutive_successes.clear()
        return

    def after_print_stats(self, frame, epoch_num, total_time):
        if not (args.tmp or args.no_log):
            if self.consecutive_successes.current_size > 0:
                mean_con_successes = self.consecutive_successes.get_mean()
                self.algo.log_dict.update({'successes/consecutive_successes/mean': mean_con_successes})
        return


class RLGPUEnv(vecenv.IVecEnv):
    """Vecenv adapter that exposes the task to rl_games, bundling per-actor
    observations with the global state when the task provides one."""

    def __init__(self, config_name, num_actors, **kwargs):
        self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)
        self.use_global_obs = (self.env.num_states > 0)

        self.full_state = {}
        self.reset()  # populates self.full_state["obs"]
        if self.use_global_obs:
            self.full_state["states"] = self.env.get_state()
        return

    def step(self, action):
        next_obs, reward, is_done, info = self.env.step(action)

        # todo: improve, return only dictionary
        self.full_state["obs"] = next_obs
        if self.use_global_obs:
            self.full_state["states"] = self.env.get_state()
            return self.full_state, reward, is_done, info
        else:
            return self.full_state["obs"], reward, is_done, info

    def reset(self, env_ids=None):
        self.full_state["obs"] = self.env.reset(env_ids)
        if self.use_global_obs:
            self.full_state["states"] = self.env.get_state()
            return self.full_state
        else:
            return self.full_state["obs"]

    def get_number_of_agents(self):
        return self.env.get_number_of_agents()

    def get_env_info(self):
        info = {}
        info['action_space'] = self.env.action_space
        info['observation_space'] = self.env.observation_space

        if self.use_global_obs:
            info['state_space'] = self.env.state_space
            print(info['action_space'], info['observation_space'], info['state_space'])
        else:
            print(info['action_space'], info['observation_space'])

        return info
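

# Expose the environment to rl_games: 'RLGPU' names the vecenv wrapper above,
# and the 'rlgpu' configuration binds it to create_rlgpu_env so the Runner can
# construct it from the training config.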
vecenv.register('RLGPU', lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register('rlgpu', {
    'env_creator': lambda **kwargs: create_rlgpu_env(**kwargs),
    'vecenv_type': 'RLGPU'})


def build_alg_runner(algo_observer):
    runner = Runner(algo_observer)
    runner.algo_factory.register_builder('pose_im_rnn', lambda **kwargs: ImitatorAgent(**kwargs))  # training agent
    runner.player_factory.register_builder('pose_im_rnn', lambda **kwargs: ImitatorPlayer(**kwargs))  # testing agent
    runner.model_builder.model_factory.register_builder('pose_im', lambda network, **kwargs: ImitatorModel(network))  # network wrapper
    runner.model_builder.network_factory.register_builder('pose_im_rnn', lambda **kwargs: ImitatorBuilder())  # actual network definition class
    return runner
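

# Typical launch (flag names are illustrative, not taken from this file; the
# actual CLI is defined by get_args() in utils/config.py):
#   python run.py --cfg_env <env_config>.yaml --cfg_train <train_config>.yaml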


def main():
    global args
    global cfg
    global cfg_train

    set_np_formatting()
    args = get_args()
    cfg, cfg_train = load_cfg(args)
    vargs = vars(args)

    algo_observer = RLGPUAlgoObserver()
    runner = build_alg_runner(algo_observer)
    runner.load(cfg_train)
    runner.reset()
    runner.run(vargs)
    return


if __name__ == '__main__':
    main()