DQEval.py
from dqn import weights_init, Initializer, DQN, HuberLoss, ReplayMemory
from pycar_env import PyCar
from collections import namedtuple
import torch.nn as nn
import torch.nn.functional as F
import torch
import random
import sys
import math
import shutil
import warnings
from torch.backends import cudnn
from tqdm import tqdm
import logging
import time
import torchvision.transforms as transforms
from PIL import Image
import numpy as np

class DQNAgent:
    def __init__(self):
        # self.config = config
        self.gamma = 0.4
        # self.logger = logging.getLogger("DQNAgent")
        self.screen_width = 600
        # define models (policy and target)
        self.policy_model = DQN()
        self.target_model = DQN()
        # define memory
        self.memory = ReplayMemory()
        # define loss
        self.loss = HuberLoss()
        # define optimizer
        self.optim = torch.optim.Adam(self.policy_model.parameters(), lr=0.01)
        # define environment
        self.env = PyCar()  # TODO
        # self.cartpole = PyCar(self.screen_width)
        # initialize counters
        self.current_episode = 0
        self.current_iteration = 0
        self.episode_durations = []
        self.batch_size = 1700
        # set cuda flag
        self.is_cuda = torch.cuda.is_available()
        self.cuda = self.is_cuda
        if self.cuda:
            # print_cuda_statistics()
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")
        self.policy_model = self.policy_model.to(self.device)
        self.target_model = self.target_model.to(self.device)
        self.loss = self.loss.to(self.device)
        # initialize target model with the policy model's state dict
        self.target_model.load_state_dict(self.policy_model.state_dict())
        self.target_model.eval()
        self.savepath = "/home/sk002/Desktop/model/"
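
    # Note: only policy_model is queried during evaluation; target_model is
    # loaded in run() as well simply to mirror the training checkpoints, and
    # eval() puts it in inference mode (no dropout/batch-norm updates).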
    def run(self, num):
        """
        Load the policy/target checkpoints for epoch `num` and run evaluation.
        :param num: checkpoint suffix (epoch number) passed on the command line
        :return:
        """
        self.policy_model.load_state_dict(torch.load(self.savepath + "policy_epoch" + num + ".pth"))
        self.target_model.load_state_dict(torch.load(self.savepath + "target_epoch" + num + ".pth"))
        try:
            self.validate()
        except KeyboardInterrupt as e:
            print(e)
            # self.logger.info("You have entered CTRL+C.. Wait to finalize")
    def get_action(self, state):
        """
        Greedy action selection: pick the action with the highest predicted
        Q-value from the policy network (no exploration during evaluation).
        :param state: current state tensor of shape (1, C, H, W)
        :return: action index as a (1, 1) tensor
        """
        if self.cuda:
            state = state.cuda()
        with torch.no_grad():
            return self.policy_model(state).max(1)[1].view(1, 1)  # size (1, 1)
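
    # Shape walk-through (illustrative; the actual number of actions depends
    # on the DQN head defined in dqn.py):
    #   q = self.policy_model(state)     # -> (1, n_actions), e.g. [[0.2, 1.5, -0.7]]
    #   q.max(1)                         # -> (values, indices) over the action dim
    #   q.max(1)[1].view(1, 1)           # -> tensor([[1]]): index of the greedy action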
    def validate(self):
        """
        Run 200 evaluation episodes with the current policy and print the
        average final score per episode.
        """
        total = 200
        reward_total = 0
        for episode in range(total):
            self.env.reset_game()
            curr_state = torch.Tensor(self.env.get_state()).permute(2, 0, 1).unsqueeze(0)
            while True:
                # time.sleep(0.1)
                # episode_duration += 1
                # select the greedy action
                action = self.get_action(curr_state)
                # perform the action and observe the outcome
                # print(action)
                images, reward, done, score = self.env.step(action.item())  # TODO
                reward = torch.Tensor([reward]).to(self.device)
                # assign next state
                if done:
                    next_state = None
                else:
                    next_state = torch.Tensor(images).permute(2, 0, 1).unsqueeze(0)  # TODO
                curr_state = next_state
                if done:
                    reward_total += score
                    # print(score)
                    break
        print(reward_total / total)

if __name__ == "__main__":
    Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
    warnings.filterwarnings("ignore", category=UserWarning)
    agent = DQNAgent()
    agent.run(sys.argv[1])
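
# Example invocation (the epoch number below is an illustrative placeholder;
# the script expects <savepath>/policy_epoch<num>.pth and
# <savepath>/target_epoch<num>.pth to exist):
#   python DQEval.py 50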