train_unconditional.py
from time import time

import numpy as np
import torch

from FrEIA.framework import *
from FrEIA.modules import *

import monitoring
# Load training configuration, model and data set
# from configs.fourier_curve.unconditional_inn_1 import c, model, model_inverse
# from configs.fourier_curve.unconditional_inn_2 import c, model, model_inverse
# from configs.fourier_curve.unconditional_hint_1_full import c, model, model_inverse
# from configs.fourier_curve.unconditional_hint_2_full import c, model, model_inverse
# from configs.plus_shape.unconditional_inn_4 import c, model, model_inverse
# from configs.plus_shape.unconditional_inn_8 import c, model, model_inverse
# from configs.plus_shape.unconditional_inn_16 import c, model, model_inverse
# from configs.plus_shape.unconditional_inn_32 import c, model, model_inverse
# from configs.uci_data.power_hint_4 import c, model, model_inverse
# from configs.uci_data.power_hint_8 import c, model, model_inverse
# from configs.uci_data.power_inn_4 import c, model, model_inverse
# from configs.uci_data.power_inn_8 import c, model, model_inverse
# from configs.uci_data.gas_hint_4 import c, model, model_inverse
# from configs.uci_data.gas_hint_8 import c, model, model_inverse
# from configs.uci_data.gas_inn_4 import c, model, model_inverse
# from configs.uci_data.gas_inn_8 import c, model, model_inverse
# from configs.uci_data.miniboone_hint_4 import c, model, model_inverse
# from configs.uci_data.miniboone_hint_8 import c, model, model_inverse
# from configs.uci_data.miniboone_inn_4 import c, model, model_inverse
# from configs.uci_data.miniboone_inn_8 import c, model, model_inverse
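# Whichever config module is uncommented above is expected to expose at least the
# attributes used further down in this script. This list is inferred from their use
# here, not from the config files themselves, so treat it as a sketch:
#   c.model, c.model_inverse            - invertible network and its inverse pass
#   c.model.params_trainable            - iterable of trainable parameters
#   c.device, c.suffix                  - target device and run identifier
#   c.train_loader, c.test_loader       - DataLoaders yielding (x, y) batches
#   c.ndim_x, c.ndim_z                  - data and latent dimensionalities
#   c.n_epochs, c.max_batches_per_epoch - schedule limits
#   c.lr_init, c.adam_betas, c.l2_weight_reg, c.final_decay, c.pre_low_lr,
#   c.init_scale                        - optimizer and initialization settings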
def save(c, name):
    torch.save({#'opt': optim.state_dict(),
                'net': c.model.state_dict()}, name)
def load(c, name):
    state_dicts = torch.load(name)
    c.model.load_state_dict(state_dicts['net'])
    # try:
    #     optim.load_state_dict(state_dicts['opt'])
    # except ValueError:
    #     print('Cannot load optimizer for some reason or other')
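# Example round trip (a sketch; assumes `c` comes from one of the config imports
# above and that the output/ directory exists):
#     save(c, f'output/{c.suffix}.pt')
#     load(c, f'output/{c.suffix}.pt')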
def save_sample(c, N=500):
    # create sample from saved model
    # Restore the latest checkpoint if one exists; otherwise sample from the
    # current (possibly untrained) weights.
    try:
        load(c, f'output/{c.suffix}.pt')
    except:
        pass
    c.model.eval()
    with torch.no_grad():
        z_sample = torch.cuda.FloatTensor(N, c.ndim_z).normal_()
        # The model is unconditional, so the inverse pass only takes the latent
        # sample (cf. the call in train_epoch below).
        x_sample = c.model_inverse(z_sample)
        np.save(f'output/samples/{c.suffix}_sample-{N}', x_sample.cpu())
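# Note: torch.cuda.FloatTensor(N, c.ndim_z).normal_() is the older CUDA tensor
# constructor style; torch.randn(N, c.ndim_z, device='cuda') is the equivalent
# modern call.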
def evaluate(c):
    with torch.no_grad():
        c.model.eval()
        loader = c.test_loader

        batch_idx = 0
        loss_history = []

        for x, y in loader:
            batch_losses = []

            batch_idx += 1
            if batch_idx > c.max_batches_per_epoch > 0:
                break

            x = x.to(c.device)
            x += 0.01 * torch.randn_like(x)

            # Forward pass
            z = c.model(x)
            log_jacobian = c.model.log_jacobian(x, run_forward=False)

            # Maximum likelihood loss terms
            batch_losses.append(0.5 * torch.sum(z**2, dim=1).mean())
            batch_losses.append(-log_jacobian.mean())

            # Add up all losses
            loss_total = sum(batch_losses)
            loss_history.append([l.item() for l in batch_losses])

    return np.mean(loss_history, axis=0).sum()
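# The two loss terms above implement the change-of-variables log-likelihood:
# for an invertible map z = f(x) with a standard normal prior on z,
#     -log p(x) = 0.5 * ||f(x)||^2 - log |det J_f(x)| + const,
# so minimizing their sum maximizes the likelihood of the training data.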
def train_epoch(c, optim, i_epoch, vis_latent, test=False):
    if not test:
        c.model.train()
        loader = c.train_loader

    if test:
        c.model.eval()
        loader = c.test_loader
        nograd = torch.no_grad()
        nograd.__enter__()

    batch_idx = 0
    loss_history = []

    for x, y in loader:
        optim.zero_grad()
        batch_losses = []

        batch_idx += 1
        if batch_idx > c.max_batches_per_epoch > 0:
            break

        x = x.to(c.device)
        x += 0.01 * torch.randn_like(x)

        # Forward pass
        z = c.model(x)
        log_jacobian = c.model.log_jacobian(x, run_forward=False)

        # Maximum likelihood loss terms
        batch_losses.append(0.5 * torch.sum(z**2, dim=1).mean())
        batch_losses.append(-log_jacobian.mean())

        # Add up all losses
        loss_total = sum(batch_losses)
        loss_history.append([l.item() for l in batch_losses])

        # Compute gradients
        if not test:
            loss_total.backward()
            # Clamp gradients
            for p in c.model.params_trainable:
                p.grad.data.clamp_(-5.00, 5.00)
            # Parameter update
            optim.step()

        # Update progress bar
        monitoring.visualizer.update_progress(batch_idx, i_epoch+1)

    if test:
        # Update plots
        latent_sample = z[:500,:].data.cpu().numpy()
        with torch.no_grad():
            vis_x = c.model_inverse(vis_latent).data.cpu().numpy()
        monitoring.visualizer.update_plots(latent_sample, vis_x)
        nograd.__exit__(None, None, None)

    return np.mean(loss_history, axis=0)
def main(c):
    loss_labels = ['-log p(z)', '-log |det(J)|']

    # Init trainable model parameters
    if c.init_scale > 0:
        for p in c.model.params_trainable:
            p.data = c.init_scale * torch.randn_like(p.data)

    # Count total number of trainable parameters
    n_model_params = sum([p.numel() for p in c.model.params_trainable])
    print(f'\nModel {c.suffix} has {n_model_params:,} trainable parameters.\n')
    # assert False

    # Prepare optimizer and learning rate schedule
    optim = torch.optim.Adam(c.model.params_trainable, lr=c.lr_init,
                             betas=c.adam_betas, eps=1e-4,
                             weight_decay=c.l2_weight_reg)
    # Decay the learning rate by a constant factor each epoch so that it has
    # shrunk by c.final_decay in total after c.n_epochs epochs.
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=1,
                                                   gamma=(c.final_decay)**(1./c.n_epochs))

    # For visualization
    vis_batchsize = 300
    vis_latent = torch.randn(vis_batchsize, c.ndim_x).to(c.device)

    monitoring.restart(c, loss_labels)
    # monitoring.visualizer.print_config()

    t_start = time()
    try:
        for i_epoch in range(c.n_epochs):
            # Warm-up phase with a reduced learning rate
            if i_epoch < c.pre_low_lr:
                for param_group in optim.param_groups:
                    param_group['lr'] = c.lr_init * 3e-2

            train_losses = train_epoch(c, optim, i_epoch, vis_latent)
            test_losses = train_epoch(c, optim, i_epoch, vis_latent, test=True)

            monitoring.visualizer.update_losses(np.concatenate([train_losses, test_losses]),
                                                lr_scheduler.get_lr(), logscale=False)
            lr_scheduler.step()
            # save(c, f'output/{c.suffix}.pt')
    except:
        # save(c, f'output/{c.suffix}.pt' + '_ABORT')
        raise
    finally:
        print("\n\nTraining took %f minutes\n\n" % ((time()-t_start)/60.))

    return test_losses.sum()
if __name__ == "__main__":
    pass
    # main(c)
    # save_sample(c)
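# Typical usage (a sketch, assuming exactly one of the config imports at the top
# has been uncommented so that `c`, `model` and `model_inverse` exist):
#     final_test_loss = main(c)          # train and return the summed final test loss
#     save(c, f'output/{c.suffix}.pt')   # persist the trained weights
#     save_sample(c, N=500)              # draw samples from the trained model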