'''
Architectures taken from https://github.com/zalandoresearch/pytorch-vq-vae/blob/master/vq-vae.ipynb
'''
import torch
import torch.nn as nn


class Residual(nn.Module):
    '''Pre-activation residual block: ReLU -> 3x3 conv -> ReLU -> 1x1 conv, plus a skip connection.'''

    def __init__(self, in_channels, num_hiddens, num_residual_hiddens):
        super().__init__()
        self._block = nn.Sequential(
            nn.ReLU(True),
            nn.Conv2d(in_channels=in_channels, out_channels=num_residual_hiddens,
                      kernel_size=3, stride=1, padding=1, bias=False),
            nn.ReLU(True),
            nn.Conv2d(in_channels=num_residual_hiddens, out_channels=num_hiddens,
                      kernel_size=1, stride=1, bias=False))

    def forward(self, x):
        # The skip connection requires in_channels == num_hiddens.
        return x + self._block(x)


class ResidualStack(nn.Module):
    '''A chain of num_residual_layers Residual blocks, followed by a final ReLU.'''

    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super().__init__()
        self._num_residual_layers = num_residual_layers
        self._layers = nn.ModuleList(
            [Residual(in_channels, num_hiddens, num_residual_hiddens)
             for _ in range(self._num_residual_layers)])

    def forward(self, x):
        for i in range(self._num_residual_layers):
            x = self._layers[i](x)
        return torch.relu(x)


class Encoder(nn.Module):
    '''Downsamples the input 4x with two stride-2 convs, then refines it with a residual stack.'''

    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super().__init__()
        # Each 4x4 stride-2 conv halves the spatial resolution.
        self._conv_1 = nn.Conv2d(in_channels=in_channels, out_channels=num_hiddens // 2,
                                 kernel_size=4, stride=2, padding=1)
        self._conv_2 = nn.Conv2d(in_channels=num_hiddens // 2, out_channels=num_hiddens,
                                 kernel_size=4, stride=2, padding=1)
        self._conv_3 = nn.Conv2d(in_channels=num_hiddens, out_channels=num_hiddens,
                                 kernel_size=3, stride=1, padding=1)
        self._residual_stack = ResidualStack(in_channels=num_hiddens, num_hiddens=num_hiddens,
                                             num_residual_layers=num_residual_layers,
                                             num_residual_hiddens=num_residual_hiddens)

    def forward(self, inputs):
        x = torch.relu(self._conv_1(inputs))
        x = torch.relu(self._conv_2(x))
        x = self._conv_3(x)
        return self._residual_stack(x)


class Decoder(nn.Module):
    '''Mirrors the Encoder: a residual stack, then two stride-2 transposed convs that upsample 4x.
    The final layer has a single output channel, so this decoder targets grayscale reconstructions.
    '''

    def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):
        super().__init__()
        self._conv_1 = nn.Conv2d(in_channels=in_channels, out_channels=num_hiddens,
                                 kernel_size=3, stride=1, padding=1)
        self._residual_stack = ResidualStack(in_channels=num_hiddens, num_hiddens=num_hiddens,
                                             num_residual_layers=num_residual_layers,
                                             num_residual_hiddens=num_residual_hiddens)
        # Each 4x4 stride-2 transposed conv doubles the spatial resolution.
        self._conv_trans_1 = nn.ConvTranspose2d(in_channels=num_hiddens, out_channels=num_hiddens // 2,
                                                kernel_size=4, stride=2, padding=1)
        self._conv_trans_2 = nn.ConvTranspose2d(in_channels=num_hiddens // 2, out_channels=1,
                                                kernel_size=4, stride=2, padding=1)

    def forward(self, inputs):
        x = self._conv_1(inputs)
        x = self._residual_stack(x)
        x = torch.relu(self._conv_trans_1(x))
        return self._conv_trans_2(x)
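

# A minimal shape-check sketch, not part of the original file: the hyperparameter
# values and the 32x32 input size below are illustrative assumptions, not taken
# from the linked notebook. In the full VQ-VAE the encoder output would pass
# through a vector quantizer before reaching the decoder; that step is omitted here.
if __name__ == '__main__':
    num_hiddens, num_residual_layers, num_residual_hiddens = 128, 2, 32
    encoder = Encoder(1, num_hiddens, num_residual_layers, num_residual_hiddens)
    decoder = Decoder(num_hiddens, num_hiddens, num_residual_layers, num_residual_hiddens)

    x = torch.randn(8, 1, 32, 32)  # batch of 8 grayscale 32x32 images
    z = encoder(x)                 # (8, 128, 8, 8): spatial resolution reduced 4x
    x_rec = decoder(z)             # (8, 1, 32, 32): resolution restored
    assert z.shape == (8, num_hiddens, 8, 8)
    assert x_rec.shape == x.shape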