# -*- coding: utf-8 -*-
"""Convolutional layers.

Modulated 2D convolution (`Conv2DMod`) as used in StyleGAN2.
Forked from manicman1999/StyleGAN2-Tensorflow-2.0.
"""
from tensorflow.keras import backend as K
from tensorflow.keras import initializers, regularizers, constraints
from tensorflow.keras.layers import Layer, InputSpec
# Note: conv_utils lives under the private tensorflow.python namespace and may
# move between TensorFlow releases.
from tensorflow.python.keras.utils import conv_utils
import tensorflow as tf


class Conv2DMod(Layer):

    def __init__(self,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 dilation_rate=1,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 demod=True,
                 **kwargs):
        super(Conv2DMod, self).__init__(**kwargs)
        self.filters = filters
        self.rank = 2
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.demod = demod
        self.input_spec = [InputSpec(ndim=4),
                           InputSpec(ndim=2)]
    def build(self, input_shape):
        channel_axis = -1
        if input_shape[0][channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[0][channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        if input_shape[1][-1] != input_dim:
            raise ValueError('The last dimension of the modulation input '
                             'should equal the input channel dimension.')

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        # Set input spec.
        self.input_spec = [InputSpec(ndim=4, axes={channel_axis: input_dim}),
                           InputSpec(ndim=2)]
        self.built = True
    def call(self, inputs):
        # To channels first.
        x = tf.transpose(inputs[0], [0, 3, 1, 2])

        # Get the per-sample style (modulation) vector and make its shape
        # broadcast-compatible with self.kernel: [batch, 1, 1, in_channels, 1].
        w = K.expand_dims(K.expand_dims(K.expand_dims(inputs[1], axis=1), axis=1), axis=-1)

        # Add a minibatch dimension to the weights: [1, kh, kw, in_channels, filters].
        wo = K.expand_dims(self.kernel, axis=0)

        # Modulate.
        weights = wo * (w + 1)

        # Demodulate (normalize the weights feeding each output feature map).
        if self.demod:
            d = K.sqrt(K.sum(K.square(weights), axis=[1, 2, 3], keepdims=True) + 1e-8)
            weights = weights / d

        # Fused => reshape minibatch to convolution groups.
        x = tf.reshape(x, [1, -1, x.shape[2], x.shape[3]])
        w = tf.reshape(tf.transpose(weights, [1, 2, 3, 0, 4]),
                       [weights.shape[1], weights.shape[2], weights.shape[3], -1])

        # Note: the fused convolution always uses "SAME" padding; self.padding
        # is not applied here.
        x = tf.nn.conv2d(x, w,
                         strides=self.strides,
                         padding="SAME",
                         data_format="NCHW")

        # Fused => reshape convolution groups back to minibatch, then back to
        # channels last.
        x = tf.reshape(x, [-1, self.filters, x.shape[2], x.shape[3]])
        x = tf.transpose(x, [0, 2, 3, 1])

        return x
    def compute_output_shape(self, input_shape):
        space = input_shape[0][1:-1]
        new_space = []
        for i in range(len(space)):
            new_dim = conv_utils.conv_output_length(
                space[i],
                self.kernel_size[i],
                padding=self.padding,
                stride=self.strides[i],
                dilation=self.dilation_rate[i])
            new_space.append(new_dim)
        # input_shape is a list of two shapes; the batch size comes from the
        # image input's first dimension.
        return (input_shape[0][0],) + tuple(new_space) + (self.filters,)
    def get_config(self):
        config = {
            'filters': self.filters,
            'kernel_size': self.kernel_size,
            'strides': self.strides,
            'padding': self.padding,
            'dilation_rate': self.dilation_rate,
            'kernel_initializer': initializers.serialize(self.kernel_initializer),
            'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
            'activity_regularizer': regularizers.serialize(self.activity_regularizer),
            'kernel_constraint': constraints.serialize(self.kernel_constraint),
            'demod': self.demod
        }
        base_config = super(Conv2DMod, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
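

# --- Usage sketch (not part of the original file) ---------------------------
# A minimal example of how Conv2DMod is typically wired up: the layer takes a
# list of two inputs, a channels-last image tensor and a per-sample style
# vector whose length matches the image's channel count. The shapes and names
# below are illustrative assumptions, not values from the original repository.
# Note that the fused NCHW convolution inside call() generally requires a GPU
# at runtime; building the model and printing its summary is purely symbolic.
if __name__ == "__main__":
    from tensorflow.keras.layers import Input, Dense
    from tensorflow.keras.models import Model

    feature_maps = Input(shape=(32, 32, 64))   # channels-last activations (hypothetical size)
    style = Input(shape=(512,))                # latent/style vector (hypothetical size)
    mod = Dense(64)(style)                     # project style to the input channel count
    out = Conv2DMod(filters=128, kernel_size=3)([feature_maps, mod])

    model = Model(inputs=[feature_maps, style], outputs=out)
    model.summary()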