-- SpatialSubtractiveNormalization.lua (forked from torch/nn)
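--
-- SpatialSubtractiveNormalization subtracts from each input value the mean of
-- its local neighborhood, computed over all nInputPlane planes with a fixed
-- (non-learned) averaging kernel; means near the borders are corrected for
-- the zero padding. This version expects a single nInputPlane x height x
-- width input (no batch dimension).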
local SpatialSubtractiveNormalization, parent = torch.class('nn.SpatialSubtractiveNormalization','nn.Module')

function SpatialSubtractiveNormalization:__init(nInputPlane, kernel)
   parent.__init(self)

   -- get args
   self.nInputPlane = nInputPlane or 1
   self.kernel = kernel or torch.Tensor(9,9):fill(1)
   local kdim = self.kernel:nDimension()

   -- check args
   if kdim ~= 2 and kdim ~= 1 then
      error('<SpatialSubtractiveNormalization> averaging kernel must be 2D or 1D')
   end
   if (self.kernel:size(1) % 2) == 0 or (kdim == 2 and (self.kernel:size(2) % 2) == 0) then
      error('<SpatialSubtractiveNormalization> averaging kernel must have ODD dimensions')
   end

   -- normalize kernel so that convolving with it yields a mean over all input planes
   self.kernel:div(self.kernel:sum() * self.nInputPlane)

   -- padding values, chosen so the convolution preserves the spatial size
   local padH = math.floor(self.kernel:size(1)/2)
   local padW = padH
   if kdim == 2 then
      padW = math.floor(self.kernel:size(2)/2)
   end
   -- create convolutional mean estimator
   self.meanestimator = nn.Sequential()
   self.meanestimator:add(nn.SpatialZeroPadding(padW, padW, padH, padH))
   if kdim == 2 then
      self.meanestimator:add(nn.SpatialConvolution(self.nInputPlane, 1, self.kernel:size(2), self.kernel:size(1)))
   else
      -- a 1D kernel is applied separably: first horizontally, plane by plane,
      -- then vertically while summing across planes
      self.meanestimator:add(nn.SpatialConvolutionMap(nn.tables.oneToOne(self.nInputPlane), self.kernel:size(1), 1))
      self.meanestimator:add(nn.SpatialConvolution(self.nInputPlane, 1, 1, self.kernel:size(1)))
   end
   -- replicate the single mean map across all input planes
   self.meanestimator:add(nn.Replicate(self.nInputPlane))
   -- set kernel and bias
   if kdim == 2 then
      for i = 1,self.nInputPlane do
         self.meanestimator.modules[2].weight[1][i] = self.kernel
      end
      self.meanestimator.modules[2].bias:zero()
   else
      for i = 1,self.nInputPlane do
         self.meanestimator.modules[2].weight[i]:copy(self.kernel)
         self.meanestimator.modules[3].weight[1][i]:copy(self.kernel)
      end
      self.meanestimator.modules[2].bias:zero()
      self.meanestimator.modules[3].bias:zero()
   end
   -- other operations
   self.subtractor = nn.CSubTable()
   self.divider = nn.CDivTable()

   -- coefficient array, to adjust for the border effects of the zero padding
   self.coef = torch.Tensor(1,1,1)
end

function SpatialSubtractiveNormalization:updateOutput(input)
   -- compute side coefficients: convolving an all-ones input gives, at each
   -- location, the fraction of the kernel overlapping the (zero-padded) image;
   -- recomputed whenever the input's spatial size changes
   if (input:size(3) ~= self.coef:size(3)) or (input:size(2) ~= self.coef:size(2)) then
      local ones = input.new():resizeAs(input):fill(1)
      self.coef = self.meanestimator:updateOutput(ones)
      self.coef = self.coef:clone()
   end

   -- compute the local mean (border-corrected) and subtract it from the input
   self.localsums = self.meanestimator:updateOutput(input)
   self.adjustedsums = self.divider:updateOutput{self.localsums, self.coef}
   self.output = self.subtractor:updateOutput{input, self.adjustedsums}

   -- done
   return self.output
end

function SpatialSubtractiveNormalization:updateGradInput(input, gradOutput)
   -- resize grad
   self.gradInput:resizeAs(input):zero()

   -- backprop through all modules
   local gradsub = self.subtractor:updateGradInput({input, self.adjustedsums}, gradOutput)
   local graddiv = self.divider:updateGradInput({self.localsums, self.coef}, gradsub[2])
   self.gradInput:add(self.meanestimator:updateGradInput(input, graddiv[1]))
   self.gradInput:add(gradsub[1])

   -- done
   return self.gradInput
end

function SpatialSubtractiveNormalization:type(type)
   parent.type(self, type)
   self.meanestimator:type(type)
   self.divider:type(type)
   self.subtractor:type(type)
   return self
end