forked from torch/cunn
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathSpatialMaxPoolingCUDA.cu
100 lines (81 loc) · 3.56 KB
/
SpatialMaxPoolingCUDA.cu
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
#include "SpatialPoolingCUDA/updateOutput.cu"
#include "SpatialPoolingCUDA/updateGradInput.cu"
/* Forward pass of batch spatial max pooling on the GPU.
 *
 * Lua stack: arg 1 = module table (self: reads kW, kH, dW, dH, output),
 *            arg 2 = input, a 4D contiguous torch.CudaTensor laid out as
 *            (nInputPlane, nInputRows, nInputCols, batchSize) — note the
 *            batch dimension is LAST (cuda-convnet layout), per the size[]
 *            reads below.
 * Returns 1 value: self.output (left on top of the stack by
 * luaT_getfieldcheckudata), resized to
 * (nInputPlane, nOutputRows, nOutputCols, batchSize).
 * Raises a Lua error on bad arguments; aborts via THError on CUDA failure. */
static int cunn_SpatialMaxPoolingCUDA_updateOutput(lua_State *L)
{
  THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");

  luaL_argcheck(L, input->nDimension == 4, 2, "4D (batch) tensor expected");
  /* The kernel launch below receives a single stride argument (the `0, dW`
   * pair); dH is used only for the output-size computation here.  A dH != dW
   * module would silently pool with the wrong vertical stride, so reject it
   * explicitly instead of producing garbage. */
  luaL_argcheck(L, dH == dW, 1, "dH must equal dW (kernel supports a single stride)");

  long nInputCols = input->size[2];
  long nInputRows = input->size[1];
  long nInputPlane = input->size[0];
  long batchSize = input->size[3];
  /* Valid (no-padding) pooling output size. */
  long nOutputCols = (nInputCols - kW) / dW + 1;
  long nOutputRows = (nInputRows - kH) / dH + 1;

  luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
  luaL_argcheck(L, THCudaTensor_isContiguous(input), 2, "input must be contiguous");

  float *input_data = THCudaTensor_data(input);
  THCudaTensor_resize4d(output, nInputPlane, nOutputRows, nOutputCols, batchSize);
  float *output_data = THCudaTensor_data(output);

  /* NOTE(review): the trailing `0` presumably is the pooling start offset
   * (no padding) expected by the kernel — confirm against
   * SpatialPoolingCUDA/updateOutput.cu. */
  spatialMaxPooling_updateOutput<MaxPooler>
    (input_data, output_data,
     nInputPlane, nInputRows, nInputCols, batchSize,
     nOutputRows, nOutputCols,
     kH, kW,
     0, dW);

  /* Kernel launches fail asynchronously; surface launch errors here. */
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in SpatialMaxPoolingCUDA.updateOutput: %s\n", cudaGetErrorString(err));
    THError("aborting");
  }
  return 1;
}
/* Backward pass of batch spatial max pooling on the GPU.
 *
 * Lua stack: arg 1 = module table (self: reads kW, kH, dW, dH, gradInput,
 *            output), arg 2 = input (4D contiguous torch.CudaTensor,
 *            batch-last layout as in updateOutput), arg 3 = gradOutput
 *            (torch.CudaTensor shaped like self.output).
 * Side effects: self.gradInput is resized to input's shape, zeroed, and
 * filled with the routed gradients.
 * Returns 1 stack value; raises a Lua error on bad arguments and aborts via
 * THError on CUDA failure. */
static int cunn_SpatialMaxPoolingCUDA_updateGradInput(lua_State *L)
{
  THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
  THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
  THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");

  /* Mirror updateOutput's validation: size[3] is indexed and raw data
   * pointers are taken below, so a non-4D or non-contiguous tensor would
   * silently read the wrong memory. */
  luaL_argcheck(L, input->nDimension == 4, 2, "4D (batch) tensor expected");
  luaL_argcheck(L, THCudaTensor_isContiguous(input), 2, "input must be contiguous");
  luaL_argcheck(L, THCudaTensor_isContiguous(gradOutput), 3, "gradOutput must be contiguous");
  /* As in updateOutput, the kernel gets only one stride (`0, dW` below). */
  luaL_argcheck(L, dH == dW, 1, "dH must equal dW (kernel supports a single stride)");

  long nInputCols = input->size[2];
  long nInputRows = input->size[1];
  long nInputPlane = input->size[0];
  long batchSize = input->size[3];
  long nOutputCols = (nInputCols - kW) / dW + 1;
  long nOutputRows = (nInputRows - kH) / dH + 1;

  /* Gradients are accumulated into gradInput, so it must start at zero. */
  THCudaTensor_resizeAs(gradInput, input);
  THCudaTensor_zero(gradInput);

  float *input_data = THCudaTensor_data(input);
  float *output_data = THCudaTensor_data(output);
  float *gradOutput_data = THCudaTensor_data(gradOutput);
  float *gradInput_data = THCudaTensor_data(gradInput);

  /* NOTE(review): trailing `0` presumably the pooling start offset (no
   * padding) — confirm against SpatialPoolingCUDA/updateGradInput.cu. */
  spatialMaxPooling_updateGradInput
    (input_data, gradOutput_data, output_data, gradInput_data,
     nInputPlane, nInputRows, nInputCols, batchSize,
     nOutputRows, nOutputCols,
     kH, kW,
     0, dW);

  /* Kernel launches fail asynchronously; surface launch errors here. */
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in SpatialMaxPoolingCUDA.updateGradInput: %s\n", cudaGetErrorString(err));
    THError("aborting");
  }
  return 1;
}
/* Lua method table: maps the Lua-visible names to the C implementations
 * above.  Registered on the torch.CudaTensor metatable (under "nn") by
 * cunn_SpatialMaxPoolingCUDA_init; the NULL pair terminates the list as
 * required by luaL_Reg. */
static const struct luaL_Reg cunn_SpatialMaxPoolingCUDA__ [] = {
{"SpatialMaxPoolingCUDA_updateOutput", cunn_SpatialMaxPoolingCUDA_updateOutput},
{"SpatialMaxPoolingCUDA_updateGradInput", cunn_SpatialMaxPoolingCUDA_updateGradInput},
{NULL, NULL}
};
/* Module init: registers the function table on the torch.CudaTensor
 * metatable under the "nn" field, then restores the Lua stack.
 * The push / register / pop order is the luaT registration protocol. */
static void cunn_SpatialMaxPoolingCUDA_init(lua_State *L)
{
  luaT_pushmetatable(L, "torch.CudaTensor");      /* push target metatable */
  luaT_registeratname(L, cunn_SpatialMaxPoolingCUDA__, "nn");  /* install methods */
  lua_pop(L,1);                                   /* pop the metatable */
}