-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcudaDemo.py
38 lines (26 loc) · 829 Bytes
/
cudaDemo.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import torch
from torch import nn
from torch.autograd import Variable
# Use the GPU when one is visible to this process.
# torch.cuda.is_available() already returns a bool, so the original
# `True if ... else False` wrapper was redundant.
useCuda = torch.cuda.is_available()
class myModel(nn.Module):
    """Minimal demo network: a single 3x3 convolution from 3 to 2 channels."""

    def __init__(self):
        super().__init__()
        # One conv layer: 3 input channels -> 2 output channels, 3x3 kernel,
        # stride 1, no padding (so spatial dims shrink by 2).
        self.conv = nn.Conv2d(in_channels=3, out_channels=2, kernel_size=3)

    def forward(self, x):
        # Forward pass is just the single convolution.
        return self.conv(x)
# Pick the target device once; `.to(device)` / `device=` arguments replace
# the deprecated torch.cuda.* type-constructor aliasing and the explicit
# `if useCuda: x = x.cuda()` moves of the pre-0.4 API.
device = torch.device("cuda" if useCuda else "cpu")

myNet = myModel().to(device)
myLoss = nn.CrossEntropyLoss()  # loss modules hold no parameters; no .cuda() needed

# Dummy input batch: 2 samples, 3 channels, 3x3 spatial.
# Was `torch.FloatTensor(2,3,3,3)`, which allocates UNINITIALIZED memory and
# made the printed loss nondeterministic garbage; zeros are reproducible.
myData = torch.zeros(2, 3, 3, 3, device=device)

# Class-index targets for CrossEntropyLoss: one label per sample, both 0.
# Replaces the manual LongTensor(2) + element-wise zeroing.
result = torch.zeros(2, dtype=torch.long, device=device)

# torch.autograd.Variable has been a deprecated no-op since PyTorch 0.4 —
# plain tensors participate in autograd directly, so the wrappers are gone.
# Conv output is (2, 2, 1, 1); squeeze drops the 1-sized spatial dims to
# give the (batch, classes) shape CrossEntropyLoss expects.
out = torch.squeeze(myNet(myData))
loss = myLoss(out, result)
print(loss)