linear_regression.py
import torch
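# Training hyperparameters: number of SGD iterations and step size.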
NITER = 1000
LEARNING_RATE = 0.0001
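# Ground-truth slope (A), intercept (B), and number of data points (N).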
A = 3
B = 5
N = 100
# Generate some dummy data and targets
data = torch.arange(N, dtype=torch.float) / 100
noise = torch.randn(N) / 10000
targets = A * data + B + noise
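# `targets` lies on the line y = A*x + B (up to small noise) for x in [0, 0.99].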
# Loss function: sum of squared errors. Summing (rather than averaging) scales
# the gradient with N, which is why the learning rate above is small.
def compute_loss(output, target):
    diff = output - target
    diffsq = diff.pow(2)
    loss = diffsq.sum()
    return loss
# Model the data as a linear function
def model(data, paramA, paramB):
    prod = data * paramA
    output = prod + paramB
    return output
# We're trying to fit `data` to `targets`.
# These are the two parameters for our model.
paramA = torch.tensor(1., requires_grad=True)
paramB = torch.tensor(0., requires_grad=True)
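# requires_grad=True makes autograd track operations on these tensors so that
# loss.backward() can compute gradients with respect to them.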
# Training loop
for i in range(1, NITER + 1):
    # Run forward pass
    output = model(data, paramA, paramB)
    loss = compute_loss(output, targets)
    # Compute gradients
    loss.backward()
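    # loss.backward() has populated paramA.grad and paramB.grad with the
    # gradient of the loss with respect to each parameter.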
    # Update the parameters via SGD.
    with torch.no_grad():
        paramA -= paramA.grad * LEARNING_RATE
        paramB -= paramB.grad * LEARNING_RATE
    # Zero the gradients so that we can keep training: backward() accumulates
    # into .grad, so this iteration's values would otherwise carry over.
    with torch.no_grad():
        paramA.grad.zero_()
        paramB.grad.zero_()
    if i % 50 == 0:
        print('[{}/{}][Loss: {}][learned A: {}][learned B: {}]'.format(
            i, NITER, loss.item(), paramA.item(), paramB.item()))
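# Optional sanity check, a minimal sketch assuming a PyTorch build that
# provides torch.linalg.lstsq: compare the learned A and B against the
# closed-form least-squares fit of the same data.
with torch.no_grad():
    X = torch.stack([data, torch.ones(N)], dim=1)
    closed_form = torch.linalg.lstsq(X, targets.unsqueeze(1)).solution
    print('[closed-form A: {}][closed-form B: {}]'.format(
        closed_form[0].item(), closed_form[1].item()))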