# Example: Training a simple MLP with micrograd
import random
import time
from typing import List

from micrograd.engine import Value
from micrograd.nn import MLP

SECONDS_TO_WAIT = 0  # set to e.g. 0.1 to slow each epoch down

# Create random data: 10 samples with 3 features each, scalar targets in [-1, 1]
inputs = [[random.uniform(-1, 1) for _ in range(3)] for _ in range(10)]
targets = [random.uniform(-1, 1) for _ in range(10)]

# Define the MLP: 3 inputs, two hidden layers of 4 neurons each, 1 output
mlp: MLP = MLP(3, [4, 4, 1])
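
# Optional sanity check (an addition, not in the original script): micrograd's
# Module base class exposes parameters(), so we can report the model size.
print(f"Model has {len(mlp.parameters())} parameters")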

# Training loop
learning_rate = 0.01

if SECONDS_TO_WAIT > 0:
    print(f'each epoch will wait {SECONDS_TO_WAIT:0.1f} seconds to imitate a real calculation')

for epoch in range(100):
    if SECONDS_TO_WAIT > 0:
        time.sleep(SECONDS_TO_WAIT)

    # Forward pass: one scalar prediction per input
    predictions: List[Value] = [mlp(x) for x in inputs]

    # Sum of squared errors; summing Values yields a Value via __add__/__radd__
    loss: Value = sum((pred - target) ** 2 for pred, target in zip(predictions, targets))
    assert isinstance(loss, Value), 'loss is not a Value'

    # Zero gradients before backprop (micrograd accumulates them otherwise)
    for p in mlp.parameters():
        p.grad = 0

    # Backward pass
    loss.backward()

    # Gradient descent step
    for p in mlp.parameters():
        p.data -= learning_rate * p.grad

    print(f"Epoch {epoch}, Loss: {loss.data:0.3f}")