Commit
Merge pull request #3 from SermetPekin/Weight
naming and some new types and classes
SermetPekin authored Dec 4, 2024
2 parents 65b61c2 + 7c61dc7 commit 6949eda
Showing 2 changed files with 33 additions and 21 deletions.
11 changes: 11 additions & 0 deletions micrograd/engine.py
@@ -102,3 +102,14 @@ def __rtruediv__(self, other) -> 'Value': # other / self

     def __repr__(self) -> str:
         return f"Value(data={self.data}, grad={self.grad}, op={self._op})"
+
+
+class Weight(Value):
+    def __init__(self, data: Number, _children: tuple = (), _op: str = '', regularization: str = 'none'):
+        super().__init__(data, _children, _op)
+        self.regularization = regularization
+
+
+class Bias(Value):
+    def __init__(self, data: Number, _children: tuple = (), _op: str = ''):
+        super().__init__(data, _children, _op)
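
The new subclasses change no behavior yet: Bias adds nothing beyond Value, and Weight only carries a regularization tag that an optimizer could act on later. A minimal sketch of how the new types drop into the existing graph (assuming upstream micrograd's Value API, including relu() and backward()):

    from micrograd.engine import Value, Weight, Bias

    w = Weight(0.5, regularization='l2')   # tag is stored, not yet acted on
    b = Bias(0.0)
    x = Value(2.0)

    y = (w * x + b).relu()   # arithmetic still produces plain Value nodes
    y.backward()
    print(w.grad, b.grad)    # 2.0 1.0 -- gradients flow through the subclasses unchanged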
43 changes: 22 additions & 21 deletions micrograd/nn.py
@@ -1,7 +1,7 @@
 import random
 from typing import List, Union

-from micrograd.engine import Value
+from micrograd.engine import Value, Weight, Bias


 class Module:
@@ -16,29 +16,29 @@ def parameters(self) -> List[Value]:

 class Neuron(Module):

-    def __init__(self, nin: int, nonlin: bool = True):
-        self.w: List[Value] = [Value(random.uniform(-1, 1)) for _ in range(nin)]
-        self.b: Value = Value(0)
-        self.nonlin: bool = nonlin
+    def __init__(self, input_count: int, use_activation: bool = True):
+        self.weights: List[Weight] = [Weight(random.uniform(-1, 1)) for _ in range(input_count)]
+        self.bias: Bias = Bias(0)
+        self.use_activation: bool = use_activation

-    def __call__(self, x) -> Value:
-        act = sum((wi * xi for wi, xi in zip(self.w, x)), self.b)
-        return act.relu() if self.nonlin else act
+    def __call__(self, inputs: List[Value]) -> Value:
+        weighted_sum = sum((weight * input_value for weight, input_value in zip(self.weights, inputs)), self.bias)
+        return weighted_sum.relu() if self.use_activation else weighted_sum

     def parameters(self) -> List[Value]:
-        return self.w + [self.b]
+        return self.weights + [self.bias]

     def __repr__(self) -> str:
-        return f"{'ReLU' if self.nonlin else 'Linear'}Neuron({len(self.w)})"
-
+        activation_type = "ReLU" if self.use_activation else "Linear"
+        return f"{activation_type}Neuron(input_count={len(self.weights)})"

 class Layer(Module):

     def __init__(self, nin: int, nout: int, **kwargs):
-        self.neurons = [Neuron(nin, **kwargs) for _ in range(nout)]
+        self.neurons: List[Neuron] = [Neuron(nin, **kwargs) for _ in range(nout)]

-    def __call__(self, x) -> List[Value]:
-        out = [n(x) for n in self.neurons]
+    def __call__(self, x) -> List[Value] | Value:
+        out: List[Value] = [n(x) for n in self.neurons]
         return out[0] if len(out) == 1 else out

     def parameters(self) -> List[Value]:
@@ -50,19 +50,20 @@ def __repr__(self) -> str:

 class MLP(Module):

-    def __init__(self, nin: int, nouts: List[int]):
-        sz = [nin] + nouts
+    def __init__(self, input_size: int, layer_sizes: List[int]):
+        sizes: List[int] = [input_size] + layer_sizes
         self.layers: List[Layer] = [
-            Layer(sz[i], sz[i + 1], nonlin=i != len(nouts) - 1) for i in range(len(nouts))
+            Layer(sizes[i], sizes[i + 1], use_activation=i != len(layer_sizes) - 1) for i in range(len(layer_sizes))
         ]

-    def __call__(self, x: List[Value]) -> Union[Value, List[Value]]:
+    def __call__(self, inputs: List[Value]) -> Union[Value, List[Value]]:
+        outputs = inputs
         for layer in self.layers:
-            x = layer(x)
-        return x
+            outputs = layer(outputs)
+        return outputs

     def parameters(self) -> List[Value]:
-        return [p for layer in self.layers for p in layer.parameters()]
+        return [param for layer in self.layers for param in layer.parameters()]

     def __repr__(self) -> str:
         return f"MLP of [{', '.join(str(layer) for layer in self.layers)}]"
