Preparing the Dataloader

```python
from eureka.utils import dataloader

trainloader = dataloader(x, y, batch_size=64, shuffle=True)
```
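Under the hood, a utility like `dataloader` just shuffles the sample indices and yields successive `(inputs, labels)` slices. A minimal NumPy sketch of that idea (an illustration only, not eureka's actual implementation):

```python
import numpy as np

def simple_dataloader(x, y, batch_size=64, shuffle=True):
    # Shuffle the sample indices once per pass over the data
    idx = np.arange(x.shape[0])
    if shuffle:
        np.random.shuffle(idx)
    # Yield successive (inputs, labels) mini-batches
    for start in range(0, x.shape[0], batch_size):
        batch = idx[start:start + batch_size]
        yield x[batch], y[batch]
```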
Defining Model Architecture, Optimizer, and Criterion/Loss Function
```python
import eureka.nn as nn
import eureka.optim as optim
import eureka.losses as losses

# MNIST dense network with one hidden layer of 256 neurons,
# a BatchNorm layer (with learnable affine parameters) after the activation,
# and a Dropout layer with a 0.2 probability of dropping neurons
model = nn.Sequential([
    nn.Linear(784, 256),
    nn.ReLU(),
    nn.BatchNorm1d(256, affine=True),
    nn.Dropout(0.2),
    nn.Linear(256, 10),
    nn.Softmax()
])

# Adam optimizer
optimizer = optim.Adam(model, lr=0.0002)

# Define the criterion/loss function
criterion = losses.CrossEntropyLoss()
```
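For intuition, the quantity `criterion` computes is the cross-entropy between the softmax outputs and the one-hot labels. A plain-NumPy sketch of that formula (illustrative only, not eureka's internals; the `eps` clipping is an assumption to avoid `log(0)`):

```python
import numpy as np

def cross_entropy(probs, one_hot_labels, eps=1e-12):
    # Mean negative log-likelihood of the true class per sample
    m = probs.shape[0]
    return -np.sum(one_hot_labels * np.log(probs + eps)) / m
```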
Forward and Backpropagation
```python
batch_loss = 0
for inputs, labels in trainloader:
    # Forward propagation and loss computation
    out = model.forward(inputs)
    m = inputs.shape[0]  # mini-batch size
    batch_loss += criterion(out, labels)

    # Gradient of the loss w.r.t. the model output,
    # then backpropagation through the model
    dloss_over_dout = criterion.backward()
    model.backward(dloss_over_dout)

    # Parameter update via the optimizer
    optimizer.step()
```
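As a sanity check on what the two backward calls compute together: for a softmax output layer trained with cross-entropy, the combined gradient with respect to the pre-softmax logits reduces to the well-known closed form `(probs - labels) / m`. A hypothetical NumPy helper illustrating that identity (how eureka splits this work between `criterion.backward()` and the `Softmax` layer's backward is an internal detail not shown here):

```python
import numpy as np

def softmax_xent_logit_grad(probs, one_hot_labels):
    # d(mean cross-entropy)/d(logits) when the network ends
    # in a softmax: simply (p - y) averaged over the batch
    m = probs.shape[0]
    return (probs - one_hot_labels) / m
```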
Example: MNIST Classification
```python
import numpy as np
from eureka.utils import one_hot_encoder, dataloader
import eureka.losses as losses
import eureka.optim as optim
import eureka.nn as nn
import datasets.mnist

# Load the dataset and preprocess
train_x, train_y = datasets.mnist.load_dataset(download=True, train=True)
x = train_x.reshape(train_x.shape[0], -1)
y = one_hot_encoder(train_y)
num_samples = x.shape[0]

# Prepare the dataloader
trainloader = dataloader(x, y, batch_size=64, shuffle=True)

# Define the model architecture, optimizer, and criterion/loss function
model = nn.Sequential([
    nn.Linear(784, 256),
    nn.ReLU(),
    nn.BatchNorm1d(256, affine=False),
    nn.Dropout(0.2),
    nn.Linear(256, 10),
    nn.Softmax()
])
optimizer = optim.Adam(model, lr=0.0002)
criterion = losses.CrossEntropyLoss()

# Training loop
num_epochs = 20
for epoch in range(1, num_epochs + 1):
    print("Epoch: {}/{}\n==========".format(epoch, num_epochs))
    acc = 0
    batch_loss = 0
    for inputs, labels in trainloader:
        # Forward propagation and loss computation
        out = model.forward(inputs)
        m = inputs.shape[0]  # mini-batch size
        batch_loss += criterion(out, labels)

        # Compute accuracy over the mini-batch
        pred = np.argmax(out, axis=1).reshape(-1, 1)
        acc += np.sum(pred == labels.argmax(axis=1).reshape(-1, 1))

        # Gradient of the loss w.r.t. the model output,
        # then backpropagation through the model
        dloss_over_dout = criterion.backward()
        model.backward(dloss_over_dout)

        # Parameter update via the optimizer
        optimizer.step()

    # Print loss and accuracy for the epoch
    print("Loss: {:.6f}".format(batch_loss / num_samples))
    print("Accuracy: {:.2f}%\n".format(acc / num_samples * 100))
```