@yvki · Created April 14, 2024
Simple algorithmic implementation of Neural Network Architecture ⚙️
# 1. Algorithm to normalize the training and testing data
# (z-score normalization, using training-set statistics for both splits)
import numpy as np

def normalize_data(X_train, X_test):
    mean_X_train = np.mean(X_train, axis=0)
    std_X_train = np.std(X_train, axis=0)
    X_train = np.divide((X_train - mean_X_train), std_X_train)
    X_test = np.divide((X_test - mean_X_train), std_X_train)
    return X_train, X_test

X_train, X_test = normalize_data(X_train, X_test)
print("First five rows of X_train:", X_train[:5])
print("\nFirst five rows of X_test:", X_test[:5])
# 2. Algorithm to create a simple MLP in PyTorch
import torch
X_train_tensor = torch.FloatTensor(X_train)
y_train_tensor = torch.LongTensor(y_train)
X_test_tensor = torch.FloatTensor(X_test)
y_test_tensor = torch.LongTensor(y_test)
from torch.utils.data import TensorDataset, DataLoader
# 2.1 Create TensorDatasets
train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
test_dataset = TensorDataset(X_test_tensor, y_test_tensor)
# 2.2 Create DataLoader
batch_size = 8
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
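
# Quick sanity check (a sketch, assuming the loaders built above): pull one
# batch and confirm the shapes are (batch_size, 2) and (batch_size,)
inputs, labels = next(iter(train_loader))
print(inputs.shape, labels.shape)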
# 2.3 Create and instantiate MLP class
import torch.nn as nn

class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(2, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 2)
        )

    def forward(self, x):
        return self.layers(x)

model = MLP()
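
# Optional check (a sketch, not in the original gist): count the trainable
# parameters of the MLP defined above
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'Trainable parameters: {num_params}')  # (2*64+64) + (64*32+32) + (32*2+2) = 2338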
# 2.4 Set optimizer and loss criterion
import torch.optim as optim
optimizer = optim.Adagrad(model.parameters(), lr=0.001)
criterion = nn.CrossEntropyLoss()
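
# Aside (not part of the original gist): Adagrad accumulates squared gradients,
# so its effective learning rate decays over training; Adam is a common
# drop-in alternative here:
# optimizer = optim.Adam(model.parameters(), lr=0.001)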
# 2.5 Training loop
num_epochs = 100
for epoch in range(num_epochs):
    for inputs, labels in train_loader:
        # Zero the gradients
        optimizer.zero_grad()
        # Forward pass
        outputs = model(inputs)
        # Calculate the loss in the form of y_pred, y_train
        loss = criterion(outputs, labels)
        # Backward pass
        loss.backward()
        # Update weights
        optimizer.step()
    if epoch % 20 == 0:
        print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {loss.item()}')
# Sample output: Epoch 1/100, Loss: 0.7640491127967834 ... Epoch 81/100, Loss: 0.19598865509033203
# 3. Algorithm to evaluate the model's accuracy
model.eval()
correct = 0
total = 0
with torch.no_grad():
    for inputs, labels in test_loader:
        # Run the forward pass
        outputs = model(inputs)
        # Get the predicted class per sample (index of the largest logit)
        _, predicted = torch.max(outputs, dim=1)
        # Count the number of observations in the batch
        total += labels.size(0)
        # Count the number of correct predictions
        correct_predictions = (predicted == labels)
        correct += correct_predictions.sum().item()
accuracy = correct / total
print(f'Test Accuracy: {accuracy * 100:.2f}%')
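
# Inference on a single point (a sketch; the coordinates below are placeholder
# values, not from the original gist)
new_point = torch.FloatTensor([[0.5, -1.2]])
with torch.no_grad():
    pred_class = torch.argmax(model(new_point), dim=1).item()
print(f'Predicted class: {pred_class}')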
# 4. Algorithm to create a simple perceptron using sklearn
from sklearn.neural_network import MLPClassifier
hidden_layer_sizes = ()  # empty tuple = no hidden layers, i.e. a single-layer (perceptron-style) model
solver = 'lbfgs'
max_iter = 1000
alpha = 0.001
random_state = 1
mlp_model = MLPClassifier(
    hidden_layer_sizes=hidden_layer_sizes,
    solver=solver,
    max_iter=max_iter,
    alpha=alpha,
    random_state=random_state
)
mlp_model.fit(X_train, y_train)
# 4.1 Print coefficients of model
print(mlp_model.coefs_)
# 4.2 Print intercept of model
print(mlp_model.intercepts_)
# 4.3 Calculate model score based on testing data
print(mlp_model.score(X_test, y_test))
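# 4.4 Make predictions on the testing data (an added example, not in the original gist)
print(mlp_model.predict(X_test)[:5])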
# 5. Alternative algorithm to create a simple perceptron using Python (vs 4.)
import numpy as np

class Perceptron:
    def __init__(self):
        # Two input features feeding one output unit
        self.weights = np.zeros((2, 1))
        self.bias = np.zeros((1, 1))

    def predict(self, inputs):
        # Do the forward pass
        weighted_sum = np.dot(inputs, self.weights) + self.bias
        # Apply a step function (binary threshold) as the activation function,
        # with the threshold t set to 0
        prediction = np.where(weighted_sum >= 0, 1, 0)
        return prediction

    def train(self, inputs, targets, learning_rate=0.001, epochs=1000):
        for epoch in range(epochs):
            for i in range(len(inputs)):
                # Forward pass
                prediction = self.predict(inputs[i:i+1])
                # Compute the error
                error = targets[i:i+1] - prediction
                # Update weights and bias (perceptron learning rule)
                self.weights += learning_rate * np.dot(inputs[i:i+1].T, error)
                self.bias += learning_rate * error
perceptron = Perceptron()
# 5.1 Train the perceptron
perceptron.train(X_train, y_train, learning_rate=0.01, epochs=1000)
# 5.2 Make predictions
# flatten() reduces the (n, 1) prediction column to 1-D so that comparison
# with a 1-D y_train does not broadcast to an (n, n) matrix
predictions = perceptron.predict(X_train).flatten()
correct = (predictions == y_train).sum().item()
print(correct / y_train.shape[0])
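
# 5.3 Sanity check on an AND gate (a toy example, not part of the original gist);
# AND is linearly separable, so the perceptron learning rule converges on it
X_and = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_and = np.array([0, 0, 0, 1])
and_perceptron = Perceptron()
and_perceptron.train(X_and, y_and, learning_rate=0.1, epochs=20)
print(and_perceptron.predict(X_and).flatten())  # expected: [0 0 0 1]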