Sunday, August 11, 2024

Neural Network Usage Examples


This post is based on the great course Intro to Deep Learning and Generative Models.

 In this post we cover three types of neural network usage (a sketch for preparing the input files the scripts expect follows the list):

  1. Regression
  2. Single-class classification
  3. Multi-class classification
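
The scripts below read CSV files from an input folder and write their loss plots to an output folder. Here is a minimal sketch for preparing those files; the data-generating formulas are my own assumption, while the file names and the "features first, target last" column layout come from the scripts themselves:

import os
import numpy as np

os.makedirs('input', exist_ok=True)
os.makedirs('output', exist_ok=True)  # the scripts save their loss plots here
rng = np.random.default_rng(42)

# Regression data: a noisy linear function of two features (invented formula).
x = rng.normal(size=(300, 2))
y = 3 * x[:, 0] - 2 * x[:, 1] + rng.normal(scale=0.1, size=300)
np.savetxt('input/regression.csv', np.column_stack([x, y]), delimiter=',')

# Binary classification data: label 1 when the feature sum is positive (invented rule).
x = rng.normal(size=(300, 2))
y = (x.sum(axis=1) > 0).astype(float)
np.savetxt('input/single_classification.csv', np.column_stack([x, y]), delimiter=',')

The multi-class script expects the classic UCI Iris CSV (four feature columns plus a species name column) saved as input/multi_classification.csv.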


Regression
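
The script below fits a single linear layer (torch.nn.Linear) with mean squared error loss and mini-batch SGD, then plots the train and test loss per epoch: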


import matplotlib.pyplot as plt
import numpy as np
import torch.nn


def main():
    random_seed = 42
    learning_rate = 0.01
    number_of_epochs = 100
    batch_size = 10
    number_of_classes = 1

    torch.manual_seed(random_seed)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load the data: every column except the last is a feature,
    # the last column is the regression target.
    data = np.genfromtxt('input/regression.csv', delimiter=',')
    data = torch.tensor(data, dtype=torch.float32)
    data = data.to(device=device)

    x = data[:, :-1]
    y = data[:, -1:]

    print('x shape', x.shape)
    print('y shape', y.shape)
    print('x first sample', x[0])
    print('y first sample', y[0])

    # 70/30 train/test split
    number_of_samples = x.shape[0]
    number_of_features = x.shape[1]
    train_factor = 0.7
    train_size = int(train_factor * number_of_samples)
    x_train, x_test = x[:train_size], x[train_size:]
    y_train, y_test = y[:train_size], y[train_size:]

    print('x train shape', x_train.shape)
    print('y train shape', y_train.shape)
    print('x test shape', x_test.shape)
    print('y test shape', y_test.shape)

    # For plain linear regression a single linear layer is the whole model.
    model = torch.nn.Linear(in_features=number_of_features, out_features=number_of_classes)
    model.to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    loss_function = torch.nn.functional.mse_loss

    training_loss_per_epoch = []
    test_loss_per_epoch = []

    for epoch_index in range(number_of_epochs):
        # Shuffle the training samples and split the indices into mini-batches.
        number_of_train_samples = x_train.shape[0]
        shuffled_indices = torch.randperm(number_of_train_samples)
        batches_indices = torch.split(shuffled_indices, batch_size)
        for batch_indices in batches_indices:
            batch_x = x_train[batch_indices]
            batch_y = y_train[batch_indices]
            predictions = model(batch_x)
            loss = loss_function(predictions, batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Evaluate on the full train and test sets without tracking gradients;
        # store plain floats so matplotlib can plot them.
        with torch.no_grad():
            training_predictions = model(x_train)
            training_loss = loss_function(training_predictions, y_train)
            training_loss_per_epoch.append(training_loss.item())
            test_predictions = model(x_test)
            test_loss = loss_function(test_predictions, y_test)
            test_loss_per_epoch.append(test_loss.item())

        print('epoch {} loss: train={} test={}'.format(
            epoch_index, training_loss, test_loss))

    with torch.no_grad():
        print('example of regression targets vs. predictions')
        examples_number = 10
        shuffled_indices = torch.randperm(number_of_samples)
        samples_x = x[shuffled_indices][:examples_number]
        samples_y = y[shuffled_indices][:examples_number]
        samples_predictions = model(samples_x)
        print(torch.cat((samples_y, samples_predictions), dim=1))

    plt.plot(training_loss_per_epoch)
    plt.plot(test_loss_per_epoch)
    plt.xlabel('Epoch Number')
    plt.ylabel('Loss')
    plt.legend(['Train', 'Test'])
    plt.savefig('output/over_epochs.pdf')


main()
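
Once trained, the model scores new samples directly. A tiny usage sketch (the feature values are invented, and it assumes model and device are accessible, e.g. if main() returned them):

with torch.no_grad():
    new_sample = torch.tensor([[0.5, -1.2]], dtype=torch.float32, device=device)
    print('prediction:', model(new_sample).item())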

Single-class classification
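
This time the model is a small module that wraps a linear layer with a sigmoid, i.e. logistic regression, trained with binary cross-entropy on 0/1 targets: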


import matplotlib.pyplot as plt
import numpy as np
import torch.nn


class LogisticRegressionNN(torch.nn.Module):
    """A linear layer followed by a sigmoid: logistic regression."""

    def __init__(self, num_features):
        super(LogisticRegressionNN, self).__init__()
        self.linear = torch.nn.Linear(num_features, 1)

    def forward(self, x):
        logits = self.linear(x)
        probas = torch.sigmoid(logits)
        return probas


def main():
    random_seed = 42
    learning_rate = 0.01
    number_of_epochs = 100
    batch_size = 10

    torch.manual_seed(random_seed)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load the data: every column except the last is a feature,
    # the last column is the 0/1 class label.
    data = np.genfromtxt('input/single_classification.csv', delimiter=',')
    data = torch.tensor(data, dtype=torch.float32)
    data = data.to(device=device)

    x = data[:, :-1]
    y = data[:, -1:]

    print('x shape', x.shape)
    print('y shape', y.shape)
    print('x first sample', x[0])
    print('y first sample', y[0])

    # 70/30 train/test split
    number_of_samples = x.shape[0]
    number_of_features = x.shape[1]
    train_factor = 0.7
    train_size = int(train_factor * number_of_samples)
    x_train, x_test = x[:train_size], x[train_size:]
    y_train, y_test = y[:train_size], y[train_size:]

    print('x train shape', x_train.shape)
    print('y train shape', y_train.shape)
    print('x test shape', x_test.shape)
    print('y test shape', y_test.shape)

    model = LogisticRegressionNN(num_features=number_of_features).to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    # binary_cross_entropy expects probabilities, which the model's sigmoid provides
    loss_function = torch.nn.functional.binary_cross_entropy

    training_loss_per_epoch = []
    test_loss_per_epoch = []

    for epoch_index in range(number_of_epochs):
        # Shuffle the training samples and split the indices into mini-batches.
        number_of_train_samples = x_train.shape[0]
        shuffled_indices = torch.randperm(number_of_train_samples)
        batches_indices = torch.split(shuffled_indices, batch_size)
        for batch_indices in batches_indices:
            batch_x = x_train[batch_indices]
            batch_y = y_train[batch_indices]
            predictions = model(batch_x)
            loss = loss_function(predictions, batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Evaluate on the full train and test sets without tracking gradients;
        # store plain floats so matplotlib can plot them.
        with torch.no_grad():
            training_predictions = model(x_train)
            training_loss = loss_function(training_predictions, y_train)
            training_loss_per_epoch.append(training_loss.item())
            test_predictions = model(x_test)
            test_loss = loss_function(test_predictions, y_test)
            test_loss_per_epoch.append(test_loss.item())

        print('epoch {} loss: train={} test={}'.format(
            epoch_index, training_loss, test_loss))

    with torch.no_grad():
        print('example of classification targets vs. predictions')
        examples_number = 10
        shuffled_indices = torch.randperm(number_of_samples)
        samples_x = x[shuffled_indices][:examples_number]
        samples_y = y[shuffled_indices][:examples_number]
        samples_predictions = model(samples_x)
        print(torch.cat((samples_y, samples_predictions), dim=1))

    plt.plot(training_loss_per_epoch)
    plt.plot(test_loss_per_epoch)
    plt.xlabel('Epoch Number')
    plt.ylabel('Loss')
    plt.legend(['Train', 'Test'])
    plt.savefig('output/over_epochs.pdf')


main()
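
The network outputs probabilities in [0, 1]; to get hard class labels, threshold them at 0.5. A minimal sketch (assuming model, x_test and y_test are still in scope, e.g. if main() returned them):

with torch.no_grad():
    probabilities = model(x_test)
    predicted_labels = (probabilities > 0.5).float()  # probability above 0.5 means class 1
    accuracy = (predicted_labels == y_test).float().mean().item()
    print('test accuracy:', accuracy)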


Multi-class classification
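
The last script trains a softmax classifier on the Iris data, using two of the four features (x2 and x4). Its forward pass returns both the raw logits, which feed the loss, and the softmax probabilities, which feed the argmax predictions and accuracy: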


import matplotlib.pyplot as plt
import pandas as pd
import torch.nn


class SoftmaxNN(torch.nn.Module):
    """A linear layer whose logits are turned into class probabilities by softmax."""

    def __init__(self, num_features, num_classes):
        super(SoftmaxNN, self).__init__()
        self.linear = torch.nn.Linear(num_features, num_classes)

    def forward(self, x):
        logits = self.linear(x)
        probas = torch.nn.functional.softmax(logits, dim=1)
        return logits, probas


def compute_accuracy(true_labels, predicted_labels):
    bool_equal_matrix = true_labels.view(-1).float() == predicted_labels.float()
    return torch.sum(bool_equal_matrix).item() / true_labels.size(0)


def main():
    random_seed = 42
    learning_rate = 0.1
    number_of_epochs = 100
    batch_size = 10

    torch.manual_seed(random_seed)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    data = pd.read_csv('input/multi_classification.csv', index_col=None, header=None)

    # Map the Iris species names to integer class labels.
    data_classes = {'Iris-versicolor': 1,
                    'Iris-virginica': 2,
                    'Iris-setosa': 0,
                    }

    data.columns = ['x1', 'x2', 'x3', 'x4', 'y']
    data['y'] = data['y'].map(data_classes)

    # The Iris CSV is sorted by class, so shuffle the rows before splitting;
    # otherwise the test split would contain only one class.
    data = data.sample(frac=1, random_state=random_seed).reset_index(drop=True)

    # Use only two of the four features.
    x = torch.tensor(data[['x2', 'x4']].values, dtype=torch.float).to(device)
    y = torch.tensor(data['y'].values, dtype=torch.int).to(device)

    print('x shape', x.shape)
    print('y shape', y.shape)
    print('x first sample', x[0])
    print('y first sample', y[0])

    # 70/30 train/test split
    number_of_samples = x.shape[0]
    number_of_features = x.shape[1]
    train_factor = 0.7
    train_size = int(train_factor * number_of_samples)
    x_train, x_test = x[:train_size], x[train_size:]
    y_train, y_test = y[:train_size], y[train_size:]

    print('x train shape', x_train.shape)
    print('y train shape', y_train.shape)
    print('x test shape', x_test.shape)
    print('y test shape', y_test.shape)

    model = SoftmaxNN(num_features=number_of_features, num_classes=len(data_classes)).to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    # cross_entropy applies log_softmax itself, so it is fed the raw logits
    loss_function = torch.nn.functional.cross_entropy

    training_loss_per_epoch = []
    test_loss_per_epoch = []

    for epoch_index in range(number_of_epochs):
        # Shuffle the training samples and split the indices into mini-batches.
        number_of_train_samples = x_train.shape[0]
        shuffled_indices = torch.randperm(number_of_train_samples)
        batches_indices = torch.split(shuffled_indices, batch_size)
        for batch_indices in batches_indices:
            batch_x = x_train[batch_indices]
            batch_y = y_train[batch_indices]
            logits, probabilities = model(batch_x)
            loss = loss_function(logits, batch_y.long())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Evaluate on the full train and test sets without tracking gradients;
        # store plain floats so matplotlib can plot them.
        with torch.no_grad():
            training_logits, training_probabilities = model(x_train)
            training_accuracy = compute_accuracy(y_train, torch.argmax(training_probabilities, dim=1))
            training_loss = loss_function(training_logits, y_train.long())
            training_loss_per_epoch.append(training_loss.item())

            test_logits, test_probabilities = model(x_test)
            test_accuracy = compute_accuracy(y_test, torch.argmax(test_probabilities, dim=1))
            test_loss = loss_function(test_logits, y_test.long())
            test_loss_per_epoch.append(test_loss.item())

        print('epoch {} loss: train={} test={} accuracy: train={} test={}'.format(
            epoch_index, training_loss, test_loss, training_accuracy, test_accuracy))

    with torch.no_grad():
        print('example of classification targets vs. predictions')
        examples_number = 10
        shuffled_indices = torch.randperm(number_of_samples)
        samples_x = x[shuffled_indices][:examples_number]
        samples_y = y[shuffled_indices][:examples_number]
        _, samples_probabilities = model(samples_x)
        samples_predictions = torch.argmax(samples_probabilities, dim=1)
        print(samples_y)
        print(samples_predictions)

    plt.plot(training_loss_per_epoch)
    plt.plot(test_loss_per_epoch)
    plt.xlabel('Epoch Number')
    plt.ylabel('Loss')
    plt.legend(['Train', 'Test'])
    plt.savefig('output/over_epochs.pdf')


main()
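
Note that torch.nn.functional.cross_entropy applies log_softmax internally, which is why the script feeds it the raw logits rather than the softmax probabilities. A small sketch of the equivalence (the logit values are arbitrary):

import torch

logits = torch.tensor([[2.0, 0.5, -1.0]])
target = torch.tensor([0])
a = torch.nn.functional.cross_entropy(logits, target)
b = torch.nn.functional.nll_loss(torch.log_softmax(logits, dim=1), target)
print(torch.allclose(a, b))  # True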
