This post includes examples of basic PyTorch usage: tensor operations, GPU selection, and training a small Adaline (adaptive linear neuron) regression model.
For the Adaline example, the input data was downloaded from here.
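If the original download link is unavailable, a stand-in CSV can be generated instead. This is a minimal sketch, not the original dataset: it assumes the file layout the code below expects, namely feature columns followed by a single target column.

import os
import numpy as np

# hypothetical stand-in data: 1000 samples, 3 features,
# and a noisy linear target in the last column
os.makedirs('input', exist_ok=True)
rng = np.random.default_rng(42)
features = rng.normal(size=(1000, 3))
target = features @ np.array([2.0, -1.0, 0.5]) + rng.normal(scale=0.1, size=1000)
np.savetxt('input/input.csv', np.column_stack([features, target]), delimiter=',')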
import matplotlib.pyplot as plt
import numpy as np
import torch
def basic_usage():
    print('=== basic usage ===')
    print('torch version', torch.__version__)

    # tensor creation and basic attributes
    t1 = torch.tensor([[1., 2., 3.], [4., 5., 6.]], dtype=torch.float32)
    print('tensor data type', t1.dtype)
    print('tensor #dimensions', t1.ndim)
    print('tensor shape', t1.shape)
    print('tensor data', t1)
    print('convert type', t1.to(torch.int))

    # broadcasting: the scalar and the row vector are expanded to t1's shape
    print('broadcasting')
    print(t1 + 100)
    print(t1 + torch.tensor([100, 200, 300]))

    # matrix multiplication: (2x3) @ (3x2) -> (2x2)
    t2 = torch.tensor([[1., 1.], [1., 1.], [1., 1.]])
    print('tensor multiplication', t1 @ t2)

    print('convert to numpy, e.g. for use with matplotlib')
    print(t1.numpy())

    # reshaping with view; -1 infers that dimension's size
    print('reshaping')
    t3 = torch.arange(6)
    print(t3)
    print(t3.view(2, 3))
    print('column vector', t3.view(-1, 1))
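The training loop further down relies on autograd, so a minimal sketch of the mechanism is worth showing: a tensor created with requires_grad=True records the operations applied to it, and calling backward() on a scalar result fills in the tensor's .grad field.

import torch

w = torch.tensor(2.0, requires_grad=True)
loss = (w * 3.0 - 1.0) ** 2  # scalar built from w, so it is differentiable
loss.backward()              # populates w.grad with d(loss)/dw
print(w.grad)                # tensor(30.), since d/dw (3w - 1)^2 = 6 * (3w - 1)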
def gpu_selection():
    print('=== check GPU ===')
    # the CLI `nvidia-smi` also shows the GPU (if one exists)
    available = torch.cuda.is_available()
    print('GPU available?', available)
    if available:
        torch_device = torch.device('cuda:0')
    else:
        torch_device = torch.device('cpu')
    # tensors can be created directly on the selected device
    t1 = torch.tensor([1, 2, 3], dtype=torch.float32, device=torch_device)
    print('tensor device', t1.device)
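Besides creating tensors directly on a device, existing tensors and modules can be moved with .to(). A short sketch, using the same device selection as above:

import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
t = torch.ones(3).to(device)               # returns a copy on the device
model = torch.nn.Linear(3, 1).to(device)   # moves the parameters in place
print(model(t))                            # inputs and parameters must share a device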
def adaline_network():
    # each row: feature columns followed by the target value in the last column
    data = np.genfromtxt('input/input.csv', delimiter=',')
    data = torch.tensor(data, dtype=torch.float32)
    x = data[:, :-1]
    y = data[:, -1:]  # keep a 2D shape (n, 1) to match the model output
    number_of_samples = x.shape[0]
    number_of_features = x.shape[1]

    # simple 70/30 train/test split
    train_factor = 0.7
    train_size = int(train_factor * number_of_samples)
    x_train, x_test = x[:train_size], x[train_size:]
    y_train, y_test = y[:train_size], y[train_size:]

    random_seed = 42
    learning_rate = 0.01
    number_of_epochs = 10
    batch_size = 64
    torch.manual_seed(random_seed)

    # Adaline is a single linear layer trained with mean squared error
    linear = torch.nn.Linear(in_features=number_of_features, out_features=1)
    optimizer = torch.optim.SGD(linear.parameters(), lr=learning_rate)
    training_mean_squared_errors_per_epoch = []
    test_mean_squared_errors_per_epoch = []
    for epoch_number in range(number_of_epochs):
        # shuffle the training samples and split them into mini-batches;
        # randperm returns int64 indices, which is what tensor indexing expects
        number_of_train_samples = x_train.shape[0]
        shuffled_indices = torch.randperm(number_of_train_samples)
        batches_indices = torch.split(shuffled_indices, batch_size)
        for batch_indices in batches_indices:
            batch_x = x_train[batch_indices]
            batch_y = y_train[batch_indices]
            training_predictions = linear(batch_x)
            training_mean_squared_error = torch.nn.functional.mse_loss(training_predictions, batch_y)
            optimizer.zero_grad()
            training_mean_squared_error.backward()
            optimizer.step()

        # evaluate on the full train and test sets without tracking gradients
        with torch.no_grad():
            training_predictions = linear(x_train)
            training_mean_squared_error = torch.nn.functional.mse_loss(training_predictions, y_train).item()
            training_mean_squared_errors_per_epoch.append(training_mean_squared_error)
            test_predictions = linear(x_test)
            test_mean_squared_error = torch.nn.functional.mse_loss(test_predictions, y_test).item()
            test_mean_squared_errors_per_epoch.append(test_mean_squared_error)
            print('epoch {} mean squared error: train={} test={}'.format(
                epoch_number, training_mean_squared_error, test_mean_squared_error))
    # plot the learning curves; savefig requires the output/ directory to exist
    plt.plot(training_mean_squared_errors_per_epoch)
    plt.plot(test_mean_squared_errors_per_epoch)
    plt.xlabel('Epoch Number')
    plt.ylabel('Mean Squared Error')
    plt.legend(['Train', 'Test'])
    plt.savefig('output/mean_squared_error_over_epochs.pdf')
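For reference, the model and the loss that the training loop minimizes: Adaline computes a single affine transformation of the inputs, and the weights are fit by stochastic gradient descent on the mean squared error:

$$\hat{y} = Xw + b, \qquad \mathrm{MSE} = \frac{1}{n} \sum_{i=1}^{n} (\hat{y}_i - y_i)^2$$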
basic_usage()
gpu_selection()
adaline_network()
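The manual shuffle-and-split batching above is fine for a small example; a more idiomatic option is a DataLoader over a TensorDataset. A minimal sketch, assuming the x_train, y_train, and batch_size defined in adaline_network:

from torch.utils.data import DataLoader, TensorDataset

# assumes x_train, y_train and batch_size from adaline_network above
train_loader = DataLoader(TensorDataset(x_train, y_train),
                          batch_size=batch_size, shuffle=True)
for batch_x, batch_y in train_loader:
    pass  # same forward / backward / step body as the inner loop above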