import torch
import numpy as np
# scipy does not import its submodules automatically, so import
# scipy.ndimage explicitly rather than the bare `scipy` package
import scipy.ndimage

# Hyperparameters (used to size the recurrent hidden state in evaluate())
num_layers = 2
batch_size = 32
hidden_dim = 256


def random_rotation(inputs):
    # Rotate by a random angle (in degrees) in the plane spanned by the
    # last two axes; reshape=False keeps the output shape unchanged
    angle = np.random.uniform(-180, 180)
    return scipy.ndimage.rotate(inputs, angle, axes=(-2, -1), reshape=False)


def random_scaling(inputs):
    # Zoom only the spatial (trailing two) axes; since zoom changes the
    # array shape, crop or zero-pad the result back to the original size
    scale = np.random.uniform(0.8, 1.2)
    zoomed = scipy.ndimage.zoom(inputs, [1.0] * (inputs.ndim - 2) + [scale, scale])
    out = np.zeros_like(inputs)
    region = tuple(slice(0, min(z, s)) for z, s in zip(zoomed.shape, inputs.shape))
    out[region] = zoomed[region]
    return out


def random_translation(inputs):
    # scipy.ndimage.shift works in pixels, so treat the 0.2 bound as a
    # fraction of each spatial axis; non-spatial axes are not shifted
    shift = [0.0] * (inputs.ndim - 2) + [np.random.uniform(-0.2, 0.2) * s for s in inputs.shape[-2:]]
    return scipy.ndimage.shift(inputs, shift)


def random_shearing(inputs):
    # scipy.ndimage has no shear(); build a shear matrix and apply it
    # with affine_transform instead
    shear = np.random.uniform(-0.2, 0.2)
    matrix = np.eye(inputs.ndim)
    matrix[-2, -1] = shear  # shear the last axis against the second-to-last
    return scipy.ndimage.affine_transform(inputs, matrix)


def random_flipping(inputs):
    # scipy.ndimage has no flip(); use numpy, and only flip half the time
    # so the augmentation is actually random
    if np.random.rand() < 0.5:
        inputs = np.flip(inputs, axis=-1)
    return inputs


def data_augmentation(inputs):
    # Expects a numpy array; evaluate() converts tensors before calling this
    # Apply random rotation
    inputs = random_rotation(inputs)
    # Apply random scaling
    inputs = random_scaling(inputs)
    # Apply random translation
    inputs = random_translation(inputs)
    # Apply random shearing
    inputs = random_shearing(inputs)
    # Apply random flipping
    inputs = random_flipping(inputs)
    return inputs


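# Minimal sketch of the pipeline in isolation; the 28x28 size below is an
# illustrative assumption, not something the original code fixes
def _demo_augmentation():
    demo = np.random.rand(28, 28).astype(np.float32)
    out = data_augmentation(demo)
    # Every step above preserves the input shape
    assert out.shape == demo.shape
    return out

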
def evaluate(model, test_data, hyperparameters, recurrent_network=False, pre_trained_model=False, fine_tuning=False):
    # Use the GPU for evaluation if one is available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Initial hidden state for an LSTM-style recurrent model; this assumes
    # every batch from test_data contains exactly batch_size samples
    hidden = (torch.zeros(num_layers, batch_size, hidden_dim).to(device),
              torch.zeros(num_layers, batch_size, hidden_dim).to(device))

    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for inputs, labels in test_data:
            # The augmentation pipeline works on numpy arrays, so convert the
            # (CPU) tensor, augment, then convert back; copy() makes the
            # flipped view contiguous so torch.from_numpy accepts it
            inputs = torch.from_numpy(data_augmentation(inputs.numpy()).copy())

            # Move the batch to the evaluation device
            inputs = inputs.to(device)
            labels = labels.to(device)

            # The model variants are mutually exclusive, so pick exactly one
            # forward pass instead of computing and then overwriting outputs
            if recurrent_network:
                outputs = model(inputs, hidden)
            elif pre_trained_model:
                outputs = model.forward_from_pretrained(inputs)
            elif fine_tuning:
                outputs = model.fine_tune(inputs, hyperparameters)
            else:
                outputs = model(inputs)

            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total
    return accuracy


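# Sketch of a typical call, assuming a model and a DataLoader that yields
# (inputs, labels) batches of exactly batch_size; neither object is
# constructed in this file, so the names below are hypothetical:
#
#     accuracy = evaluate(model, test_loader, hyperparameters={},
#                         recurrent_network=True)
#     print(f'test accuracy: {accuracy:.2f}%')

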
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = 0.001 * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
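

# Sketch of where adjust_learning_rate fits in a training loop; model,
# train_loader, and num_epochs are assumed to exist and are not defined
# in this file:
#
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
#     for epoch in range(num_epochs):
#         adjust_learning_rate(optimizer, epoch)
#         for inputs, labels in train_loader:
#             optimizer.zero_grad()
#             loss = torch.nn.functional.cross_entropy(model(inputs), labels)
#             loss.backward()
#             optimizer.step()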