# optimise.py
import torch
import torch.optim as optim
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, SubsetRandomSampler
import numpy as np
import optuna  # Hyperparameter optimization framework
from model import FCNN # Import the model

# Cross-validation setup: carve a held-out validation split out of the training set.
# The indices are reshuffled on every call, so each Optuna trial sees a fresh split.
def get_data_loaders(trainset, batch_size, validation_split=0.2):
    dataset_size = len(trainset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    train_sampler = SubsetRandomSampler(train_indices)
    val_sampler = SubsetRandomSampler(val_indices)

    train_loader = DataLoader(trainset, batch_size=batch_size, sampler=train_sampler)
    val_loader = DataLoader(trainset, batch_size=batch_size, sampler=val_sampler)
    return train_loader, val_loader

# Objective function for Optuna: trains FCNN with the suggested hyperparameters
# and returns validation accuracy, which the study maximises.
# Note: `trainset` and `loss_function` are module-level globals defined in the
# __main__ block below, so this script is meant to be run directly.
def objective(trial):
    # Hyperparameter suggestions
    lr = trial.suggest_float('lr', 1e-4, 1e-1, log=True)  # log-uniform search over learning rates
    batch_size = trial.suggest_int('batch_size', 16, 128, step=16)

    # Create model and optimizer
    model = FCNN()
    optimizer = optim.Adam(model.parameters(), lr=lr)

    # Get data loaders with a fresh train/validation split
    train_loader, val_loader = get_data_loaders(trainset, batch_size)

    # Training loop
    for epoch in range(40):  # Diminishing returns after 40 epochs
        model.train()
        for images, labels in train_loader:
            optimizer.zero_grad()
            outputs = model(images)
            loss = loss_function(outputs, labels)
            loss.backward()  # back-propagation
            optimizer.step()

    # Validation accuracy
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in val_loader:
            outputs = model(images)
            _, predicted = torch.max(outputs, dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    accuracy = correct / total
    return accuracy

if __name__ == "__main__":
    # Loss function (read as a global inside objective())
    loss_function = nn.CrossEntropyLoss()

    # Data augmentation is applied to the training set only
    train_transform = transforms.Compose([
        transforms.RandomRotation(10),                     # Small random rotations
        transforms.RandomAffine(0, translate=(0.1, 0.1)),  # Small translations
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))
    ])
    # The test set gets no augmentation, only tensor conversion and normalization
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))
    ])

    # Load the MNIST dataset
    trainset = datasets.MNIST(root='./data', train=True, download=True, transform=train_transform)
    testset = datasets.MNIST(root='./data', train=False, download=True, transform=test_transform)

    # Run the optimization
    study = optuna.create_study(direction='maximize')
    study.optimize(objective, n_trials=20)

    # Print the best hyperparameters
    print("Best Hyperparameters:", study.best_params)