-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain.py
More file actions
50 lines (41 loc) · 1.89 KB
/
train.py
File metadata and controls
50 lines (41 loc) · 1.89 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
# train.py
import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, SubsetRandomSampler
from torchvision import datasets, transforms

from model import FCNN  # Import the model
if __name__ == "__main__":
    # Optimal hyperparameters found from optimise.py
    BEST_LEARNING_RATE = 0.001065741156220378
    BEST_BATCH_SIZE = 128
    BEST_EPOCHS = 40

    # Augmentation belongs to TRAINING data only: small random rotations and
    # translations make the classifier robust to slight digit placement/tilt.
    train_transform = transforms.Compose([
        transforms.RandomRotation(10),                     # up to +/-10 degrees
        transforms.RandomAffine(0, translate=(0.1, 0.1)),  # up to 10% shift in x/y
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,)),
    ])
    # BUG FIX: the original applied the augmentation transform to the test set
    # as well, which randomly perturbs evaluation images. Test data must only
    # be converted to tensors and normalized with the same statistics.
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,)),
    ])

    trainset = datasets.MNIST(root='./data', train=True, download=True, transform=train_transform)
    testset = datasets.MNIST(root='./data', train=False, download=True, transform=test_transform)

    # Shuffle only the training data; evaluation order is irrelevant and a
    # deterministic test loader makes results reproducible.
    train_loader = DataLoader(trainset, batch_size=BEST_BATCH_SIZE, shuffle=True)
    test_loader = DataLoader(testset, batch_size=BEST_BATCH_SIZE, shuffle=False)

    # Model, loss function, and optimizer with the best hyperparameters
    model = FCNN()
    optimizer = optim.Adam(model.parameters(), lr=BEST_LEARNING_RATE)
    loss_function = nn.CrossEntropyLoss()

    # Explicitly enable training mode (matters if FCNN uses dropout/batch-norm
    # — presumably a plain fully-connected net, but this is harmless either way).
    model.train()
    for epoch in range(BEST_EPOCHS):
        current_loss = 0.0
        for images, labels in train_loader:
            optimizer.zero_grad()
            outputs = model(images)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            current_loss += loss.item()
        # Report the mean mini-batch loss for the epoch.
        print(f'Epoch {epoch+1}, Loss: {current_loss/len(train_loader):.4f}')

    # torch.save does not create missing directories; ensure "model/" exists
    # so the save cannot fail with FileNotFoundError.
    os.makedirs("model", exist_ok=True)
    torch.save(model.state_dict(), "model/image_classifier.pt")