
Commit 0eab906

committed
added codes
1 parent 4cbbc13 commit 0eab906

11 files changed: +263 -0 lines changed
Lines changed: 68 additions & 0 deletions
@@ -0,0 +1,68 @@
### Autoencoder Implementation in TensorFlow/Keras
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist

# Load the MNIST dataset
(x_train, _), (x_test, _) = mnist.load_data()

# Normalize the images to the [0, 1] range and reshape them to (num_samples, 28*28)
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), -1))
x_test = x_test.reshape((len(x_test), -1))

# Define the autoencoder model
input_dim = x_train.shape[1]
encoding_dim = 64  # Dimension of the encoding (bottleneck) layer

# Encoder
input_img = layers.Input(shape=(input_dim,))
encoded = layers.Dense(256, activation='relu')(input_img)
encoded = layers.Dense(encoding_dim, activation='relu')(encoded)

# Decoder
decoded = layers.Dense(256, activation='relu')(encoded)
decoded = layers.Dense(input_dim, activation='sigmoid')(decoded)  # Sigmoid because the input is normalized to [0, 1].

# Autoencoder model
autoencoder = keras.Model(input_img, decoded)

# Compile the model
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')

# Train the model
autoencoder.fit(x_train, x_train,
                epochs=10,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test, x_test))

# Visualize some results after training
decoded_imgs = autoencoder.predict(x_test)

n = 8  # Number of digits to display
plt.figure(figsize=(9, 4))
for i in range(n):
    # Display original images on the top row
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28), cmap='gray')
    ax.axis('off')

    # Display reconstructed images on the bottom row
    ax = plt.subplot(2, n, i + n + 1)
    plt.imshow(decoded_imgs[i].reshape(28, 28), cmap='gray')
    ax.axis('off')

plt.show()

'''
### Explanation:

- **Data Loading**: The MNIST dataset is loaded and normalized to the [0, 1] range. Each image is flattened into a 784-dimensional vector.
- **Model Definition**: The autoencoder pairs an encoder that compresses each input to a 64-dimensional code with a decoder that reconstructs the original 784-dimensional vector.
- **Training**: The model is trained for 10 epochs with binary crossentropy as the reconstruction loss.
- **Visualization**: After training completes, original test images are displayed above their reconstructions.
'''
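Because the Keras script above builds the network with the functional API, the trained encoder half can be wrapped in its own model without retraining. The snippet below is a minimal sketch, not part of the committed file; it assumes the `autoencoder` has already been fit and that `input_img`, `encoded`, and `x_test` from the script above are still in scope.

```python
# Sketch: reuse the trained encoder layers as a standalone model.
# Assumes input_img, encoded, and x_test from the Keras script above.
encoder = keras.Model(input_img, encoded)

codes = encoder.predict(x_test)  # shape (10000, 64) for the MNIST test set
print(codes.shape)
```

Since `keras.Model(input_img, encoded)` shares the already-trained layers, the resulting 64-dimensional codes can be fed directly to downstream steps such as clustering or nearest-neighbour search.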
Lines changed: 96 additions & 0 deletions
@@ -0,0 +1,96 @@
### Autoencoder Implementation in PyTorch

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt

# Hyperparameters
batch_size = 128
learning_rate = 0.001
num_epochs = 10

# Transform to normalize the data to roughly [-1, 1]
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])

# Load the MNIST dataset
train_dataset = datasets.MNIST(root='./data', train=True, transform=transform, download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)

# Define the autoencoder model
class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()
        # Encoder layers
        self.encoder = nn.Sequential(
            nn.Linear(28 * 28, 256),
            nn.ReLU(True),
            nn.Linear(256, 64),
            nn.ReLU(True)
        )
        # Decoder layers
        self.decoder = nn.Sequential(
            nn.Linear(64, 256),
            nn.ReLU(True),
            nn.Linear(256, 28 * 28),
            nn.Tanh()  # Tanh because the input is normalized to [-1, 1].
        )

    def forward(self, x):
        x = x.view(-1, 28 * 28)  # Flatten the image tensor into vectors.
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded.view(-1, 1, 28, 28)  # Reshape back to the original image dimensions.

# Initialize the model, loss function and optimizer
model = Autoencoder()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop
for epoch in range(num_epochs):
    for data in train_loader:
        img, _ = data

        # Forward pass
        output = model(img)

        # Compute the reconstruction loss
        loss = criterion(output, img)

        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')

# Visualize some results after training
with torch.no_grad():
    sample_data = next(iter(train_loader))[0]
    reconstructed_data = model(sample_data)

plt.figure(figsize=(9, 4))
for i in range(8):
    ax = plt.subplot(2, 8, i + 1)
    plt.imshow(sample_data[i][0], cmap='gray')
    ax.axis('off')

    ax = plt.subplot(2, 8, i + 9)
    plt.imshow(reconstructed_data[i][0], cmap='gray')
    ax.axis('off')

plt.show()

'''
### Explanation:

- **Data Loading**: The MNIST dataset is loaded with a transform that normalizes pixel values to roughly [-1, 1].
- **Model Definition**: An `Autoencoder` class defines both the encoder and decoder networks as `nn.Sequential` stacks.
- **Training Loop**: The network is trained for 10 epochs using mean squared error (MSE) as the reconstruction loss; the loss printed per epoch is that of the last batch.
- **Visualization**: After training completes, original images are displayed above their reconstructions.
'''
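Because the PyTorch script above trains with plain MSE over normalized pixels, a per-image reconstruction error falls out almost for free after training. The sketch below is not part of the committed file; it assumes `model` and `train_loader` from the script above, and `reconstruction_errors` is a hypothetical helper name introduced here for illustration.

```python
import torch

# Sketch: per-image reconstruction error, assuming `model` and `train_loader`
# from the PyTorch script above. `reconstruction_errors` is a hypothetical helper.
def reconstruction_errors(model, loader):
    model.eval()
    errors = []
    with torch.no_grad():
        for img, _ in loader:
            recon = model(img)
            # Mean squared error per image, averaged over channel, height, width.
            per_image = ((recon - img) ** 2).mean(dim=(1, 2, 3))
            errors.append(per_image)
    return torch.cat(errors)

errs = reconstruction_errors(model, train_loader)
print(errs.mean().item(), errs.max().item())
```

Images with unusually high error are the ones the autoencoder reconstructs worst, which is the usual starting point when an autoencoder like this is used for anomaly scoring.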
Lines changed: 99 additions & 0 deletions
@@ -0,0 +1,99 @@
Below is a simple implementation of an autoencoder using PyTorch for the MNIST dataset. This example includes data loading, model definition, training, and evaluation.

### Autoencoder Implementation in PyTorch

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt

# Hyperparameters
batch_size = 128
learning_rate = 0.001
num_epochs = 10

# Transform to normalize the data to roughly [-1, 1]
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])

# Load the MNIST dataset
train_dataset = datasets.MNIST(root='./data', train=True, transform=transform, download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)

# Define the autoencoder model
class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()
        # Encoder layers
        self.encoder = nn.Sequential(
            nn.Linear(28 * 28, 256),
            nn.ReLU(True),
            nn.Linear(256, 64),
            nn.ReLU(True)
        )
        # Decoder layers
        self.decoder = nn.Sequential(
            nn.Linear(64, 256),
            nn.ReLU(True),
            nn.Linear(256, 28 * 28),
            nn.Tanh()  # Tanh because the input is normalized to [-1, 1].
        )

    def forward(self, x):
        x = x.view(-1, 28 * 28)  # Flatten the image tensor into vectors.
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded.view(-1, 1, 28, 28)  # Reshape back to the original image dimensions.

# Initialize the model, loss function and optimizer
model = Autoencoder()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop
for epoch in range(num_epochs):
    for data in train_loader:
        img, _ = data

        # Forward pass
        output = model(img)

        # Compute the reconstruction loss
        loss = criterion(output, img)

        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')

# Visualize some results after training
with torch.no_grad():
    sample_data = next(iter(train_loader))[0]
    reconstructed_data = model(sample_data)

plt.figure(figsize=(9, 4))
for i in range(8):
    ax = plt.subplot(2, 8, i + 1)
    plt.imshow(sample_data[i][0], cmap='gray')
    ax.axis('off')

    ax = plt.subplot(2, 8, i + 9)
    plt.imshow(reconstructed_data[i][0], cmap='gray')
    ax.axis('off')

plt.show()
```

### Explanation:

- **Data Loading**: The MNIST dataset is loaded with a transform that normalizes pixel values to roughly [-1, 1].
- **Model Definition**: An `Autoencoder` class defines both the encoder and decoder networks.
- **Training Loop**: The network is trained for 10 epochs using mean squared error (MSE) as the reconstruction loss.
- **Visualization**: After training completes, original images are displayed above their reconstructions.
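This third file repeats the PyTorch script, so one practical note applies to both copies: as written, training runs entirely on the CPU. The sketch below shows the usual device handling and weight saving; it is not part of the committed files, it assumes the `Autoencoder` class and `train_loader` from the script above, shows a single pass over the loader rather than the full epoch loop, and `autoencoder.pt` is a hypothetical file name.

```python
import torch

# Sketch: run the same training step on a GPU when one is available, then save
# the trained weights. Assumes Autoencoder and train_loader from the script above.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Autoencoder().to(device)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

for img, _ in train_loader:
    img = img.to(device)  # Move the batch to the same device as the model.
    loss = criterion(model(img), img)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

torch.save(model.state_dict(), 'autoencoder.pt')  # Hypothetical output path.
```

To continue training or run inference later, the saved weights can be restored with `model.load_state_dict(torch.load('autoencoder.pt'))`.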
Binary file not shown (7.48 MB).
Binary file not shown (1.57 MB).
Binary file not shown (9.77 KB).
Binary file not shown (4.44 KB).
Binary file not shown (44.9 MB).
Binary file not shown (9.45 MB).
Binary file not shown (58.6 KB).

0 commit comments
