Commit c7d709a

Create qsvmpytorch.py
1 parent ae0a70e commit c7d709a

File tree: qsvmpytorch.py

1 file changed: 77 additions, 0 deletions

qsvmpytorch.py
@@ -0,0 +1,77 @@
import pennylane as qml
from pennylane import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
import torch.nn as nn
import torch.optim as optim

# Create a quantum device with 2 qubits
dev = qml.device("default.qubit", wires=2)

# Define quantum feature map (encoding classical data into quantum states)
def feature_map(x):
    qml.Hadamard(wires=0)
    qml.Hadamard(wires=1)
    qml.RZ(x[0], wires=0)
    qml.RZ(x[1], wires=1)
    qml.CNOT(wires=[0, 1])
    qml.RY(x[0], wires=0)
    qml.RY(x[1], wires=1)

# Variational ansatz (simple circuit to be trained)
def variational_circuit(params):
    qml.RY(params[0], wires=0)
    qml.RY(params[1], wires=1)
    qml.CNOT(wires=[0, 1])
    qml.RZ(params[2], wires=1)

# Quantum node: feature map followed by the trainable ansatz, measured as
# the expectation value of Pauli-Z on qubit 0
@qml.qnode(dev, interface="torch")
def circuit(x, weights):
    feature_map(x)
    variational_circuit(weights)
    return qml.expval(qml.PauliZ(0))
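
# Optional sanity check: qml.draw renders the combined feature map and ansatz.
# The sample input and zeroed weights below are arbitrary illustrative values.
# print(qml.draw(circuit)(torch.tensor([0.1, 0.2]), torch.zeros(3)))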

# Create a torch-compatible quantum layer
class QuantumLayer(nn.Module):
    def __init__(self):
        super().__init__()
        # Initialize trainable parameters
        self.weights = nn.Parameter(0.01 * torch.randn(3))

    def forward(self, x):
        # Apply the quantum circuit to each input in the batch, one sample at a time
        out = torch.stack([circuit(x[i], self.weights) for i in range(x.shape[0])])
        # Cast to the input dtype: depending on the PennyLane version, the QNode
        # may return float64, which the float32 linear layer downstream would reject
        return out.to(x.dtype)

# Define the full model
class QSVM(nn.Module):
    def __init__(self):
        super().__init__()
        self.q_layer = QuantumLayer()
        self.classifier = nn.Linear(1, 1)

    def forward(self, x):
        q_out = self.q_layer(x).unsqueeze(1)  # Add dimension for linear layer
        return torch.sigmoid(self.classifier(q_out))
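
# Usage note (illustrative): for a batch of shape (N, 2), the model produces an
# (N, 1) tensor of probabilities, e.g. QSVM()(torch.rand(4, 2)).shape == (4, 1)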

# Example toy dataset (linearly separable)
X = torch.tensor([[0.1, 0.2], [1.2, 0.9], [0.2, 0.1], [1.0, 1.1]], dtype=torch.float32)
Y = torch.tensor([[0.], [1.], [0.], [1.]], dtype=torch.float32)

dataset = TensorDataset(X, Y)
loader = DataLoader(dataset, batch_size=2, shuffle=True)

# Instantiate model, loss, and optimizer
model = QSVM()
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Training loop
for epoch in range(50):
    for xb, yb in loader:
        pred = model(xb)
        loss = criterion(pred, yb)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(f"Epoch {epoch+1}, Loss: {loss.item():.4f}")
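
# Illustrative evaluation on the toy training data (assumes a 0.5 decision threshold)
with torch.no_grad():
    preds = (model(X) > 0.5).float()
    print("Predictions:", preds.squeeze().tolist())
    print("Targets:    ", Y.squeeze().tolist())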
