@@ -565,6 +565,8 @@ the trouble of performing the transformation
565565$\phi(\bm{x}_i)^T\phi(\bm{x}_j)$ during the SVM calculations.
566566
567567
568+
569+
568570!split
569571===== The problem to solve =====
Using our definition of the kernel, we can again rewrite the Lagrangian
@@ -613,6 +615,14 @@ o Tanh: $K(\bm{v},\bm{w})=\tanh{(\bm{v}^T\bm{w}+\gamma)}$,
613615and many other ones.
614616
615617
618+ The kernel trick involves implicitly mapping the input data into a
619+ higher-dimensional feature space where a linear separation is
620+ possible. Instead of explicitly transforming each data point using a
mapping function, the kernel function computes the inner product in
the feature space directly. This approach avoids the computational
cost of handling
623+ high-dimensional spaces explicitly.
624+
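To make the trick concrete, here is a minimal sketch in NumPy (the function name explicit_map and the sample points are illustrative): for the second-degree polynomial kernel $K(\bm{v},\bm{w})=(\bm{v}^T\bm{w})^2$ in two dimensions, evaluating the kernel directly gives the same number as first mapping into the three-dimensional feature space.

!bc pycod
import numpy as np

def explicit_map(x):
    # explicit feature map for the degree-2 polynomial kernel in 2D
    return np.array([x[0]**2, np.sqrt(2.0)*x[0]*x[1], x[1]**2])

x = np.array([1.0, 2.0])
y = np.array([3.0, 0.5])

inner_feature_space = explicit_map(x) @ explicit_map(y)  # inner product after explicit mapping
kernel_value = (x @ y)**2                                # kernel evaluated directly
print(inner_feature_space, kernel_value)                 # both print 16.0
!ec

The kernel evaluation costs one inner product in the original space, independently of the dimension of the feature space.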
625+
616626!split
617627===== Mercer's theorem =====
618628
@@ -832,6 +842,51 @@ plt.show()
832842!ec
833843
834844
845+ !split
846+ ===== The Iris data =====
847+
848+ !bc pycod
# import the required packages
import time
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score, classification_report, f1_score

# load the Iris data into a data frame
iris = load_iris()
dataset = pd.DataFrame(iris.data, columns=iris.feature_names)
dataset["target"] = iris.target

# use three of the four features (columns 1-3) and the class labels as targets
features = dataset[dataset.columns[1:4]]
target = dataset["target"]

# split the data into a training set and a test set
X_train, X_test, Y_train, Y_test = train_test_split(features, target, test_size=0.25, random_state=42)

# scale the features to [0,1]; the test set is transformed with the
# training-set parameters and clipped back into the unit interval
minmax = MinMaxScaler()
X_train = minmax.fit_transform(X_train)
X_test = minmax.transform(X_test)
X_test = np.clip(X_test, 0, 1)

# fit an SVM with an RBF kernel and time the training
start_time = time.time()
svm_model = SVC(kernel="rbf")
svm_model.fit(X_train, Y_train)
end_time = time.time()
print(f"Training time: {end_time - start_time:.4f} s")

# make predictions and calculate accuracy and other metrics
predictions = svm_model.predict(X_test)
accuracy = accuracy_score(Y_test, predictions)
print(f"The accuracy is: {100*accuracy:.2f}%")
macrof1 = f1_score(Y_test, predictions, average='macro')
print(f"The macro F1-score is: {100*macrof1:.2f}%")
print(classification_report(Y_test, predictions))
886+ !ec
887+
888+
889+
835890!split
836891===== Quantum SVMs =====
837892
@@ -841,6 +896,49 @@ separates these two groups. This line can be linear, but it can also be
841896much more complex, which can be achieved by the use of Kernels.
842897
843898
899+ !split
900+ ===== How QSVM works =====
901+
902+
903+ !split
904+ ===== Basic steps: Data Encoding (Quantum Feature Mapping) =====
905+
906+ !bblock
Mapping to Quantum States: Input data is first encoded into quantum
states. This encoding defines a quantum feature map, where classical
information is represented in a quantum Hilbert space.
910+ !eblock
911+
912+ !split
913+ ===== Basic steps: Quantum Kernel =====
914+ !bblock
915+ In many QSVM implementations, this mapping enables the computation of
916+ a kernel matrix, where the similarity (or inner product) between
917+ quantum states is evaluated. This quantum kernel can capture complex
918+ relationships in data that might be challenging for classical kernels.
919+ !eblock
920+
921+ !split
922+ ===== Basic steps: Quantum Circuit Processing =====
923+
924+ !bblock
925+ Quantum Gates and Circuits: Once data is encoded, a series of quantum
926+ gates (forming a quantum circuit) is applied. These gates manipulate
927+ the quantum states to perform the equivalent of the SVM algorithm’s
928+ optimization and decision-making steps.
929+ !eblock
930+
931+ !split
932+ ===== Basic steps: Measurement =====
933+
934+ !bblock
935+ Outcome: After the quantum operations, a measurement is performed on
936+ the output state. The result of this measurement provides the
937+ classification outcome, analogous to deciding on which side of the
938+ hyperplane a data point lies.
939+ !eblock
940+
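A minimal sketch of these four steps in PennyLane (assuming the default.qubit simulator; the function name kernel_entry is illustrative): one data point is angle-encoded, the inverse encoding of a second point is applied as the circuit, and the measured probability of the all-zero state yields the kernel value used for classification.

!bc pycod
import pennylane as qml
from pennylane import numpy as np

n_qubits = 2
dev = qml.device('default.qubit', wires=n_qubits)

@qml.qnode(dev)
def kernel_entry(x1, x2):
    # step 1: encode x1 into a quantum state (quantum feature map)
    qml.AngleEmbedding(x1, wires=range(n_qubits), rotation='Y')
    # steps 2-3: apply the inverse encoding of x2; the resulting circuit
    # implements the overlap <phi(x2)|phi(x1)>
    qml.adjoint(qml.AngleEmbedding(x2, wires=range(n_qubits), rotation='Y'))
    # step 4: measure; the probability of |0...0> is |<phi(x2)|phi(x1)>|^2
    return qml.probs(wires=range(n_qubits))

x1 = np.array([0.1, 0.7])
x2 = np.array([0.2, 0.5])
print(kernel_entry(x1, x2)[0])  # kernel value, a number between 0 and 1
!ec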
941+
844942!split
845943===== Quantum Kernels and Feature Maps =====
846944
@@ -1562,13 +1660,27 @@ o define a kernel function via that QNode;
15621660o compute kernel matrices with qml.kernels;
15631661o train/predict using an SVM with the precomputed kernels.
15641662
1663+ !split
1664+ ===== Steps in Quantum Kernel SVM =====
1665+
1666+ o Data Encoding (Feature Map Generation)
  * The classical data $\bm{x}$ is encoded into a quantum state using a quantum feature map (a quantum circuit).
1668+
1669+ o Quantum Kernel Computation
1670+ * The kernel matrix is computed using a quantum computer by measuring the inner product of quantum states.
1671+
1672+ o Classical SVM Training
1673+ * The quantum kernel matrix is used in a classical SVM algorithm to find the optimal decision boundary.
1674+
o Prediction
  * New data is classified using the trained SVM model with the quantum kernel, as illustrated in the code examples below.
1677+
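Scikit-learn accepts such a kernel either as a Python callable, as in the PennyLane example that follows, or as a precomputed Gram matrix. Here is a minimal sketch of the precomputed pattern; the kernel function is an illustrative classical stand-in (in a QSVM it would be evaluated on a quantum device), and the variable names are chosen so as not to clash with the Iris examples.

!bc pycod
import numpy as np
from sklearn.svm import SVC

def kernel_fn(x1, x2):
    # illustrative stand-in for a quantum kernel evaluation
    return np.exp(-np.sum((x1 - x2)**2))

rng = np.random.default_rng(seed=0)
Xtr = rng.random((20, 2))          # toy training data
ytr = rng.integers(0, 2, size=20)  # toy binary labels
Xte = rng.random((5, 2))           # toy test data

# Gram matrices: train x train for fitting, test x train for prediction
Ktr = np.array([[kernel_fn(a, b) for b in Xtr] for a in Xtr])
Kte = np.array([[kernel_fn(a, b) for b in Xtr] for a in Xte])

svm = SVC(kernel='precomputed')
svm.fit(Ktr, ytr)
print(svm.predict(Kte))
!ec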
15651678
15661679!split
15671680===== Iris Dataset =====
15681681
1569- !bc pycod
1570- # Quantum Kernel SVM on Iris (Setosa vs Versicolor) using PennyLane and scikit-learn
15711682
1683+ !bc pycod
15721684import pennylane as qml
15731685from pennylane import numpy as np
15741686from sklearn import datasets
@@ -1577,75 +1689,46 @@ from sklearn.decomposition import PCA
15771689from sklearn.model_selection import train_test_split
15781690from sklearn.svm import SVC
15791691from sklearn.metrics import accuracy_score, classification_report
import time
from sklearn.metrics import f1_score

# this example reuses X_train, X_test, Y_train and Y_test from the classical
# Iris example above (the code blocks are meant to be run in sequence)
num_qubits = 4
device = qml.device('default.qubit', wires=num_qubits)
15801694
1581- # Load Iris dataset and select two classes (0: setosa, 1: versicolor)
1582- iris = datasets.load_iris()
1583- X = iris.data
1584- y = iris.target
1585- mask = y != 2 # drop class '2' (virginica)
1586- X = X[mask]
1587- y = y[mask]
15881695
1589- # Standardize features and reduce to 2 dimensions via PCA
1590- scaler = StandardScaler()
1591- X_scaled = scaler.fit_transform(X)
1592- pca = PCA(n_components=2)
1593- X_reduced = pca.fit_transform(X_scaled)
1594-
1595- # Split into train and test sets
1596- X_train, X_test, y_train, y_test = train_test_split(
1597- X_reduced, y, test_size=0.2, random_state=42
1598- )
1599- # Define quantum device and feature map (angle encoding on 2 qubits)
1600- n_qubits = 2
1601- dev = qml.device('default.qubit', wires=n_qubits)
@qml.qnode(device)
def qnode(x1, x2):
    # encode the first data point by rotating each qubit about the y-axis
    qml.templates.AngleEmbedding(x1, wires=range(num_qubits), rotation='Y')
    # apply the inverse encoding of the second data point; the probability of
    # the all-zero state is then the kernel value |<phi(x2)|phi(x1)>|^2
    qml.adjoint(qml.AngleEmbedding(features=x2, wires=range(num_qubits), rotation='Y'))
    return qml.probs(wires=range(num_qubits))


def qkernel(A, B):
    # compute the quantum kernel (Gram) matrix between the data sets A and B
    return np.array([[qnode(a, b)[0] for b in B] for a in A])


# Fit the model and time the training
start_time = time.time()
qsvm = SVC(kernel=qkernel)
qsvm.fit(X_train, Y_train)
end_time = time.time()
print(f"Training time: {end_time - start_time:.4f} s")

# Make predictions and calculate accuracy and other metrics
predictions = qsvm.predict(X_test)
accuracy = accuracy_score(Y_test, predictions)
print(f"The accuracy is: {100*accuracy:.2f}%")
macrof1 = f1_score(Y_test, predictions, average='macro')
print(f"The macro F1-score is: {100*macrof1:.2f}%")
print(classification_report(Y_test, predictions))
16021725
1603- @qml.qnode(dev)
1604- def feature_map(x):
1605- # Encode 2 features into rotation angles
1606- for i in range(n_qubits):
1607- qml.RY(x[i] * np.pi, wires=i)
1608- # Optional: add entanglement (e.g., ZZ interaction)
1609- qml.CNOT(wires=[0, 1])
1610- qml.RZ((x[0] + x[1]) * np.pi, wires=1)
1611- qml.CNOT(wires=[0, 1])
1612- return qml.state()
1613-
1614- # Compute quantum kernel (fidelity) between two feature vectors
1615- def quantum_kernel(x1, x2):
1616- # Compute state vectors for each input
1617- state1 = feature_map(x1)
1618- state2 = feature_map(x2)
1619- # Kernel = |<phi(x1)|phi(x2)>|^2
1620- overlap = np.vdot(state1, state2)
1621- return np.abs(overlap) ** 2
1622-
1623- # Build kernel (Gram) matrices for training and test sets
1624- n_train = len(X_train)
1625- n_test = len(X_test)
1626- kernel_train = np.zeros((n_train, n_train))
1627- for i in range(n_train):
1628- for j in range(n_train):
1629- kernel_train[i, j] = quantum_kernel(X_train[i], X_train[j])
1630-
1631- kernel_test = np.zeros((n_test, n_train))
1632- for i in range(n_test):
1633- for j in range(n_train):
1634- kernel_test[i, j] = quantum_kernel(X_test[i], X_train[j])
1635-
1636- # Train SVM with precomputed quantum kernel
1637- svm = SVC(kernel='precomputed')
1638- svm.fit(kernel_train, y_train)
1639-
1640- # Predict on test set and evaluate
1641- y_pred = svm.predict(kernel_test)
1642- acc = accuracy_score(y_test, y_pred)
1643- print("Test Accuracy:", acc)
1644- print("\nClassification Report:\n", classification_report(y_test, y_pred))
16451726
16461727!ec
16471728
16481729
1730+
1731+
16491732!split
16501733===== Qiskit implementation =====
16511734