Implementing Machine Learning Algorithms from First Principles: Learning the SMO Algorithm for Solving SVMs with GPT

Algorithm implementation:
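
Before the code, a quick note on what it implements. This is the simplified SMO scheme (the second index is chosen at random rather than by heuristic): for each j, a random i ≠ j is drawn and the pair (α_i, α_j) is updated as

η = K(x_i, x_i) + K(x_j, x_j) − 2·K(x_i, x_j)
α_j ← clip(α_j + y_j·(E_i − E_j)/η, L, H)
α_i ← α_i + y_i·y_j·(α_j_old − α_j_new)

where E_k = h(x_k) − y_k is the prediction error and [L, H] is the box that keeps 0 ≤ α ≤ C while preserving the constraint Σ_k α_k·y_k = const. This is exactly what fit() below does.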

import numpy as np
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import random as rnd

class SVM():
    def __init__(self, max_iter=100, kernel_type='linear', C=1.0, epsilon=0.001):
        self.kernels = {
            'linear' : self.kernel_linear,
            'quadratic' : self.kernel_quadratic,
            'gaussian' : self.kernel_gauss
        }
        self.max_iter = max_iter
        self.kernel_type = kernel_type
        self.C = C
        self.epsilon = epsilon

    def fit(self, X, y):
        n, d = X.shape[0], X.shape[1]
        alpha = np.zeros(n)
        kernel = self.kernels[self.kernel_type]
        count = 0
        while True:
            count += 1
            alpha_prev = np.copy(alpha)
            for j in range(0, n):
                i = self.get_rnd_int(0, n-1, j)  # pick a random index i != j
                x_i, x_j, y_i, y_j = X[i,:], X[j,:], y[i], y[j]
                # eta = K_ii + K_jj - 2*K_ij (curvature along the update direction)
                k_ij = kernel(x_i, x_i) + kernel(x_j, x_j) - 2 * kernel(x_i, x_j)
                if k_ij == 0:
                    continue  # degenerate pair, skip it
                alpha_prime_j, alpha_prime_i = alpha[j], alpha[i]
                (L, H) = self.compute_L_H(self.C, alpha_prime_j, alpha_prime_i, y_j, y_i)

                # Compute model parameters
                self.w = self.calc_w(alpha, y, X)
                self.b = self.calc_b(X, y, self.w)

                # Compute E_i, E_j
                E_i = self.E(x_i, y_i, self.w, self.b)
                E_j = self.E(x_j, y_j, self.w, self.b)

                # Update alpha_j and clip it to the box [L, H]
                alpha[j] = alpha_prime_j + float(y_j * (E_i - E_j)) / k_ij
                alpha[j] = max(alpha[j], L)
                alpha[j] = min(alpha[j], H)

                # Update alpha_i so that sum(alpha_k * y_k) stays unchanged
                alpha[i] = alpha_prime_i + y_i * y_j * (alpha_prime_j - alpha[j])
            # Check convergence
            diff = np.linalg.norm(alpha - alpha_prev)
            if diff < self.epsilon:
                break
            if count >= self.max_iter:
                print("Iteration number exceeded the max of %d iterations" % (self.max_iter))
                break  # stop early, but still finalize b, w and support vectors below
            
        self.b = self.calc_b(X, y, self.w)
        if self.kernel_type == 'linear':
            self.w = self.calc_w(alpha, y, X)
        # Get support vectors
        alpha_idx = np.where(alpha > 0)[0]
        support_vectors = X[alpha_idx, :]
        return support_vectors, count

    def predict(self, X):
        return self.h(X, self.w, self.b)
    
    def calc_b(self, X, y, w):
        # Simplified bias estimate: mean of y - w.x over all samples
        # (textbook SMO averages over the support vectors only)
        b_tmp = y - np.dot(w.T, X.T)
        return np.mean(b_tmp)

    def calc_w(self, alpha, y, X):
        # w = sum_i alpha_i * y_i * x_i (meaningful for the linear kernel)
        return np.dot(X.T, np.multiply(alpha, y))

    def h(self, X, w, b):
        # Decision function: sign(w.x + b)
        return np.sign(np.dot(w.T, X.T) + b).astype(int)

    def E(self, x_k, y_k, w, b):
        # Prediction error E_k = h(x_k) - y_k
        return self.h(x_k, w, b) - y_k
    
    def compute_L_H(self, C, alpha_prime_j, alpha_prime_i, y_j, y_i):
        # Box bounds [L, H] that keep 0 <= alpha_j <= C while preserving
        # the constraint alpha_i * y_i + alpha_j * y_j = const
        if y_i != y_j:
            return (max(0, alpha_prime_j - alpha_prime_i), min(C, C - alpha_prime_i + alpha_prime_j))
        else:
            return (max(0, alpha_prime_i + alpha_prime_j - C), min(C, alpha_prime_i + alpha_prime_j))
    
    def get_rnd_int(self, a, b, z):
        # Draw a random integer in [a, b] different from z (give up after 1000 tries)
        i = z
        cnt = 0
        while i == z and cnt < 1000:
            i = rnd.randint(a, b)
            cnt += 1
        return i
    
    def kernel_linear(self, x1, x2):
        return np.dot(x1, x2.T)
    
    def kernel_quadratic(self, x1, x2):
        return (np.dot(x1, x2.T) ** 2)
    
    def kernel_gauss(self, x1, x2, sigma=1):
        # RBF kernel: exp(-||x1 - x2||^2 / (2 * sigma^2))
        return np.exp(-(np.linalg.norm(x1 - x2, 2) ** 2) / (2 * sigma ** 2))
    
    def predict_proba(self, X):
        # Note: returns the raw decision value w.x + b, not a calibrated probability
        return np.dot(self.w.T, X.T) + self.b
    

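One caveat about the class above: predict() (and predict_proba()) go through the explicit weight vector w, which is only meaningful for the linear kernel — with the quadratic or gaussian kernel the alphas are trained but prediction is still linear. A proper kernelized decision function sums over the support vectors instead. A minimal sketch, assuming fit() were extended to store self.alpha, self.X and self.y (the class above does not currently do this):

def kernel_decision(model, X_new):
    # Hypothetical helper: assumes model.alpha, model.X, model.y were saved
    # during fit(), which the class above does not currently do.
    kernel = model.kernels[model.kernel_type]
    preds = []
    for x in X_new:
        s = sum(a * y_k * kernel(x_k, x)
                for a, y_k, x_k in zip(model.alpha, model.y, model.X)
                if a > 0)  # only support vectors (alpha > 0) contribute
        preds.append(np.sign(s + model.b))
    return np.array(preds)
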
# Means of the two 2-D Gaussian distributions
mean1, mean2 = np.array([0, 2]), np.array([2, 0])
# Shared covariance matrix
covar = np.array([[1.5, 1.0], [1.0, 1.5]])
# Sample class 1 and label it +1
X1 = np.random.multivariate_normal(mean1, covar, 100)
y1 = np.ones(X1.shape[0])
# Sample class 2 and label it -1
X2 = np.random.multivariate_normal(mean2, covar, 100)
y2 = -1 * np.ones(X2.shape[0])
# Train/test split: first 80 points of each class for training
X_train = np.vstack((X1[:80], X2[:80]))
y_train = np.hstack((y1[:80], y2[:80]))
X_test = np.vstack((X1[80:], X2[80:]))
y_test = np.hstack((y1[80:], y2[80:]))

X, y = X_train, y_train

model = SVM(max_iter=3, kernel_type='linear', C=3.0, epsilon=0.001)
model.fit(X_train, y_train)
y_predicted = model.predict(X_test)
cm = confusion_matrix(y_test, y_predicted)
accuracy = np.trace(cm) / cm.sum()
print(accuracy)
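
Equivalently, sklearn's accuracy_score computes the same number directly:

from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, y_predicted))  # same value as the confusion-matrix arithmetic above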

# Plot the training data
fig, ax = plt.subplots()
colors = ['red' if c == -1 else 'blue' for c in y]
ax.scatter(X[:, 0], X[:, 1], c=colors)

# Decision boundary: solve w0*x + w1*y + b = 0 for y
xx = np.linspace(-3, 7, 10)
yy = - (model.w[0] * xx + model.b) / model.w[1]
ax.plot(xx, yy, color='green', linestyle='-')
plt.show()
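
To visualize the margin too, the lines w·x + b = ±1 can be drawn with the same algebra (an optional sketch; place it before plt.show()):

# Optional: draw the margin lines w.x + b = +1 and w.x + b = -1
for offset in (+1, -1):
    yy_margin = -(model.w[0] * xx + model.b - offset) / model.w[1]
    ax.plot(xx, yy_margin, color='green', linestyle='--')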

  

Run output:

Iteration number exceeded the max of 3 iterations
0.975

The accuracy is quite good! For comparison, the same task using sklearn's library:
# Import the svm module
from sklearn import svm
# Create an SVM model instance with a linear kernel
clf = svm.SVC(kernel='linear')
# Fit the model
clf.fit(X_train, y_train)
# Predict on the test set
y_pred = clf.predict(X_test)
# Classification accuracy on the test set
cm = confusion_matrix(y_test, y_pred)   # note: use y_pred here, not y_predicted
accuracy = np.trace(cm) / cm.sum()
print(accuracy)
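
Since the kernel is linear, sklearn also exposes the fitted hyperplane, which makes a direct comparison with the hand-rolled model easy:

# For a linear kernel, SVC exposes the separating hyperplane:
# coef_ has shape (1, n_features), intercept_ has shape (1,)
print("sklearn w:", clf.coef_[0], "b:", clf.intercept_[0])
print("ours    w:", model.w, "b:", model.b)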

  

The accuracy is the same!

 

 
