Implementing Machine Learning Algorithms from First Principles: Learning the SMO Algorithm for Solving SVMs with GPT
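SMO (Sequential Minimal Optimization) attacks the SVM dual problem by repeatedly picking a pair of Lagrange multipliers (α_i, α_j), optimizing them analytically while all other multipliers are held fixed, and clipping the result to the box constraint [0, C]. The code below follows roughly the simplified variant in which the second index of each pair is chosen at random; its core per-pair update is:

$$
\begin{aligned}
E_k &= f(x_k) - y_k, \qquad \eta = K(x_i, x_i) + K(x_j, x_j) - 2\,K(x_i, x_j),\\
\alpha_j &\leftarrow \operatorname{clip}\!\left(\alpha_j + \frac{y_j\,(E_i - E_j)}{\eta},\; L,\; H\right), \qquad
\alpha_i \leftarrow \alpha_i + y_i y_j\,\big(\alpha_j^{\text{old}} - \alpha_j\big),\\
w &= \sum_k \alpha_k y_k x_k, \qquad b = \operatorname{mean}\!\left(y - w^\top x\right),
\end{aligned}
$$

where the clipping bounds are L = max(0, α_j − α_i), H = min(C, C + α_j − α_i) when y_i ≠ y_j, and L = max(0, α_i + α_j − C), H = min(C, α_i + α_j) otherwise.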
Algorithm implementation:
```python
import numpy as np
import random as rnd
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix


class SVM():
    def __init__(self, max_iter=100, kernel_type='linear', C=1.0, epsilon=0.001):
        self.kernels = {
            'linear': self.kernel_linear,
            'quadratic': self.kernel_quadratic,
            'gaussian': self.kernel_gauss
        }
        self.max_iter = max_iter
        self.kernel_type = kernel_type
        self.C = C
        self.epsilon = epsilon

    def fit(self, X, y):
        n, d = X.shape[0], X.shape[1]
        alpha = np.zeros((n))
        kernel = self.kernels[self.kernel_type]
        count = 0
        while True:
            count += 1
            alpha_prev = np.copy(alpha)
            for j in range(0, n):
                i = self.get_rnd_int(0, n - 1, j)  # pick a random i != j
                x_i, x_j, y_i, y_j = X[i, :], X[j, :], y[i], y[j]
                k_ij = kernel(x_i, x_i) + kernel(x_j, x_j) - 2 * kernel(x_i, x_j)
                if k_ij == 0:
                    continue
                alpha_prime_j, alpha_prime_i = alpha[j], alpha[i]
                (L, H) = self.compute_L_H(self.C, alpha_prime_j, alpha_prime_i, y_j, y_i)

                # Compute model parameters
                self.w = self.calc_w(alpha, y, X)
                self.b = self.calc_b(X, y, self.w)

                # Compute prediction errors E_i, E_j
                E_i = self.E(x_i, y_i, self.w, self.b)
                E_j = self.E(x_j, y_j, self.w, self.b)

                # Update alpha_j (clipped to [L, H]) and alpha_i
                alpha[j] = alpha_prime_j + float(y_j * (E_i - E_j)) / k_ij
                alpha[j] = max(alpha[j], L)
                alpha[j] = min(alpha[j], H)
                alpha[i] = alpha_prime_i + y_i * y_j * (alpha_prime_j - alpha[j])

            # Check convergence
            diff = np.linalg.norm(alpha - alpha_prev)
            if diff < self.epsilon:
                break

            if count >= self.max_iter:
                print("Iteration number exceeded the max of %d iterations" % (self.max_iter))
                return

        # Compute final model parameters
        self.b = self.calc_b(X, y, self.w)
        if self.kernel_type == 'linear':
            self.w = self.calc_w(alpha, y, X)

        # Get support vectors
        alpha_idx = np.where(alpha > 0)[0]
        support_vectors = X[alpha_idx, :]
        return support_vectors, count

    def predict(self, X):
        return self.h(X, self.w, self.b)

    def calc_b(self, X, y, w):
        b_tmp = y - np.dot(w.T, X.T)
        return np.mean(b_tmp)

    def calc_w(self, alpha, y, X):
        return np.dot(X.T, np.multiply(alpha, y))

    def h(self, X, w, b):
        return np.sign(np.dot(w.T, X.T) + b).astype(int)

    def E(self, x_k, y_k, w, b):
        return self.h(x_k, w, b) - y_k

    def compute_L_H(self, C, alpha_prime_j, alpha_prime_i, y_j, y_i):
        if y_i != y_j:
            return (max(0, alpha_prime_j - alpha_prime_i),
                    min(C, C - alpha_prime_i + alpha_prime_j))
        else:
            return (max(0, alpha_prime_i + alpha_prime_j - C),
                    min(C, alpha_prime_i + alpha_prime_j))

    def get_rnd_int(self, a, b, z):
        i = z
        cnt = 0
        while i == z and cnt < 1000:
            i = rnd.randint(a, b)
            cnt = cnt + 1
        return i

    def kernel_linear(self, x1, x2):
        return np.dot(x1, x2.T)

    def kernel_quadratic(self, x1, x2):
        return np.dot(x1, x2.T) ** 2

    def kernel_gauss(self, x1, x2, sigma=1):
        return np.exp(-(np.linalg.norm(x1 - x2, 2)) ** 2 / (2 * sigma ** 2))

    def predict_proba(self, X):
        # Raw (uncalibrated) decision value w.x + b
        return np.dot(self.w.T, X.T) + self.b


# Means of the two 2-D Gaussian distributions
mean1, mean2 = np.array([0, 2]), np.array([2, 0])
# Shared covariance matrix
covar = np.array([[1.5, 1.0], [1.0, 1.5]])
# Sample X1 from the first Gaussian, labeled +1
X1 = np.random.multivariate_normal(mean1, covar, 100)
y1 = np.ones(X1.shape[0])
# Sample X2 from the second Gaussian, labeled -1
X2 = np.random.multivariate_normal(mean2, covar, 100)
y2 = -1 * np.ones(X2.shape[0])

# Split into training and test sets
X_train = np.vstack((X1[:80], X2[:80]))
y_train = np.hstack((y1[:80], y2[:80]))
X_test = np.vstack((X1[80:], X2[80:]))
y_test = np.hstack((y1[80:], y2[80:]))

X, y = X_train, y_train
model = SVM(max_iter=3, kernel_type='linear', C=3.0, epsilon=0.001)
model.fit(X_train, y_train)
y_predicted = [model.predict(x) for x in X_test]
cm = confusion_matrix(y_test, y_predicted)
accuracy = (cm[0][0] + cm[1][1]) / (cm[0][0] + cm[0][1] + cm[1][0] + cm[1][1])
print(accuracy)

# Plot the data and the learned decision boundary
fig, ax = plt.subplots()
colors = ['red' if c == -1 else 'blue' for c in y]
ax.scatter(X[:, 0], X[:, 1], c=colors)
xx = np.linspace(-3, 7, 10)
yy = -(model.w[0] * xx + model.b) / model.w[1]
ax.plot(xx, yy, color='green', linestyle='-')
plt.show()
```
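One detail worth noting: despite its name, predict_proba here just returns the raw signed decision value w·x + b, not a calibrated probability. A minimal sketch of using it to see how far each test point lies from the decision boundary (assuming the fitted model and X_test from the code above are in scope):

```python
# Raw decision values for the test set; the sign gives the predicted class,
# the magnitude divided by ||w|| gives the geometric distance to the boundary.
decision_values = model.predict_proba(X_test)
margins = decision_values / np.linalg.norm(model.w)
print("closest test point to the boundary:", np.abs(margins).min())
```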
Run output:
Iteration number exceeded the max of 3 iterations
0.975
An accuracy of 0.975 is pretty good!
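Note that fit returns the support vectors (all samples with α_i > 0) together with the number of outer iterations, but only when it converges before hitting max_iter; with max_iter=3 it bails out early and returns None, as the output above shows. A minimal sketch for inspecting the support vectors, assuming X_train/y_train and the SVM class from above are in scope (the larger iteration budget is my own choice for illustration):

```python
# Re-fit with a larger iteration budget so fit() returns (support_vectors, count)
# instead of bailing out early with None.
model2 = SVM(max_iter=100, kernel_type='linear', C=3.0, epsilon=0.001)
result = model2.fit(X_train, y_train)
if result is not None:
    support_vectors, n_iter = result
    print("support vectors:", len(support_vectors), "outer iterations:", n_iter)
else:
    print("hit the iteration cap again; try raising max_iter")
```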
For comparison, using sklearn's built-in SVM:
```python
# Import the svm module
from sklearn import svm

# Create an SVM classifier with a linear kernel
clf = svm.SVC(kernel='linear')
# Fit the model
clf.fit(X_train, y_train)
# Predict on the test set
y_pred = clf.predict(X_test)
# Compute classification accuracy on the test set
cm = confusion_matrix(y_test, y_pred)
accuracy = (cm[0][0] + cm[1][1]) / (cm[0][0] + cm[0][1] + cm[1][0] + cm[1][1])
print(accuracy)
```
The accuracy is the same!
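As a further sanity check, one can compare the two decision boundaries directly. The handwritten model stores w and b as attributes, while sklearn's linear SVC exposes them as coef_ and intercept_; since a separating hyperplane is only defined up to scale (and sign), both are normalized before printing. A minimal sketch, assuming model and clf from the code above are in scope:

```python
import numpy as np

w_smo, b_smo = model.w, model.b                  # handwritten SMO hyperplane
w_skl, b_skl = clf.coef_[0], clf.intercept_[0]   # sklearn linear SVC hyperplane

# Normalize (w, b) by ||w|| so the two hyperplanes are comparable up to scale.
print("SMO     :", np.append(w_smo, b_smo) / np.linalg.norm(w_smo))
print("sklearn :", np.append(w_skl, b_skl) / np.linalg.norm(w_skl))
```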