感知机学习算法的 Python 实现

参考李航《统计学习方法》开头的感知机章节。算法本身不复杂，这里给出原始形式（例 2.1）和对偶形式（例 2.2）两种实现。

 1 """
 2 感知机学习算法的原始形式
 3 例2.1
 4 """
 5 import numpy as np
 6 
 7 class Perceptron:
 8     def __init__(self,w,b,alpha):
 9         self.w = w
10         self.b = b
11         self.alpha = alpha
12 
13     def loss(self,x,y):
14         return np.sum( y*(np.dot(x, self.w) + self.b) )
15     
16     def sgd(self,x,y):  # 随机梯度下降函数
17         self.w += self.alpha * y * x
18         self.b += self.alpha * y
19 
20     def train(self,X,Y):
21         while(True):
22             M = len(X)   # 错误分类数
23             for i in range(len(X)):
24                 if self.loss(X[i],Y[i])<=0:
25                     self.sgd(X[i],Y[i])
26                     print "w:",self.w," b:",self.b
27                 else:
28                     M -= 1
29             if not M:
30                 print "final optimal:","w:",self.w," b:",self.b
31                 break
32 
class Perceptron_dual:
    """Dual-form perceptron (Li Hang, "Statistical Learning Methods", Example 2.2).

    Represents the weight vector implicitly as w = sum_i alpha_i * y_i * x_i,
    so training only needs the Gram matrix of pairwise inner products.
    """

    def __init__(self, alpha, b, ita):
        # alpha: 1-D array, one entry per training sample; alpha[i] accumulates
        #        eta * (number of updates triggered by sample i)
        # b: initial bias (scalar)
        # ita: learning rate (eta in the book)
        self.alpha = alpha
        self.b = b
        self.ita = ita

    def gram(self, X):
        """Gram matrix G with G[i, j] = x_i . x_j."""
        return np.dot(X, X.T)

    def train(self, X, Y, max_epochs=None):
        """Cycle through the data until a full pass makes no mistakes.

        X: 2-D array of samples (one row per sample); Y: 1-D array of +/-1 labels.
        max_epochs: optional safety cap on full passes; None (the default)
            preserves the original behavior of looping until convergence.
        """
        g = self.gram(X)  # computed once; the loop never needs X directly
        epoch = 0
        while True:
            mistakes = 0  # replaces the original count-down of correct samples
            for j in range(len(X)):
                # y_j * (sum_i alpha_i y_i (x_i . x_j) + b) <= 0  <=>  misclassified
                if Y[j] * (np.sum(self.alpha * Y * g[j]) + self.b) <= 0:
                    self.alpha[j] += self.ita
                    self.b += self.ita * Y[j]
                    print("a:", self.alpha, " b:", self.b)
                    mistakes += 1
            if mistakes == 0:
                print("final optimal:", "a:", self.alpha, " b:", self.b)
                break
            epoch += 1
            if max_epochs is not None and epoch >= max_epochs:
                break
58 
if __name__ == "__main__":
    # Training set from Example 2.2: two positive samples, one negative.
    samples = np.array([[3, 3], [4, 3], [1, 1]])
    labels = np.array([1, 1, -1])

    # Dual-form perceptron: zero initial alphas, zero bias, learning rate 1.
    model = Perceptron_dual(np.zeros(labels.shape), 0, 1)
    model.train(samples, labels)

 

posted on 2015-07-31 22:18  hanahimi  阅读(772)  评论(0编辑  收藏  举报

导航