Neural Network Comparison


1. Binary classification only

Two versions follow: first a fixed number of layers, then a variable number of layers.
'''
11-line neural network ①
Fixed three layers, binary classification
'''
# Only handles the two classes 0 and 1; other labels must be converted first
import numpy as np

X = np.array([[0,0,1],[0,1,1],[1,0,1],[1,1,1]])
y = np.array([0,1,1,0]).reshape(-1,1) # XOR labels; reshape to a column vector to keep the implementation concise

wi = 2*np.random.random((3,5)) - 1   # weights initialized uniformly in [-1, 1)
wh = 2*np.random.random((5,1)) - 1

for j in range(10000):
    # forward pass with sigmoid activations
    li = X
    lh = 1/(1+np.exp(-(np.dot(li,wi))))
    lo = 1/(1+np.exp(-(np.dot(lh,wh))))

    # backpropagation: error times the sigmoid derivative
    lo_delta = (y - lo)*(lo*(1-lo))
    lh_delta = np.dot(lo_delta, wh.T) * (lh * (1-lh))

    # gradient step (no explicit learning rate)
    wh += np.dot(lh.T, lo_delta)
    wi += np.dot(li.T, lh_delta)
    
print('Training output:', lo)
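
As a minimal usage sketch (my addition, assuming the loop above has converged): run the same forward pass on a new input and threshold the sigmoid output at 0.5. The predict helper is hypothetical, not part of the original 11-line version.

def predict(x):
    # hypothetical helper, not in the original code
    lh = 1/(1+np.exp(-np.dot(x, wi)))
    lo = 1/(1+np.exp(-np.dot(lh, wh)))
    return (lo > 0.5).astype(int)

print(predict(np.array([[0,1,1]])))  # should recover the training label 1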
'''
11-line neural network ①
Variable number of layers, binary classification
'''
# Only handles the two classes 0 and 1; other labels must be converted first
import numpy as np

X = np.array([[0,0,1],[0,1,1],[1,0,1],[1,1,1]])
y = np.array([0,1,1,0]).reshape(-1,1) # XOR labels; reshape to a column vector to keep the implementation concise

neurals = [3,15,1]
w = [np.random.randn(i,j) for i,j in zip(neurals[:-1], neurals[1:])] + [None]
l = [None] * len(neurals)
l_delta = [None] * len(neurals)

for j in range(1000):
    # forward pass
    l[0] = X
    for i in range(1, len(neurals)):
        l[i] = 1 / (1 + np.exp(-(np.dot(l[i-1], w[i-1]))))

    # backpropagation
    l_delta[-1] = (y - l[-1]) * (l[-1] * (1 - l[-1]))
    for i in range(len(neurals)-2, 0, -1):
        l_delta[i] = np.dot(l_delta[i+1], w[i].T) * (l[i] * (1 - l[i]))

    # weight updates
    for i in range(len(neurals)-2, -1, -1):
        w[i] += np.dot(l[i].T, l_delta[i+1])

print('Training output:', l[-1])
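
The comment above says labels outside {0, 1} have to be converted first; here is one hypothetical way to do it (my addition, with made-up raw labels), producing the same column-vector shape as the y used above:

raw_labels = np.array(['cat', 'dog', 'dog', 'cat'])        # hypothetical raw labels
y_binary = (raw_labels == 'dog').astype(int).reshape(-1,1) # 'dog' -> 1, everything else -> 0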


2. Multiclass classification

Again two versions: first a fixed number of layers, then a variable number of layers.
'''
11-line neural network ①
Fixed three layers, multiclass
'''
import numpy as np

X = np.array([[0,0,1],[0,1,1],[1,0,1],[1,1,1]])
#y = np.array([0,1,1,0]) # binary labels also work
y = np.array([0,1,2,3])  # multiclass labels

wi = np.random.randn(3,5)
wh = np.random.randn(5,4)  # 4 output units, one per class
bh = np.random.randn(1,5)
bo = np.random.randn(1,4)  # one bias per class

epsilon = 0.01    # learning rate
lamda = 0.01      # regularization strength

for j in range(1000):
    # forward pass
    li = X
    lh = np.tanh(np.dot(li, wi) + bh)     # tanh activation
    lo = np.exp(np.dot(lh, wh) + bo)
    probs = lo / np.sum(lo, axis=1, keepdims=True)  # softmax

    # backpropagation: for softmax + cross-entropy the output delta is probs - one_hot(y)
    lo_delta = np.copy(probs)
    lo_delta[range(X.shape[0]), y] -= 1
    lh_delta = np.dot(lo_delta, wh.T) * (1 - np.power(lh, 2))  # derivative of tanh

    # update weights and biases
    wh -= epsilon * (np.dot(lh.T, lo_delta) + lamda * wh)
    wi -= epsilon * (np.dot(li.T, lh_delta) + lamda * wi)
    
    bo -= epsilon * np.sum(lo_delta, axis=0, keepdims=True)
    bh -= epsilon * np.sum(lh_delta, axis=0)
    
print('Training output:', np.argmax(probs, axis=1))
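
A quick sanity check (my addition, not in the original post): the two delta lines above are just the softmax-plus-cross-entropy gradient, probs minus the one-hot encoding of y.

one_hot = np.zeros_like(probs)
one_hot[range(X.shape[0]), y] = 1
assert np.allclose(lo_delta, probs - one_hot)  # holds for the last iteration's values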


'''
11-line neural network ①
Variable number of layers, multiclass
'''
import numpy as np

X = np.array([[0,0,1],[0,1,1],[1,0,1],[1,1,1]])
#y = np.array([0,1,1,0]) # binary labels also work
y = np.array([0,1,2,3])  # multiclass labels

neurals = [3, 10, 8, 4]
w = [np.random.randn(i,j) for i,j in zip(neurals[:-1], neurals[1:])] + [None]
b = [None] + [np.random.randn(1,j) for j in neurals[1:]]
l = [None] * len(neurals)
l_delta = [None] * len(neurals)

epsilon = 0.01    # learning rate
lamda = 0.01      # regularization strength

for j in range(1000):
    # forward pass
    l[0] = X
    for i in range(1, len(neurals)-1):
        l[i] = np.tanh(np.dot(l[i-1], w[i-1]) + b[i])     # tanh activation
    l[-1] = np.exp(np.dot(l[-2], w[-2]) + b[-1])
    probs = l[-1] / np.sum(l[-1], axis=1, keepdims=True)  # softmax

    # backpropagation
    l_delta[-1] = np.copy(probs)
    l_delta[-1][range(X.shape[0]), y] -= 1
    for i in range(len(neurals)-2, 0, -1):
        l_delta[i] = np.dot(l_delta[i+1], w[i].T) * (1 - np.power(l[i], 2))  # derivative of tanh

    # update weights and biases
    b[-1] -= epsilon * np.sum(l_delta[-1], axis=0, keepdims=True)
    for i in range(len(neurals)-2, -1, -1):
        w[i] -= epsilon * (np.dot(l[i].T, l_delta[i+1]) + lamda * w[i])
        if i == 0: break
        b[i] -= epsilon * np.sum(l_delta[i], axis=0)

    # print the loss
    if j % 100 == 0:
        loss = np.sum(-np.log(probs[range(X.shape[0]), y]))
        loss += lamda/2 * np.sum([np.sum(np.square(wi)) for wi in w[:-1]])  # optional regularization term
        loss *= 1/X.shape[0]  # optional averaging
        print('loss:', loss)

print('Training output:', np.argmax(probs, axis=1))
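
And a minimal prediction sketch (my addition, assuming the training loop above has run): reuse the forward pass on a new sample and take the argmax over the class probabilities. predict is a hypothetical helper, not part of the original code.

def predict(x):
    # hypothetical helper, not in the original code
    a = x
    for i in range(1, len(neurals)-1):
        a = np.tanh(np.dot(a, w[i-1]) + b[i])
    scores = np.exp(np.dot(a, w[-2]) + b[-1])
    return np.argmax(scores / np.sum(scores, axis=1, keepdims=True), axis=1)

print(predict(np.array([[1,0,1]])))  # should recover the training label 2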


posted @ 2016-03-27 03:56  罗兵