Writing a Neural Network by Hand
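
Below is a complete NumPy implementation of a tiny network: two inputs feed a hidden layer of two sigmoid neurons (h1, h2), whose outputs feed a single sigmoid output neuron (o1). Training is plain stochastic gradient descent on a mean squared error loss, with every partial derivative written out by hand.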

import numpy as np

def sigmoid(x):
    # Sigmoid activation function: f(x) = 1 / (1 + e^(-x))
    return 1 / (1 + np.exp(-x))

def deriv_sigmoid(x):
    # Derivative of the sigmoid: f'(x) = f(x) * (1 - f(x))
    fx = sigmoid(x)
    return fx * (1 - fx)
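
# Sanity check (my addition, not in the original post): deriv_sigmoid should
# agree with a central finite difference of sigmoid at an arbitrary point.
_eps = 1e-6
assert abs((sigmoid(0.5 + _eps) - sigmoid(0.5 - _eps)) / (2 * _eps)
           - deriv_sigmoid(0.5)) < 1e-8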

def mse_loss(y_true, y_pred):
    # y_true and y_pred are numpy arrays of the same length
    return ((y_true - y_pred)**2).mean()
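
# Quick worked example of the loss (sample values made up for illustration):
#   ((1 - 0.9)**2 + (0 - 0.2)**2) / 2 = (0.01 + 0.04) / 2 = 0.025
assert abs(mse_loss(np.array([1, 0]), np.array([0.9, 0.2])) - 0.025) < 1e-12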

class OurNeuralNetwork:
    def __init__(self):
        # Weights
        self.w1 = np.random.normal()
        self.w2 = np.random.normal()
        self.w3 = np.random.normal()
        self.w4 = np.random.normal()
        self.w5 = np.random.normal()
        self.w6 = np.random.normal()

        # Biases
        self.b1 = np.random.normal()
        self.b2 = np.random.normal()
        self.b3 = np.random.normal()

    def feedforward(self, x):
        # x is a numpy array with 2 elements
        h1 = sigmoid(self.w1 * x[0] + self.w2 * x[1] + self.b1)
        h2 = sigmoid(self.w3 * x[0] + self.w4 * x[1] + self.b2)
        o1 = sigmoid(self.w5 * h1 + self.w6 * h2 + self.b3)
        return o1

    def train(self, data, all_y_trues):
        # data is an (n x 2) numpy array, one row per sample
        # all_y_trues is a numpy array with n elements
        learn_rate = 0.1
        epochs = 1000

        for epoch in range(epochs):
            for x, y_true in zip(data, all_y_trues):
                # --- Do a feedforward pass, keeping the intermediate sums for backprop
                sum_h1 = self.w1 * x[0] + self.w2 * x[1] + self.b1
                h1 = sigmoid(sum_h1)

                sum_h2 = self.w3 * x[0] + self.w4 * x[1] + self.b2
                h2 = sigmoid(sum_h2)

                sum_o1 = self.w5 * h1 + self.w6 * h2 + self.b3
                o1 = sigmoid(sum_o1)

                y_pred = o1

                # --- Calculate the partial derivatives
                # Naming: d_L_d_w1 means "the partial derivative of L with respect to w1"

                d_L_d_ypred = -2 * (y_true - y_pred)

                # Neuron o1
                d_ypred_d_w5 = h1 * deriv_sigmoid(sum_o1)
                d_ypred_d_w6 = h2 * deriv_sigmoid(sum_o1)
                d_ypred_d_b3 = deriv_sigmoid(sum_o1)

                d_ypred_d_h1 = self.w5 * deriv_sigmoid(sum_o1)
                d_ypred_d_h2 = self.w6 * deriv_sigmoid(sum_o1)

                # Neuron h1
                d_h1_d_w1 = x[0] * deriv_sigmoid(sum_h1)
                d_h1_d_w2 = x[1] * deriv_sigmoid(sum_h1)
                d_h1_d_b1 = deriv_sigmoid(sum_h1)

                # Neuron h2
                d_h2_d_w3 = x[0] * deriv_sigmoid(sum_h2)
                d_h2_d_w4 = x[1] * deriv_sigmoid(sum_h2)
                d_h2_d_b2 = deriv_sigmoid(sum_h2)


                # --- Update weights and biases
                # Neuron h1
                self.w1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1
                self.w2 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2
                self.b1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1

                # Neuron h2
                self.w3 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3
                self.w4 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4
                self.b2 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2

                # Neuron o1
                self.w5 -= learn_rate * d_L_d_ypred * d_ypred_d_w5
                self.w6 -= learn_rate * d_L_d_ypred * d_ypred_d_w6
                self.b3 -= learn_rate * d_L_d_ypred * d_ypred_d_b3

            # --- Calculate the total loss every 10 epochs
            if epoch % 10 == 0:
                y_preds = np.apply_along_axis(self.feedforward, 1, data)
                loss = mse_loss(all_y_trues, y_preds)
                print("Epoch %d loss: %.3f" % (epoch, loss))

# Define the dataset
data = np.array([
    [-2, -1],
    [25, 6],
    [17, 4],
    [-15, -6],
])

all_y_trues = np.array([1, 0, 0, 1])

# Train the neural network
network = OurNeuralNetwork()
network.train(data, all_y_trues)

# Make some predictions
emily = np.array([-7, -3])
frank = np.array([20, 2])

print("Emily: %.3f" % network.feedforward(emily))
print("Frank: %.3f" % network.feedforward(frank))
