An Introduction to Machine Learning in One Piece of Code
import numpy as np
# Activation function: squashes any real number into the range (0, 1), so the
# output can be read as a probability for a binary label (e.g. female / male).
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
# Derivative of the sigmoid: f'(x) = f(x) * (1 - f(x)).
def deriv_sigmoid(x):
    fx = sigmoid(x)
    return fx * (1 - fx)

# Mean squared error loss; y_true and y_pred are numpy arrays of the same length.
def mse_loss(y_true, y_pred):
    return ((y_true - y_pred) ** 2).mean()
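# Quick sanity check (not part of the original listing, feel free to remove):
# sigmoid stays strictly between 0 and 1, and the MSE of a perfect prediction is 0.
print(sigmoid(-10), sigmoid(0), sigmoid(10))  # ~0.0000454, 0.5, ~0.9999546
print(mse_loss(np.array([1, 0, 0, 1]), np.array([1, 0, 0, 1])))  # 0.0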
# A basic neuron. The bias plays the role of a negative threshold: the lower the
# bias, the harder it is for the neuron to activate; the higher the bias, the
# easier (sigmoid is monotonically increasing, so adding, say, 10 to the weighted
# sum pushes the output toward 1).
class Neuron:
    def __init__(self, weights, bias):
        self.weights = weights
        self.bias = bias

    def feedforward(self, inputs):
        # Weight the inputs, add the bias, then apply the activation function.
        total = np.dot(self.weights, inputs) + self.bias
        return sigmoid(total)
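# Usage example (not in the original listing): a neuron with weights [0, 1] and
# bias 4 applied to the input [2, 3] computes sigmoid(0*2 + 1*3 + 4) = sigmoid(7).
example_neuron = Neuron(np.array([0, 1]), 4)
print(example_neuron.feedforward(np.array([2, 3])))  # ~0.999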
# A small network: 2 inputs, one hidden layer with 2 neurons (h1, h2), and 1 output neuron (o1).
class OurNeuralNetwork:
    def __init__(self):
        ## Weights and biases start out as random values. No particular
        ## initialization scheme is used here; it should be mentioned that real
        ## code is more careful, but this is good enough for a toy network.
        self.w1 = np.random.normal()
        self.w2 = np.random.normal()
        self.w3 = np.random.normal()
        self.w4 = np.random.normal()
        self.w5 = np.random.normal()
        self.w6 = np.random.normal()
        ## Biases
        self.b1 = np.random.normal()
        self.b2 = np.random.normal()
        self.b3 = np.random.normal()
    def feedforward(self, x):
        # x is a numpy array with two elements (the two input features).
        h1 = sigmoid(self.w1 * x[0] + self.w2 * x[1] + self.b1)
        h2 = sigmoid(self.w3 * x[0] + self.w4 * x[1] + self.b2)
        return sigmoid(self.w5 * h1 + self.w6 * h2 + self.b3)
    ## data is an (n x 2) numpy array; all_y_trues is a numpy array with n elements,
    ## one label per row of data.
    def train(self, data, all_y_trues):
        learn_rate = 0.1
        # number of times to loop through the entire dataset
        epochs = 1000
        for epoch in range(epochs):
            for x, y_true in zip(data, all_y_trues):
                # --- Do a feedforward, keeping the intermediate sums (we'll need them below)
                sum_h1 = self.w1 * x[0] + self.w2 * x[1] + self.b1
                h1 = sigmoid(sum_h1)
                sum_h2 = self.w3 * x[0] + self.w4 * x[1] + self.b2
                h2 = sigmoid(sum_h2)
                sum_o1 = self.w5 * h1 + self.w6 * h2 + self.b3
                o1 = sigmoid(sum_o1)
                y_pred = o1
                # --- Calculate partial derivatives.
                # --- Naming: d_L_d_w1 represents "partial L / partial w1"
                d_L_d_ypred = -2 * (y_true - y_pred)
                # Neuron o1
                d_ypred_d_w5 = h1 * deriv_sigmoid(sum_o1)
                d_ypred_d_w6 = h2 * deriv_sigmoid(sum_o1)
                d_ypred_d_b3 = deriv_sigmoid(sum_o1)
                d_ypred_d_h1 = self.w5 * deriv_sigmoid(sum_o1)
                d_ypred_d_h2 = self.w6 * deriv_sigmoid(sum_o1)
                # Neuron h1
                d_h1_d_w1 = x[0] * deriv_sigmoid(sum_h1)
                d_h1_d_w2 = x[1] * deriv_sigmoid(sum_h1)
                d_h1_d_b1 = deriv_sigmoid(sum_h1)
                # Neuron h2
                d_h2_d_w3 = x[0] * deriv_sigmoid(sum_h2)
                d_h2_d_w4 = x[1] * deriv_sigmoid(sum_h2)
                d_h2_d_b2 = deriv_sigmoid(sum_h2)
                # --- Update weights and biases (stochastic gradient descent)
                ## Update rule: Δv = -η∇C, where η is the learning rate and ∇C is the
                ## gradient of the loss with respect to the parameters (∇ is the nabla operator).
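                ## Worked example of the chain rule used below (illustrative comment,
                ## not in the original listing): for w1,
                ##   ∂L/∂w1 = (∂L/∂y_pred) * (∂y_pred/∂h1) * (∂h1/∂w1)
                ## which is exactly d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1.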
                # Neuron h1
                self.w1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1
                self.w2 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2
                self.b1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1
                # Neuron h2
                self.w3 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3
                self.w4 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4
                self.b2 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2
                # Neuron o1
                self.w5 -= learn_rate * d_L_d_ypred * d_ypred_d_w5
                self.w6 -= learn_rate * d_L_d_ypred * d_ypred_d_w6
                self.b3 -= learn_rate * d_L_d_ypred * d_ypred_d_b3
            # --- Report the loss on the whole dataset every 10 epochs
            if epoch % 10 == 0:
                ## apply_along_axis runs feedforward on each row of data (axis=1 slices rows).
                y_preds = np.apply_along_axis(self.feedforward, 1, data)
                loss = mse_loss(all_y_trues, y_preds)
                print("Epoch %d loss: %.3f" % (epoch, loss))
data = np.array([
    [-2, -1],   # Alice
    [25, 6],    # Bob
    [17, 4],    # Charlie
    [-15, -6],  # Diana
])
# Labels for the four people above: one binary class per person (e.g. 1 = female, 0 = male).
all_y_trues = np.array([
    1,  # Alice
    0,  # Bob
    0,  # Charlie
    1,  # Diana
])

network = OurNeuralNetwork()
network.train(data, all_y_trues)
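# After training, the network can make predictions on new points (this check is
# not part of the original listing; exact outputs vary with the random initialization).
# Points shaped like Alice/Diana should score near 1, points like Bob/Charlie near 0.
print("[-7, -3] -> %.3f" % network.feedforward(np.array([-7, -3])))  # expect close to 1
print("[20, 2]  -> %.3f" % network.feedforward(np.array([20, 2])))   # expect close to 0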
This network is built from classic logistic-regression-style units (sigmoid neurons), and the optimizer (solver) is plain stochastic gradient descent (SGD). SGD is not the best algorithm, but it is simple and easy to extend later.
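For comparison, the same kind of model can be fit with an off-the-shelf library. A minimal sketch, assuming scikit-learn is installed (a version recent enough that the logistic loss is spelled "log_loss") and reusing the data and all_y_trues arrays defined above: SGDClassifier with a logistic loss is exactly logistic regression trained by SGD.

from sklearn.linear_model import SGDClassifier

# Logistic loss + constant learning rate roughly mirrors the hand-written SGD above.
clf = SGDClassifier(loss="log_loss", learning_rate="constant", eta0=0.1, max_iter=1000)
clf.fit(data, all_y_trues)
print(clf.predict([[-7, -3], [20, 2]]))        # expected: [1 0]
print(clf.predict_proba([[-7, -3], [20, 2]]))  # class probabilities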