全连接网络拟合函数曲线的一些代码

Net

复制代码
import numpy as np
from matplotlib import pyplot as plt

# Sigmoid activation: squashes x element-wise into (0, 1).
def sigmoid(x):
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)

# Derivative of the sigmoid, evaluated at the pre-activation value `dout`.
def d_sigmoid(dout):
    s = sigmoid(dout)
    return s * (1 - s)

# ReLU activation: max(0, x), applied element-wise.
def ReLU(x):
    return np.maximum(0, x)

# Derivative of ReLU w.r.t. its input: 1 where the input is positive, else 0.
# BUG FIX: the original returned -np.minimum(0, dout), which is |x| on the
# negative side and 0 on the positive side -- not ReLU's gradient at all.
def d_ReLU(dout):
    return np.where(dout > 0, 1.0, 0.0)
    
# Hyperbolic tangent activation.
# BUG FIX: the original computed (exp(2x)-1)/(exp(2x)+1), which overflows to
# nan (with a RuntimeWarning) for large positive x; np.tanh is numerically
# stable over the whole real line and mathematically identical.
def tanh(x):
    return np.tanh(x)

# Derivative of tanh: 1 - tanh(x)^2 (evaluated at the pre-activation x).
def d_tanh(x):
    t = tanh(x)
    return 1 - t * t




# Parameter initialization.
def init(lay_size):
    """Create weight/bias parameters for a fully-connected net.

    lay_size lists the neuron count of every layer (input and output layers
    included).  Returns a dict with keys 'W1','b1','W2','b2',... where Wi has
    shape (lay_size[i], lay_size[i-1]) drawn uniformly from [0, 1) and bi is
    a zero column vector of shape (lay_size[i], 1).
    """
    para = {}
    for i, (n_out, n_in) in enumerate(zip(lay_size[1:], lay_size[:-1]), start=1):
        para[f'W{i}'] = np.random.random([n_out, n_in])
        para[f'b{i}'] = np.zeros((n_out, 1))
    return para


def forward(x, para):
    """Forward pass.  x: input data, shape (features, samples).

    Returns (cache, prediction) where cache stores 'Zi' (pre-activation) and
    'Ai' (post-activation) for every layer, with 'A0'/'Z0' both set to x.
    Hidden layers use tanh; the output layer is linear (no activation).
    """
    cache = {'A0': x, 'Z0': x}
    n_layers = len(para) // 2
    a = x
    # Hidden layers: affine transform followed by tanh.
    for i in range(1, n_layers):
        z = para[f'W{i}'] @ a + para[f'b{i}']
        a = tanh(z)
        cache[f'Z{i}'] = z
        cache[f'A{i}'] = a
    # Output layer: affine only.
    z_out = para[f'W{n_layers}'] @ a + para[f'b{n_layers}']
    cache[f'Z{n_layers}'] = z_out
    cache[f'A{n_layers}'] = z_out
    return cache, z_out

def backward(para, TEMP, real_y, pred_y):
    """Backpropagation; returns a dict of gradients for every layer.

    para: parameters from init(); TEMP: cache from forward();
    real_y / pred_y: targets and predictions, shape (1, m).
    Gradient keys mirror the parameters ('dWi', 'dbi') plus 'dLayi',
    the error term propagated at layer i.
    """
    grad = {}
    m = real_y.shape[1]
    n_layers = len(para) // 2

    # Output layer is linear, so its error term is simply (pred - target).
    delta = pred_y - real_y
    grad[f'dLay{n_layers}'] = delta
    grad[f'dW{n_layers}'] = (delta @ TEMP[f'A{n_layers - 1}'].T) / m
    grad[f'db{n_layers}'] = np.sum(delta, axis=1, keepdims=True) / m

    # Walk backwards through the tanh hidden layers.
    for i in range(n_layers - 1, 0, -1):
        delta = (para[f'W{i + 1}'].T @ delta) * d_tanh(TEMP[f'Z{i}'])
        grad[f'dLay{i}'] = delta
        grad[f'dW{i}'] = (delta @ TEMP[f'A{i - 1}'].T) / m
        grad[f'db{i}'] = np.sum(delta, axis=1, keepdims=True) / m

    return grad



def grad_descent(para, grad, lr):
    """Vanilla gradient-descent step: update every W/b in place, return para."""
    n_layers = len(para) // 2
    for i in range(1, n_layers + 1):
        for prefix in ('W', 'b'):
            key = prefix + str(i)
            para[key] -= lr * grad['d' + key]
    return para


def cost(pred_y, real_y):
    """Mean squared error over the m samples (arrays shaped (1, m)).

    BUG FIX: the original divided by real_y.shape[0] -- the row count, which
    is 1 for this data layout -- so it returned the total sum of squared
    errors rather than the mean.  Divide by the sample count instead.
    """
    m = real_y.shape[1]
    return np.sum((pred_y - real_y) ** 2) / m

def data():
    """Build the training curve y = x^2 on [0, 10) with step 0.01.

    Also scatter-plots the raw points on the current matplotlib figure.
    Returns (x, y) as row vectors of shape (1, n_samples).
    """
    start, stop, step = 0, 10, 0.01
    n = int(abs((stop - start) / step))
    xs = np.arange(start, stop, step)
    ys = xs ** 2
    plt.scatter(xs, ys)
    return xs.reshape(1, n), ys.reshape(1, n)

class z_score:
    """Per-row z-score normalizer that remembers its fitted statistics.

    zscore(x, True) fits mean/std along axis 1 and normalizes; a later
    zscore(x, False) inverts the transform with the stored statistics.
    NOTE(review): the stats are kept with shape (rows,), so broadcasting is
    only correct for single-row data as used here -- confirm before feeding
    multi-row input.
    """
    def __init__(self):
        self.var = 0  # row-wise standard deviation, set on the first fit call
        self.avg = 0  # row-wise mean, set on the first fit call
    def zscore(self, x, command=True):
        if command == True:  # fit statistics, then normalize
            self.avg = np.mean(x, axis=1)
            self.var = np.std(x, axis=1)
            return (x - self.avg) / self.var
        elif command == False:  # inverse transform with stored statistics
            return x * self.var + self.avg


def train():
    """Train the net to fit y = x^2, plot the fit, and save the parameters."""
    x, y = data()

    # Standardize inputs and targets; the fitted stats are kept for inversion.
    z_x = z_score()
    z_y = z_score()
    x = z_x.zscore(x, True)
    y = z_y.zscore(y, True)

    layer_sizes = [1, 10, 20, 1]  # 1 input -> two tanh hidden layers -> 1 output
    para = init(layer_sizes)

    TEMP, out = forward(x, para)
    for _ in range(6000):  # number of gradient-descent steps
        TEMP, out = forward(x, para)
        grad = backward(para, TEMP, y, out)
        para = grad_descent(para, grad, 0.08)

    # Undo the normalization so the plot is in the original scale.
    x = z_x.zscore(x, False)
    out = z_y.zscore(out, False)

    np.save('para.npy', para)
    plt.scatter(x, out, marker='+')
    plt.show()

train()
复制代码

Test

复制代码
import numpy as np
   
# Hyperbolic tangent activation (must match the one used during training).
# BUG FIX: the original computed (exp(2x)-1)/(exp(2x)+1), which overflows to
# nan for large positive x; np.tanh is numerically stable and mathematically
# identical.
def tanh(x):
    return np.tanh(x)

# Derivative of tanh: 1 - tanh(x)^2 (unused at inference time, kept for parity
# with the training script).
def d_tanh(x):
    t = tanh(x)
    return 1 - t * t


def forward(x, para):
    """Inference-only forward pass; returns the network output for x.

    x has shape (features, samples).  Hidden layers use tanh; the output
    layer is linear.  (Unlike the training version, no activation cache is
    returned -- only the prediction.)
    """
    n_layers = len(para) // 2
    a = x
    for i in range(1, n_layers):
        a = tanh(para[f'W{i}'] @ a + para[f'b{i}'])
    # Final layer: affine only, no activation.
    return para[f'W{n_layers}'] @ a + para[f'b{n_layers}']

class z_score:
    """Per-row z-score normalizer that remembers its fitted statistics.

    zscore(x, True) fits mean/std along axis 1 and normalizes; a later
    zscore(x, False) inverts the transform with the stored statistics.
    NOTE(review): stats have shape (rows,), so broadcasting is only correct
    for the single-row data used here.
    """
    def __init__(self):
        self.var = 0  # row-wise standard deviation, set on the first fit call
        self.avg = 0  # row-wise mean, set on the first fit call
    def zscore(self, x, command=True):
        if command == True:  # fit statistics, then normalize
            self.avg = np.mean(x, axis=1)
            self.var = np.std(x, axis=1)
            return (x - self.avg) / self.var
        elif command == False:  # inverse transform with stored statistics
            return x * self.var + self.avg


# ---- Rebuild the same dataset used for training ----
step = 0.01
start, stop = 0, 10
row = int(abs((stop - start) / step))

x = np.arange(start, stop, step)
y = x ** 2

x = x.reshape(1, row)
y = y.reshape(1, row)

# Standardize with the same scheme used during training.
z_x = z_score()
z_y = z_score()
x = z_x.zscore(x, True)
y = z_y.zscore(y, True)

# NOTE(review): allow_pickle=True runs the pickle machinery on load --
# only load parameter files from a trusted source.
para = np.load('para.npy', allow_pickle=True).item()
out = forward(x, para)

# Map everything back to the original scale before comparing.
x = z_x.zscore(x, False)
out = z_y.zscore(out, False)
# BUG FIX: the original left y normalized while out was de-normalized,
# so targets and predictions were compared on different scales.
y = z_y.zscore(y, False)

# BUG FIX: the original used exact float inequality (y[0,i] != out[0,i]),
# which flags essentially every regression output as wrong, so the printed
# score was always ~0.  Count a prediction as wrong only when it is not
# close to its target.  The tolerances are a judgment call -- tune as needed.
wrong = int(np.count_nonzero(~np.isclose(out[0], y[0], rtol=0.05, atol=0.5)))
print(1 - (wrong / row))
复制代码

 

posted @   ZeroHzzzz  阅读(21)  评论(0编辑  收藏  举报
编辑推荐:
· AI与.NET技术实操系列(二):开始使用ML.NET
· 记一次.NET内存居高不下排查解决与启示
· 探究高空视频全景AR技术的实现原理
· 理解Rust引用及其生命周期标识(上)
· 浏览器原生「磁吸」效果!Anchor Positioning 锚点定位神器解析
阅读排行:
· 全程不用写代码,我用AI程序员写了一个飞机大战
· DeepSeek 开源周回顾「GitHub 热点速览」
· 记一次.NET内存居高不下排查解决与启示
· MongoDB 8.0这个新功能碉堡了,比商业数据库还牛
· .NET10 - 预览版1新功能体验(一)
点击右上角即可分享
微信分享提示