Python: mass spectrometry data, noise added, binary classification with a wavelet neural network

# Import libraries
import numpy as np
import pandas as pd
import math

# Activation function
def tanh(x):
    return (np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))
# Derivative of the activation function (expressed in terms of the tanh output)
def de_tanh(x):
    return (1-x**2)
# Wavelet basis function (a Morlet-type wavelet)
def wavelet(x):
    return (math.cos(1.75*x)) * (np.exp((x**2)/(-2)))
# Derivative of the wavelet basis function (with respect to the input x, not the output)
def de_wavelet(x):
    y = (-1) * (1.75 * math.sin(1.75 * x)  + x * math.cos(1.75 * x)) * (np.exp(( x **2)/(-2)))
    return y
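As a quick numerical sanity check (a minimal sketch of mine, not part of the original script; `check_de_wavelet` is a made-up helper name), the analytic derivative can be compared against a central finite difference of `wavelet`, reusing the two definitions above:

def check_de_wavelet(x, h=1e-6):
    # central finite-difference approximation of d/dx wavelet(x)
    fd = (wavelet(x + h) - wavelet(x - h)) / (2*h)
    return abs(fd - de_wavelet(x))

for x0 in [-2.0, -0.5, 0.0, 1.0, 3.0]:
    assert check_de_wavelet(x0) < 1e-6  # analytic and numeric derivatives agree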

# Data input
data=pd.read_pickle('ICC_rms.pkl')
df=pd.DataFrame(data)
X = df.iloc[:, 0:510].values  # feature values of all samples, columns 0-509; shape (1544, 510), i.e. 1544 samples with 510 features
y = df.iloc[:, 511].values  # labels of all samples, column index 511; shape (1544,)
# Convert y to 0/1 form: 'Neurons' -> 0, 'Astrocytes' -> 1
Y=np.array([-1.0] * 1544)
for i in range(len(y)):
    if y[i] =='Neurons':
        Y[i]=0
    if y[i] =='Astrocytes':
        Y[i]=1
# y=['Neurons' 'Neurons' 'Neurons' ... 'Astrocytes' 'Astrocytes' 'Astrocytes']
# Y=[0. 0. 0. ... 1. 1. 1.]
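The same encoding can be written in one vectorized line (a minimal sketch, assuming the label column contains exactly these two strings):

Y = (y == 'Astrocytes').astype(float)  # 'Astrocytes' -> 1.0, everything else ('Neurons') -> 0.0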


# #Parameter settings (from the original time-series regression example)
# samnum = 72   #number of input samples
# hiddenunitnum = 8   #number of hidden-layer nodes
# indim = 4   #number of input-layer nodes
# outdim = 1   #number of output-layer nodes
# maxepochs = 500   #number of iterations
# errorfinal = 0.65*10**(-3)   #stopping criterion for training
# learnrate = 0.001   #learning rate

#Parameter settings
samnum = 1544   #number of input samples
hiddenunitnum = 8   #number of hidden-layer nodes
indim = 510   #number of input-layer nodes
outdim = 1   #number of output-layer nodes
maxepochs = 500   #number of iterations
errorfinal = 0.65*10**(-3)   #stopping criterion for training
learnrate = 0.001   #learning rate

# Load input data (leftover from the original regression example; Ti and samplein
# are overwritten below with the mass-spectrometry labels Y and features X)
df = pd.read_csv("train.csv")
df.columns = ["Co", "Cr", "Mg", "Pb", "Ti"]
Co = df["Co"]
Co = np.array(Co)
Cr = df["Cr"]
Cr = np.array(Cr)
Mg=df["Mg"]
Mg=np.array(Mg)
Pb = df["Pb"]
Pb =np.array(Pb)
Ti = df["Ti"]
# Tisample = np.array(Ti)
# sampleinshaple = np.mat([Co,Cr,Mg,Pb])
Ti = np.array(Y)
samplein = np.mat(X.T)

# Data normalization: compress the inputs into [0, 1] for easier computation;
# the original values are recovered later by de-normalization
sampleinminmax = np.array([samplein.min(axis=1).T.tolist()[0],samplein.max(axis=1).T.tolist()[0]]).transpose() # per-feature min and max
# The quantity to predict is Ti (overwritten above with the class labels Y)
sampleout = np.mat([Ti])
sampleoutminmax = np.array([sampleout.min(axis=1).T.tolist()[0],sampleout.max(axis=1).T.tolist()[0]]).transpose() # min and max of the output
sampleinnorm = ((np.array(samplein.T)-sampleinminmax.transpose()[0])/(sampleinminmax.transpose()[1]-sampleinminmax.transpose()[0])).transpose()
sampleoutnorm = ((np.array(sampleout.T)-sampleoutminmax.transpose()[0])/(sampleoutminmax.transpose()[1]-sampleoutminmax.transpose()[0])).transpose()

# Add noise to the normalized output (label) data
noise = 0.03*np.random.rand(sampleoutnorm.shape[0],sampleoutnorm.shape[1])
sampleoutnorm += noise
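Note that the noise here perturbs the normalized labels, not the spectra. Since the title describes classifying after adding noise to the mass-spectrometry data, a variant that perturbs the inputs instead may be what was intended (my assumption, not the original code):

input_noise = 0.03*np.random.rand(sampleinnorm.shape[0],sampleinnorm.shape[1])  # uniform noise in [0, 0.03)
sampleinnorm = sampleinnorm + input_noise  # perturb the normalized spectra instead of the labels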

# Randomly initialize the hidden-layer weights w1, translation factor b, dilation
# (scale) factor a, and output-layer weights w2; the bound
# sqrt(3/((indim+outdim)*0.5)) = sqrt(6/(indim+outdim)) matches Glorot/Xavier uniform initialization
scale = np.sqrt(3/((indim+outdim)*0.5))
w1 = np.random.uniform(low=-scale,high=scale,size=[hiddenunitnum,indim])
b = np.random.uniform(low=-scale, high=scale, size=[hiddenunitnum,1])
a = np.random.uniform(low=-scale, high=scale, size=[hiddenunitnum,1])
w2 = np.random.uniform(low=-scale,high=scale,size=[hiddenunitnum,outdim])

# Convert everything to np.mat so that * below performs matrix multiplication
inputin=np.mat(sampleinnorm.T)
w1=np.mat(w1)
b=np.mat(b)
a=np.mat(a)
w2=np.mat(w2)

# errhistory stores the error computed at each training iteration
errhistory = np.mat(np.zeros((1,maxepochs)))
#Start training
for i in range(maxepochs):
    #Forward pass:
    #hidden_out is the hidden-layer output
    hidden_out = np.mat(np.zeros((samnum,hiddenunitnum)))
    for m in range(samnum):
        for j in range(hiddenunitnum):
            d=((inputin[m, :] * w1[j, :].T) - b[j,:]) * (a[j,:] ** (-1))
            hidden_out[m,j] = wavelet(d)
    #output is the output-layer output
    output = tanh(hidden_out * w2)
    #Compute the error
    out_real = np.mat(sampleoutnorm.transpose())
    err = out_real - output
    loss = np.sum(np.square(err))
    #Check whether to stop training
    if loss < errorfinal:
        break
    errhistory[:,i] = loss
    #Backward pass
    out_put=np.array(output.T)
    belta=de_tanh(out_put).transpose()
    #Compute the error term of each parameter separately
    #(note: de_wavelet is applied to the wavelet output hidden_out[m,j] below, but it is
    #the derivative with respect to the wavelet input d, so the pre-activation d would be
    #the mathematically correct argument)
    for j in range(hiddenunitnum):
        sum1 = 0.0
        sum2 = 0.0
        sum3 = 0.0
        sum4 = 0.0
        for m in range(samnum):
            sum1+= err[m,:] * belta[m,:] * w2[j,:] * de_wavelet(hidden_out[m,j]) * (inputin[m,:] / a[j,:])
            #1*1
            sum2+= err[m,:] * belta[m,:] * w2[j,:] * de_wavelet(hidden_out[m,j]) * (-1) * (1 / a[j,:])
            #1*1
            sum3+= err[m,:] * belta[m,:] * w2[j,:] * de_wavelet(hidden_out[m,j]) * (-1) * ((inputin[m,:] * w1[j,:].T - b[j,:]) / (a[j,:] * a[j,:]))
            #1*1
            sum4+= err[m,:] * belta[m,:] * hidden_out[m,j]
        delta_w1 = sum1
        delta_b = sum2
        delta_a = sum3
        delta_w2 = sum4
        #Update the four parameters with their error terms
        w1[j,:] = w1[j,:] + learnrate * delta_w1
        b[j,:] = b[j,:] + learnrate * delta_b
        a[j,:] = a[j,:] + learnrate * delta_a
        w2[j,:] = w2[j,:] + learnrate * delta_w2
    print("the generation is:",i+1,",the loss is:",loss)

print('updated w1:',w1)
print('updated b:',b)
print('updated w2:',w2)
print('updated a:',a)
print("The loss after iteration is :",loss)

np.save("w1.npy",w1)
np.save("b.npy",b)
np.save("w2.npy",w2)
np.save("a.npy",a)

After 500 iterations: The loss after iteration is : 883.4984505986399

During training the loss jumps up and down and the gradients vanish; lowering the learning rate is worth trying.
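One way to damp the oscillation (a sketch; the decay factor is an arbitrary choice of mine, not from the original code) is to shrink the learning rate each epoch:

learnrate = 0.001      # initial learning rate
decay = 0.99           # hypothetical per-epoch decay factor
for i in range(maxepochs):
    # ... forward and backward pass as above ...
    learnrate = learnrate * decay  # smaller steps later in training damp the oscillation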

Also: the original code was written for a time-series regression problem; turning it into binary classification means the loss has to change.

7.18 Changed the loss function to BinaryCrossentropy (binary cross-entropy).
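For reference, binary cross-entropy over labels y in {0, 1} and predicted probabilities p in (0, 1) is -mean(y*log(p) + (1-y)*log(1-p)). A minimal NumPy version (a sketch; Keras applies similar clipping internally) looks like:

def bce(y_true, y_pred, eps=1e-7):
    p = np.clip(y_pred, eps, 1 - eps)  # keep probabilities away from 0 and 1 to avoid log(0)
    return -np.mean(y_true*np.log(p) + (1 - y_true)*np.log(1 - p))

Since BCE expects probabilities, the tanh output (range -1 to 1) would normally be replaced by a sigmoid.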

Modified code:

# Import libraries
import numpy as np
import pandas as pd
import math
import torch
import torch.nn.functional as F
import tensorflow as tf
from tensorflow.keras.losses import BinaryCrossentropy
losscc = BinaryCrossentropy()

# Activation function
def tanh(x):
    return (np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))
# Derivative of the activation function (expressed in terms of the tanh output)
def de_tanh(x):
    return (1-x**2)
# Wavelet basis function (a Morlet-type wavelet)
def wavelet(x):
    return (math.cos(1.75*x)) * (np.exp((x**2)/(-2)))
# Derivative of the wavelet basis function (with respect to the input x, not the output)
def de_wavelet(x):
    y = (-1) * (1.75 * math.sin(1.75 * x)  + x * math.cos(1.75 * x)) * (np.exp(( x **2)/(-2)))
    return y

# Data input
data=pd.read_pickle('ICC_rms.pkl')
df=pd.DataFrame(data)
X = df.iloc[:, 0:510].values  # feature values of all samples, columns 0-509; shape (1544, 510), i.e. 1544 samples with 510 features
y = df.iloc[:, 511].values  # labels of all samples, column index 511; shape (1544,)
# Convert y to 0/1 form: 'Neurons' -> 0, 'Astrocytes' -> 1
Y=np.array([-1.0] * 1544)
for i in range(len(y)):
    if y[i] =='Neurons':
        Y[i]=0
    if y[i] =='Astrocytes':
        Y[i]=1
# y=['Neurons' 'Neurons' 'Neurons' ... 'Astrocytes' 'Astrocytes' 'Astrocytes']
# Y=[0. 0. 0. ... 1. 1. 1.]


# #Parameter settings (from the original time-series regression example)
# samnum = 72   #number of input samples
# hiddenunitnum = 8   #number of hidden-layer nodes
# indim = 4   #number of input-layer nodes
# outdim = 1   #number of output-layer nodes
# maxepochs = 500   #number of iterations
# errorfinal = 0.65*10**(-3)   #stopping criterion for training
# learnrate = 0.001   #learning rate

#Parameter settings
samnum = 1544   #number of input samples
hiddenunitnum = 8   #number of hidden-layer nodes
indim = 510   #number of input-layer nodes
outdim = 1   #number of output-layer nodes
maxepochs = 500   #number of iterations
errorfinal = 0.65*10**(-3)   #stopping criterion for training
learnrate = 0.001   #learning rate

# Load input data (leftover from the original regression example; Ti and samplein
# are overwritten below with the mass-spectrometry labels Y and features X)
df = pd.read_csv("train.csv")
df.columns = ["Co", "Cr", "Mg", "Pb", "Ti"]
Co = df["Co"]
Co = np.array(Co)
Cr = df["Cr"]
Cr = np.array(Cr)
Mg=df["Mg"]
Mg=np.array(Mg)
Pb = df["Pb"]
Pb =np.array(Pb)
Ti = df["Ti"]
# Tisample = np.array(Ti)
# sampleinshaple = np.mat([Co,Cr,Mg,Pb])
Ti = np.array(Y)
samplein = np.mat(X.T)

# Data normalization: compress the inputs into [0, 1] for easier computation;
# the original values are recovered later by de-normalization
sampleinminmax = np.array([samplein.min(axis=1).T.tolist()[0],samplein.max(axis=1).T.tolist()[0]]).transpose() # per-feature min and max
# The quantity to predict is Ti (overwritten above with the class labels Y)
sampleout = np.mat([Ti])
sampleoutminmax = np.array([sampleout.min(axis=1).T.tolist()[0],sampleout.max(axis=1).T.tolist()[0]]).transpose() # min and max of the output
sampleinnorm = ((np.array(samplein.T)-sampleinminmax.transpose()[0])/(sampleinminmax.transpose()[1]-sampleinminmax.transpose()[0])).transpose()
sampleoutnorm = ((np.array(sampleout.T)-sampleoutminmax.transpose()[0])/(sampleoutminmax.transpose()[1]-sampleoutminmax.transpose()[0])).transpose()

# Add noise to the normalized output (label) data
noise = 0.03*np.random.rand(sampleoutnorm.shape[0],sampleoutnorm.shape[1])
sampleoutnorm += noise

# Randomly initialize the hidden-layer weights w1, translation factor b, dilation
# (scale) factor a, and output-layer weights w2; the bound
# sqrt(3/((indim+outdim)*0.5)) = sqrt(6/(indim+outdim)) matches Glorot/Xavier uniform initialization
scale = np.sqrt(3/((indim+outdim)*0.5))
w1 = np.random.uniform(low=-scale,high=scale,size=[hiddenunitnum,indim])
b = np.random.uniform(low=-scale, high=scale, size=[hiddenunitnum,1])
a = np.random.uniform(low=-scale, high=scale, size=[hiddenunitnum,1])
w2 = np.random.uniform(low=-scale,high=scale,size=[hiddenunitnum,outdim])

# Convert everything to np.mat so that * below performs matrix multiplication
inputin=np.mat(sampleinnorm.T)
w1=np.mat(w1)
b=np.mat(b)
a=np.mat(a)
w2=np.mat(w2)

# errhistory stores the error computed at each training iteration
errhistory = np.mat(np.zeros((1,maxepochs)))
#Start training
for i in range(maxepochs):
    #Forward pass:
    #hidden_out is the hidden-layer output
    hidden_out = np.mat(np.zeros((samnum,hiddenunitnum)))
    for m in range(samnum):
        for j in range(hiddenunitnum):
            d=((inputin[m, :] * w1[j, :].T) - b[j,:]) * (a[j,:] ** (-1))
            hidden_out[m,j] = wavelet(d)
    #output is the output-layer output
    output = tanh(hidden_out * w2)
    #Compute the error
    out_real = np.mat(sampleoutnorm.transpose())
    err = out_real - output
    loss = losscc(out_real,output)
    #(note: the reported loss is now binary cross-entropy, but the updates below are
    #still driven by err = out_real - output, i.e. the squared-error gradient, so the
    #printed loss and the update direction are inconsistent; BCE also expects
    #probabilities in [0, 1], while tanh can be negative)
    #Check whether to stop training
    if loss < errorfinal:
        break
    errhistory[:,i] = loss
    #Backward pass
    out_put=np.array(output.T)
    belta=de_tanh(out_put).transpose()
    #Compute the error term of each parameter separately
    #(note: de_wavelet is applied to the wavelet output hidden_out[m,j] below, but it is
    #the derivative with respect to the wavelet input d, so the pre-activation d would be
    #the mathematically correct argument)
    for j in range(hiddenunitnum):
        sum1 = 0.0
        sum2 = 0.0
        sum3 = 0.0
        sum4 = 0.0
        for m in range(samnum):
            sum1+= err[m,:] * belta[m,:] * w2[j,:] * de_wavelet(hidden_out[m,j]) * (inputin[m,:] / a[j,:])
            #1*1
            sum2+= err[m,:] * belta[m,:] * w2[j,:] * de_wavelet(hidden_out[m,j]) * (-1) * (1 / a[j,:])
            #1*1
            sum3+= err[m,:] * belta[m,:] * w2[j,:] * de_wavelet(hidden_out[m,j]) * (-1) * ((inputin[m,:] * w1[j,:].T - b[j,:]) / (a[j,:] * a[j,:]))
            #1*1
            sum4+= err[m,:] * belta[m,:] * hidden_out[m,j]
        delta_w1 = sum1
        delta_b = sum2
        delta_a = sum3
        delta_w2 = sum4
        #Update the four parameters with their error terms
        w1[j,:] = w1[j,:] + learnrate * delta_w1
        b[j,:] = b[j,:] + learnrate * delta_b
        a[j,:] = a[j,:] + learnrate * delta_a
        w2[j,:] = w2[j,:] + learnrate * delta_w2
    print("the generation is:",i+1,",the loss is:",loss)

print('updated w1:',w1)
print('updated b:',b)
print('updated w2:',w2)
print('updated a:',a)
print("The loss after iteration is :",loss)

np.save("w1.npy",w1)
np.save("b.npy",b)
np.save("w2.npy",w2)
np.save("a.npy",a)

Error message

2023-07-18 20:08:32.750151: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-07-18 20:08:33.578350: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1510] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 9601 MB memory:  -> device: 0, name: NVIDIA GeForce RTX 3060, pci bus id: 0000:01:00.0, compute capability: 8.6

These are informational messages about CPU optimizations, not real errors; they don't affect the results, so I'll ignore them for now.

Output

updated w1: [[-0.07068373 -0.02922569  0.01600041 ...  0.00161731  0.05816412
   0.07385217]
 [-0.04781427 -0.12022122  0.0591721  ...  0.00910636 -0.01597548
  -0.10810779]
 [ 0.04393831 -0.0093757  -0.08421552 ... -0.01431543 -0.08981475
  -0.09836085]
 ...
 [ 0.02125342  0.07427706  0.04561648 ...  0.09757433 -0.10945643
  -0.02225736]
 [-0.00642149 -0.10466258 -0.04414041 ... -0.07586007  0.09606159
  -0.10095507]
 [ 0.07836856  0.09866979  0.08790088 ...  0.11377285  0.13028167
   0.13548066]]
updated b: [[ 0.34133883]
 [ 0.44091272]
 [ 1.02525526]
 [-0.25850262]
 [-0.93522212]
 [ 0.56359322]
 [ 0.33065068]
 [-1.24233186]]
updated w2: [[ 0.22237583]
 [-0.11835115]
 [ 0.50352054]
 [ 0.17664935]
 [-0.48654281]
 [ 0.54192351]
 [ 0.42188033]
 [ 0.53363458]]
updated a: [[-0.03504681]
 [ 0.29143878]
 [-0.89396863]
 [ 0.21286339]
 [-0.5527912 ]
 [-0.45058405]
 [ 0.07456687]
 [ 1.0447857 ]]
The loss after iteration is : tf.Tensor(6.856029987335205, shape=(), dtype=float64)

Process finished with exit code 0

The problem is that the loss starts at around 3, jumps to about 8, then slowly drops to around 6, and after that oscillates back and forth between roughly 6 and 4.

Currently stuck on test.py. I suspect the loss still needs changing: the loss in the source code targets a regression problem and can't be used for classification. Going back to look at why the source code wrote the loss this way.

7.19

Removed the error-computation step, but why aren't the predictions 0s and 1s?

test code

# Import libraries
import numpy as np
import pandas as pd
import math
from tensorflow.keras.losses import BinaryCrossentropy
losscc = BinaryCrossentropy()

# Wavelet basis function (a Morlet-type wavelet)
def wavelet(x):
    return (math.cos(1.75*x)) * (np.exp((x**2)/(-2)))
# Activation function tanh
def tanh(x):
    return (np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))

# Data input
data=pd.read_pickle('ICC_rms.pkl')
df=pd.DataFrame(data)
X = df.iloc[:, 0:510].values  # feature values of all samples, columns 0-509; shape (1544, 510), i.e. 1544 samples with 510 features
y = df.iloc[:, 511].values  # labels of all samples, column index 511; shape (1544,)
# Convert y to 0/1 form: 'Neurons' -> 0, 'Astrocytes' -> 1
Y=np.array([-1.0] * 1544)
for i in range(len(y)):
    if y[i] =='Neurons':
        Y[i]=0
    if y[i] =='Astrocytes':
        Y[i]=1

# Load the training input data, used for normalizing and de-normalizing the test data
df = pd.read_csv("train.csv")
df.columns = ["Co", "Cr", "Mg", "Pb", "Ti"]
Co = df["Co"]
Co = np.array(Co)
Cr = df["Cr"]
Cr = np.array(Cr)
Mg=df["Mg"]
Mg=np.array(Mg)
Pb = df["Pb"]
Pb =np.array(Pb)
Ti = df["Ti"]
Tisample = np.array(Ti)
sampleinsample = np.mat([Co,Cr,Mg,Pb])
Ti = np.array(Y)
samplein = np.mat(X.T)
sampleinminmax = np.array([samplein.min(axis=1).T.tolist()[0],samplein.max(axis=1).T.tolist()[0]]).transpose() # per-feature min and max
sampleout = np.mat([Ti])
sampleoutminmax = np.array([sampleout.min(axis=1).T.tolist()[0],sampleout.max(axis=1).T.tolist()[0]]).transpose() # min and max of the output

# Load the parameters trained by WNN.py
w1=np.load('w1.npy')
b=np.load('b.npy')
a=np.load('a.npy')
w2=np.load('w2.npy')
w1 = np.mat(w1)
w2 = np.mat(w2)
b = np.mat(b)
a = np.mat(a)

# Number of hidden-layer nodes
hiddenunitnum = 8
# Number of test samples (note: only the first 24 of the 1544 rows in `input` are evaluated below)
testnum = 24


# Load the test data (leftover from the regression example; the actual inputs are overwritten with X below)
df = pd.read_csv("test.csv")
df.columns = ["Co", "Cr", "Mg", "Pb", "Ti"]
Co = df["Co"]
Co = np.array(Co)
Cr = df["Cr"]
Cr = np.array(Cr)
Mg=df["Mg"]
Mg=np.array(Mg)
Pb = df["Pb"]
Pb =np.array(Pb)
Ti = df["Ti"]
# Ti = np.array(Ti)
# input=np.mat([Co,Cr,Mg,Pb])
Ti = np.array(Y)
input = np.mat(X.T)

# Normalize the test inputs with the training min/max
inputnorm=(np.array(input.T)-sampleinminmax.transpose()[0])/(sampleinminmax.transpose()[1]-sampleinminmax.transpose()[0])
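# (my addition, a hedged sketch: if any feature has zero range in the training data,
# the division above yields inf/nan; a guard like the following avoids that)
# span = sampleinminmax[:,1] - sampleinminmax[:,0]
# span[span == 0] = 1.0
# inputnorm = (np.array(input.T) - sampleinminmax[:,0]) / span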
# hidden_out stores the hidden-layer output
hidden_out = np.mat(np.zeros((testnum,hiddenunitnum)))
# Compute the hidden-layer output
for m in range(testnum):
    for j in range(hiddenunitnum):
        d = ((inputnorm[m, :] * w1[j, :].T) - b[j, :]) * (a[j, :] ** (-1))
        hidden_out[m, j] = wavelet(d)
# Compute the output-layer output
output = tanh(hidden_out * w2 )
# De-normalize the outputs (with 0/1 labels, diff = 1 and the offset is 0, so values pass through unchanged)
diff = sampleoutminmax[:,1]-sampleoutminmax[:,0]
networkout2 = output*diff+sampleoutminmax[0][0]
networkout2 = np.array(networkout2).transpose()
output1=networkout2.flatten() # flatten to a 1-D array
output1=output1.tolist()
for i in range(testnum):
    output1[i] = float('%.2f'%output1[i])
print("the prediction is:",output1)

# #Compare the outputs with the true values and compute the errors
# output=Ti
# rmse = (np.sum(np.square(output-output1))/len(output)) ** 0.5
# mae = np.sum(np.abs(output-output1))/len(output)
# #average_loss1=np.sum(np.abs((output-output1)/output))/len(output)
# average_loss1=losscc(output,output1)
# mape="%.2f%%"%(average_loss1*100)
# f1 = 0
# for m in range(testnum):
#     f1 = f1 + np.abs(output[m]-output1[m])/((np.abs(output[m])+np.abs(output1[m]))/2)
# f2 = f1 / testnum
# smape="%.2f%%"%(f2*100)
# print("the MAE is :",mae)
# print("the RMSE is :",rmse)
# print("the MAPE is :",mape)
# print("the SMAPE is :",smape)

# Distribution of |prediction error| / |true value| (a leftover regression metric; note that
# `output` below is the network-output matrix, since the line output=Ti above is commented
# out, so this breakdown is not meaningful for 0/1 class labels)
A=0
B=0
C=0
D=0
E=0
for m in range(testnum):
    y1 = np.abs(output[m]-output1[m])/np.abs(output[m])
    if y1 <= 0.1:
        A = A + 1
    elif y1 > 0.1 and y1 <= 0.2:
        B = B + 1
    elif y1 > 0.2 and y1 <= 0.3:
        C = C + 1
    elif y1 > 0.3 and y1 <= 0.4:
        D = D + 1
    else:
        E = E + 1
print("Ratio <= 0.1 :",A)
print("0.1< Ratio <= 0.2 :",B)
print("0.2< Ratio <= 0.3 :",C)
print("0.3< Ratio <= 0.4 :",D)
print("Ratio > 0.4 :",E)

Results

the prediction is: [0.0, -0.0, 0.0, 0.0, -0.0, 0.01, 0.0, -0.0, -0.0, -0.0, 0.01, 0.01, -0.0, 0.01, 0.01, 0.02, -0.0, 0.0, 0.01, 0.0, 0.01, -0.0, 0.01, 0.0]
Ratio <= 0.1 : 3
0.1< Ratio <= 0.2 : 1
0.2< Ratio <= 0.3 : 1
0.3< Ratio <= 0.4 : 2
Ratio > 0.4 : 17

Process finished with exit code 0
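The network outputs a continuous score, which is why the predictions are not exactly 0 and 1; a hard class label requires thresholding. A sketch (assuming the sigmoid output discussed above, and using hidden_out, w2, Ti, and testnum from test.py):

probs = 1 / (1 + np.exp(-np.asarray(hidden_out * w2)))   # sigmoid scores in (0, 1)
preds = (probs >= 0.5).astype(int).flatten()             # threshold at 0.5 -> hard 0/1 labels
accuracy = np.mean(preds == np.asarray(Ti[:testnum]))    # fraction of correctly labelled test samples
print("the prediction is:", preds.tolist())
print("the accuracy is:", accuracy)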

 
