深度学习前馈神经网络实验

深度学习前馈神经网络实验

1、手动实现前馈神经网络解决回归问题

#导入必要的包
import torch  
import numpy as np  
import random  
from IPython import display  
from matplotlib import pyplot as plt  
import torch.utils.data as Data  
#自定义数据---训练集  
num_inputs = 500  
num_examples = 10000  
true_w = torch.ones(500,1)*0.0056  
true_b = 0.028  
x_features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)  
y_labels = torch.mm(x_features,true_w) + true_b  
y_labels += torch.tensor(np.random.normal(0, 0.01, size=y_labels.size()), dtype=torch.float)  
#训练集  
trainfeatures =x_features[:7000]  
trainlabels = y_labels[:7000]  
print(trainfeatures.shape)  
#测试集  
testfeatures =x_features[7000:]  
testlabels = y_labels[7000:]  
print(testfeatures.shape) 
torch.Size([7000, 500])
torch.Size([3000, 500])
#读取数据  
batch_size = 50  
# 将训练数据的特征和标签组合  
dataset = Data.TensorDataset(trainfeatures, trainlabels)  
train_iter = Data.DataLoader(  
    dataset=dataset, # torch TensorDataset format  
    batch_size=batch_size, # mini batch size  
    shuffle=True, # 是否打乱数据 (训练集一般需要进行打乱)  
    num_workers=0, # 多线程来读数据, 注意在Windows下需要设置为0  
)  
# 将测试数据的特征和标签组合  
dataset = Data.TensorDataset(testfeatures, testlabels)  
# 把 dataset 放入 DataLoader  
test_iter = Data.DataLoader(  
    dataset=dataset, # torch TensorDataset format  
    batch_size=batch_size, # mini batch size  
    shuffle=True, # 是否打乱数据 
    num_workers=0, # 多线程来读数据, 注意在Windows下需要设置为0  
)
#初始化参数  
num_hiddens,num_outputs = 256,1  
 
W1 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens,num_inputs)), dtype=torch.float32)  
b1 = torch.zeros(1, dtype=torch.float32)  
W2 = torch.tensor(np.random.normal(0, 0.01, (num_outputs,num_hiddens)), dtype=torch.float32)  
b2 = torch.zeros(1, dtype=torch.float32)  
params =[W1,b1,W2,b2]  
for param in params:  
    param.requires_grad_(requires_grad=True)  
#自定义relu激活函数
def relu(x):  
    x = torch.max(input=x,other=torch.tensor(0.0))  
    return x 
#定义模型  
def net(X):  
    X = X.view((-1,num_inputs))  
    H = relu(torch.matmul(X,W1.t())+b1)  #经过第一层(包括激活函数)
    return torch.matmul(H,W2.t())+b2     #第二层
#定义最小化均方误差  
loss = torch.nn.MSELoss()  
  
#定义随机梯度下降法  
def SGD(paras,lr,batch_size):  
    for param in params:  
        param.data -= lr * param.grad/batch_size  
#定义模型训练函数  
def train(net,train_iter,test_iter,loss,num_epochs,batch_size,params=None,lr=None,optimizer=None):  
    train_ls = []  
    test_ls = []  
    for epoch in range(num_epochs): # 训练模型一共需要num_epochs个迭代周期  
        train_l_sum, train_acc_num,n = 0.0,0.0,0  
        # 在每一个迭代周期中,会使用训练数据集中所有样本一次  
        for X, y in train_iter: # x和y分别是小批量样本的特征和标签  
            y_hat = net(X)  
            l = loss(y_hat, y.view(-1,1)) # l是有关小批量X和y的损失  
            #梯度清零  
            if optimizer is not None:  #手动实现梯度清零
                optimizer.zero_grad()  
            elif params is not None and params[0].grad is not None:  
                for param in params:  
                    param.grad.data.zero_()  
            l.backward() # 小批量的损失对模型参数求梯度  
            if optimizer is None:  
                SGD(params,lr,batch_size)  
            else:  
                optimizer.step()  
            #计算每个epoch的loss  
            train_l_sum += l.item()*y.shape[0]   
            n+= y.shape[0]  
        test_labels = testlabels.view(-1,1)  
        train_ls.append(train_l_sum/n)  
        test_ls.append(loss(net(testfeatures),test_labels).item())  
        print('epoch %d, train_loss %.6f,test_loss %.6f'%(epoch+1, train_ls[epoch],test_ls[epoch]))
    return train_ls,test_ls  
lr = 0.01  #学习率
num_epochs = 50  #迭代次数
train_loss,test_loss = train(net,train_iter,test_iter,loss,num_epochs,batch_size,params,lr)  #开始训练
epoch 1, train_loss 0.017192,test_loss 0.017365
epoch 2, train_loss 0.016870,test_loss 0.017036
epoch 3, train_loss 0.016633,test_loss 0.016788
epoch 4, train_loss 0.016457,test_loss 0.016599
epoch 5, train_loss 0.016324,test_loss 0.016453
epoch 6, train_loss 0.016222,test_loss 0.016337
epoch 7, train_loss 0.016142,test_loss 0.016246
epoch 8, train_loss 0.016079,test_loss 0.016171
epoch 9, train_loss 0.016026,test_loss 0.016108
epoch 10, train_loss 0.015980,test_loss 0.016055
epoch 11, train_loss 0.015940,test_loss 0.016008
epoch 12, train_loss 0.015905,test_loss 0.015967
epoch 13, train_loss 0.015872,test_loss 0.015929
epoch 14, train_loss 0.015841,test_loss 0.015895
epoch 15, train_loss 0.015812,test_loss 0.015863
epoch 16, train_loss 0.015784,test_loss 0.015834
epoch 17, train_loss 0.015756,test_loss 0.015805
epoch 18, train_loss 0.015730,test_loss 0.015778
epoch 19, train_loss 0.015703,test_loss 0.015752
epoch 20, train_loss 0.015678,test_loss 0.015726
epoch 21, train_loss 0.015652,test_loss 0.015702
epoch 22, train_loss 0.015627,test_loss 0.015678
epoch 23, train_loss 0.015602,test_loss 0.015654
epoch 24, train_loss 0.015576,test_loss 0.015631
epoch 25, train_loss 0.015552,test_loss 0.015607
epoch 26, train_loss 0.015527,test_loss 0.015584
epoch 27, train_loss 0.015502,test_loss 0.015562
epoch 28, train_loss 0.015478,test_loss 0.015540
epoch 29, train_loss 0.015453,test_loss 0.015517
epoch 30, train_loss 0.015429,test_loss 0.015495
epoch 31, train_loss 0.015404,test_loss 0.015473
epoch 32, train_loss 0.015380,test_loss 0.015451
epoch 33, train_loss 0.015356,test_loss 0.015430
epoch 34, train_loss 0.015332,test_loss 0.015408
epoch 35, train_loss 0.015308,test_loss 0.015387
epoch 36, train_loss 0.015284,test_loss 0.015365
epoch 37, train_loss 0.015260,test_loss 0.015344
epoch 38, train_loss 0.015236,test_loss 0.015323
epoch 39, train_loss 0.015212,test_loss 0.015302
epoch 40, train_loss 0.015188,test_loss 0.015281
epoch 41, train_loss 0.015164,test_loss 0.015260
epoch 42, train_loss 0.015141,test_loss 0.015239
epoch 43, train_loss 0.015117,test_loss 0.015218
epoch 44, train_loss 0.015094,test_loss 0.015197
epoch 45, train_loss 0.015070,test_loss 0.015176
epoch 46, train_loss 0.015047,test_loss 0.015155
epoch 47, train_loss 0.015023,test_loss 0.015134
epoch 48, train_loss 0.015000,test_loss 0.015114
epoch 49, train_loss 0.014976,test_loss 0.015093
epoch 50, train_loss 0.014953,test_loss 0.015072
#结果可视化
x = np.linspace(0,len(train_loss),len(train_loss))  
plt.plot(x,train_loss,label="train_loss",linewidth=1.5)  
plt.plot(x,test_loss,label="test_loss",linewidth=1.5)  
plt.xlabel("epoch")  
plt.ylabel("loss")  
plt.legend()  
plt.show()  

png

2、手动实现前馈神经网络解决二分类问题

#导入必要的包
import numpy as np
import torch
from torch import nn
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt 
from torch.utils.data import TensorDataset,DataLoader
#创建数据集
num_example,num_input = 10000,200
x1 = torch.normal(-2,1,size = (num_example,num_input)) #均值为-2,标准差为1
y1 = torch.ones((num_example,1))
x2 = torch.normal(2,1,size = (num_example,num_input))  #均值为2,标准差为1
y2 = torch.zeros((num_example,1))
x = torch.cat((x1,x2),dim = 0)
y = torch.cat((y1,y2),dim = 0)

#划分训练集和测试集 训练集:测试集 = 7:3
train_x,test_x,train_y,test_y =train_test_split(x,y,shuffle = True,stratify = y,random_state= 1,test_size=0.3)
print(train_x.shape)
print(test_x.shape)
torch.Size([14000, 200])
torch.Size([6000, 200])
#读取数据
batch_size = 50

#训练集
train_dataset = TensorDataset(train_x,train_y)
train_iter = DataLoader(
    dataset = train_dataset,
    shuffle = True,
    batch_size = batch_size,
    num_workers = 0
)

#测试集
test_dataset = TensorDataset(test_x,test_y)
test_iter = DataLoader(
    dataset = test_dataset,
    shuffle = True,
    batch_size = batch_size,
    num_workers = 0
)
#初始化参数
w=torch.tensor(np.random.normal(0,0.01,(200,1)),dtype=torch.float32)
b=torch.zeros(1,dtype=torch.float32)
#对参数梯度进行追踪
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)

#实现逻辑回归
def logits(X, w, b):
    y = torch.mm(X, w) + b 
    return  1/(1+torch.pow(np.e,-y)) #加一个sigmoid层,使数据归一到(0,1)里面
#查看参数情况
#print(np.unique(y))
#np.sum([ele.data==1  for ele in train_y.flatten() ])
#np.sum([ele.data==0  for ele in train_y.flatten() ])
#隐藏层单元个数256个
num_hidden,num_output = 256,1
torch.set_default_tensor_type = torch.float32
# w1 = torch.tensor(torch.normal(0,0.001,size = (num_hidden,num_input))).type(torch.float32)
w1 = torch.tensor(torch.normal(0,0.001,size = (num_hidden,num_input))).type(torch.FloatTensor)
b1 = torch.ones(1)
# b1 = torch.ones(1,dtype = torch.float32)
print(w1.dtype)
print(b1.dtype)
      
w2 = torch.tensor(torch.normal(0,0.001,size = (num_output,num_hidden)),dtype = torch.float32)
b2 = torch.ones(1)    
params = [w1,w2,b1,b2]

for param in params:  
    param.requires_grad_(requires_grad=True)  
torch.float32
torch.float32


E:\Anaconda3\envs\torch_gpu\lib\site-packages\ipykernel_launcher.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
  after removing the cwd from sys.path.
E:\Anaconda3\envs\torch_gpu\lib\site-packages\ipykernel_launcher.py:10: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
  # Remove the CWD from sys.path while we load stuff.
#激活函数
def ReLU(x):
    return torch.max(x,other = torch.tensor(0.0))

loss = nn.BCEWithLogitsLoss()
# loss = nn.BCELoss()

#定义模型
def net(x):
    H1 = ReLU(torch.matmul(x,w1.t())+b1)
    H2 = torch.matmul(H1,w2.t())+b2
    return H2

#优化器
def SGD(params,lr):
    param.data -= lr*param.grad/batch_size
#定义训练函数
def train(net ,train_iter,test_iter,lr,num_epochs,params):
    train_l,test_l = [],[]
    for epoch in range(num_epochs):
        train_l_sum,n = 0,0
        for x,y in train_iter:
            n += y.shape[0] 
            y_pred = net(x)
            l = loss(y_pred,y)
            train_l_sum += l.item()
            if params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()

            l.backward()
            SGD(params,lr)
        train_l.append(train_l_sum/n)
        
        test_l_sum,n = 0,0
        for x,y in test_iter:
            n += y.shape[0] 
            y_pred = net(x)
            l = loss(y_pred,y)
            test_l_sum += l.item()
        test_l.append(test_l_sum/n)
        print('epoch %d, train_loss %.6f,test_loss %.6f'%(epoch+1, train_l[epoch],test_l[epoch]))
    return train_l,test_l
lr, num_epochs = 0.01,100 #学习率和训练次数
train_loss,test_loss =train(net ,train_iter,test_iter,lr,num_epochs,params) #开始训练
epoch 1, train_loss 0.016177,test_loss 0.016148
epoch 2, train_loss 0.016120,test_loss 0.016092
epoch 3, train_loss 0.016064,test_loss 0.016036
epoch 4, train_loss 0.016009,test_loss 0.015982
epoch 5, train_loss 0.015956,test_loss 0.015929
epoch 6, train_loss 0.015903,test_loss 0.015877
epoch 7, train_loss 0.015852,test_loss 0.015827
epoch 8, train_loss 0.015802,test_loss 0.015777
epoch 9, train_loss 0.015753,test_loss 0.015729
epoch 10, train_loss 0.015706,test_loss 0.015682
epoch 11, train_loss 0.015659,test_loss 0.015636
epoch 12, train_loss 0.015613,test_loss 0.015591
epoch 13, train_loss 0.015569,test_loss 0.015547
epoch 14, train_loss 0.015526,test_loss 0.015504
epoch 15, train_loss 0.015483,test_loss 0.015462
epoch 16, train_loss 0.015442,test_loss 0.015421
epoch 17, train_loss 0.015401,test_loss 0.015381
epoch 18, train_loss 0.015362,test_loss 0.015342
epoch 19, train_loss 0.015323,test_loss 0.015304
epoch 20, train_loss 0.015286,test_loss 0.015267
epoch 21, train_loss 0.015249,test_loss 0.015231
epoch 22, train_loss 0.015213,test_loss 0.015195
epoch 23, train_loss 0.015178,test_loss 0.015161
epoch 24, train_loss 0.015144,test_loss 0.015127
epoch 25, train_loss 0.015111,test_loss 0.015094
epoch 26, train_loss 0.015078,test_loss 0.015062
epoch 27, train_loss 0.015046,test_loss 0.015031
epoch 28, train_loss 0.015015,test_loss 0.015000
epoch 29, train_loss 0.014985,test_loss 0.014970
epoch 30, train_loss 0.014956,test_loss 0.014941
epoch 31, train_loss 0.014927,test_loss 0.014913
epoch 32, train_loss 0.014899,test_loss 0.014885
epoch 33, train_loss 0.014872,test_loss 0.014858
epoch 34, train_loss 0.014845,test_loss 0.014832
epoch 35, train_loss 0.014819,test_loss 0.014807
epoch 36, train_loss 0.014794,test_loss 0.014782
epoch 37, train_loss 0.014770,test_loss 0.014757
epoch 38, train_loss 0.014746,test_loss 0.014734
epoch 39, train_loss 0.014722,test_loss 0.014711
epoch 40, train_loss 0.014699,test_loss 0.014688
epoch 41, train_loss 0.014677,test_loss 0.014666
epoch 42, train_loss 0.014655,test_loss 0.014645
epoch 43, train_loss 0.014634,test_loss 0.014624
epoch 44, train_loss 0.014614,test_loss 0.014604
epoch 45, train_loss 0.014594,test_loss 0.014584
epoch 46, train_loss 0.014574,test_loss 0.014564
epoch 47, train_loss 0.014555,test_loss 0.014546
epoch 48, train_loss 0.014537,test_loss 0.014527
epoch 49, train_loss 0.014519,test_loss 0.014510
epoch 50, train_loss 0.014501,test_loss 0.014492
epoch 51, train_loss 0.014484,test_loss 0.014475
epoch 52, train_loss 0.014467,test_loss 0.014459
epoch 53, train_loss 0.014451,test_loss 0.014443
epoch 54, train_loss 0.014435,test_loss 0.014427
epoch 55, train_loss 0.014420,test_loss 0.014412
epoch 56, train_loss 0.014405,test_loss 0.014397
epoch 57, train_loss 0.014390,test_loss 0.014383
epoch 58, train_loss 0.014376,test_loss 0.014369
epoch 59, train_loss 0.014362,test_loss 0.014355
epoch 60, train_loss 0.014348,test_loss 0.014342
epoch 61, train_loss 0.014335,test_loss 0.014329
epoch 62, train_loss 0.014322,test_loss 0.014316
epoch 63, train_loss 0.014310,test_loss 0.014304
epoch 64, train_loss 0.014298,test_loss 0.014292
epoch 65, train_loss 0.014286,test_loss 0.014280
epoch 66, train_loss 0.014275,test_loss 0.014269
epoch 67, train_loss 0.014263,test_loss 0.014258
epoch 68, train_loss 0.014252,test_loss 0.014247
epoch 69, train_loss 0.014242,test_loss 0.014237
epoch 70, train_loss 0.014232,test_loss 0.014226
epoch 71, train_loss 0.014221,test_loss 0.014216
epoch 72, train_loss 0.014212,test_loss 0.014207
epoch 73, train_loss 0.014202,test_loss 0.014197
epoch 74, train_loss 0.014193,test_loss 0.014188
epoch 75, train_loss 0.014184,test_loss 0.014179
epoch 76, train_loss 0.014175,test_loss 0.014171
epoch 77, train_loss 0.014167,test_loss 0.014162
epoch 78, train_loss 0.014158,test_loss 0.014154
epoch 79, train_loss 0.014150,test_loss 0.014146
epoch 80, train_loss 0.014142,test_loss 0.014138
epoch 81, train_loss 0.014135,test_loss 0.014131
epoch 82, train_loss 0.014127,test_loss 0.014123
epoch 83, train_loss 0.014120,test_loss 0.014116
epoch 84, train_loss 0.014113,test_loss 0.014109
epoch 85, train_loss 0.014106,test_loss 0.014103
epoch 86, train_loss 0.014099,test_loss 0.014096
epoch 87, train_loss 0.014093,test_loss 0.014090
epoch 88, train_loss 0.014086,test_loss 0.014083
epoch 89, train_loss 0.014080,test_loss 0.014077
epoch 90, train_loss 0.014074,test_loss 0.014071
epoch 91, train_loss 0.014068,test_loss 0.014066
epoch 92, train_loss 0.014063,test_loss 0.014060
epoch 93, train_loss 0.014057,test_loss 0.014054
epoch 94, train_loss 0.014052,test_loss 0.014049
epoch 95, train_loss 0.014047,test_loss 0.014044
epoch 96, train_loss 0.014042,test_loss 0.014039
epoch 97, train_loss 0.014037,test_loss 0.014034
epoch 98, train_loss 0.014032,test_loss 0.014029
epoch 99, train_loss 0.014027,test_loss 0.014025
epoch 100, train_loss 0.014023,test_loss 0.014020
#结果可视化
x = np.linspace(0,len(train_loss),len(train_loss))  
plt.plot(x,train_loss,label="train_loss",linewidth=1.5)  
plt.plot(x,test_loss,label="test_loss",linewidth=1.5)  
plt.xlabel("epoch")  
plt.ylabel("loss")  
plt.legend()  
plt.show()  

png

3、手动实现前馈神经网络解决多分类问题

#导入必要的包
import torch
import numpy as np
import random
from IPython import  display
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader,TensorDataset
from torchvision import transforms,datasets
from torch import nn
#获取数据
mnist_train = datasets.MNIST(root = './data',train = True,download = False,transform =transforms.ToTensor())
mnist_test = datasets.MNIST(root ='./data',train = False,download = False,transform = transforms.ToTensor())

#训练集
batch_size = 256
train_iter = DataLoader( 
    dataset = mnist_train,
    shuffle = True,
    batch_size = batch_size,
    num_workers = 0
)
#测试集
test_iter = DataLoader(
    dataset  = mnist_test,
    shuffle  =False,
    batch_size = batch_size,
    num_workers = 0
)
#参数初始化
num_input ,num_hiddens ,num_output = 784,256,10
W1 =  torch.tensor(np.random.normal(0,0.01,size = (num_hiddens,num_input)),dtype = torch.float32)
b1 = torch.zeros(1,dtype = torch.float32)

W2 =  torch.tensor(np.random.normal(0,0.01,size = (num_output,num_hiddens)),dtype = torch.float32)
b2 = torch.zeros(1,dtype = torch.float32)

params = [W1 ,b1,W2,b2]
for param in params:
    param.requires_grad_(requires_grad = True)
#激活函数
def ReLU(X):
    return torch.max(X,other = torch.tensor(0.0))
#定义模型
def net(x):
    x = x.view(-1,num_input)
    H1 = ReLU(torch.matmul(x,W1.t())+b1)
    H2 = torch.matmul(H1,W2.t()+b2)
    return H2
#定义多分类交叉熵损失函数  
loss = torch.nn.CrossEntropyLoss()  
def SGD(params,lr):
    for param in params:
        param.data -= param.grad/batch_size
#定义评估函数
def evaluate_loss(data_iter,net):
        acc_sum,loss_sum,n= 0,0,0
        for x,y in data_iter:
            y_pred = net(x)
            l = loss(y_pred,y)
            loss_sum += l.item()
            acc_sum += (y_pred.argmax(dim =1)==y).sum().item()
            n += y.shape[0]
        return acc_sum/n,loss_sum/n
#定义训练函数
def train(net,train_iter,test_iter,loss,num_epochs,batch_size,lr):
    train_ls ,test_ls = [],[]
    for epoch in range(num_epochs):
        train_l_sum, train_acc_num,n = 0.0,0.0,0
        for x ,y in train_iter:
            y_pred = net(x)
            l = loss(y_pred,y)
            if params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            SGD(params,lr)
            train_l_sum += l.item()
            train_acc_num += (y_pred.argmax(dim = 1)==y).sum().item()
            n +=y.shape[0]
        train_ls.append(train_l_sum/n)  
        test_acc,test_l = evaluate_loss(test_iter,net)  
        test_ls.append(test_l)
        print('epoch %d, train_loss %.6f,test_loss %f,train_acc %.6f,test_acc %.6f'%(epoch+1, train_ls[epoch],test_ls[epoch],train_acc_num/n,test_acc))  
    return train_ls,test_ls        
#开始训练
lr = 0.01  
num_epochs = 50  
train_loss,test_loss = train(net,train_iter,test_iter,loss,num_epochs,batch_size,lr)   
epoch 1, train_loss 0.008971,test_loss 0.009095,train_acc 0.238467,test_acc 0.345500
epoch 2, train_loss 0.008803,test_loss 0.008849,train_acc 0.422300,test_acc 0.527800
epoch 3, train_loss 0.008473,test_loss 0.008383,train_acc 0.566150,test_acc 0.614000
epoch 4, train_loss 0.007875,test_loss 0.007582,train_acc 0.616383,test_acc 0.625200
epoch 5, train_loss 0.006961,test_loss 0.006507,train_acc 0.636317,test_acc 0.668500
epoch 6, train_loss 0.005908,test_loss 0.005438,train_acc 0.684450,test_acc 0.729200
epoch 7, train_loss 0.004964,test_loss 0.004572,train_acc 0.737467,test_acc 0.769200
epoch 8, train_loss 0.004234,test_loss 0.003928,train_acc 0.772583,test_acc 0.795800
epoch 9, train_loss 0.003690,test_loss 0.003451,train_acc 0.795417,test_acc 0.810800
epoch 10, train_loss 0.003284,test_loss 0.003094,train_acc 0.811533,test_acc 0.822900
epoch 11, train_loss 0.002977,test_loss 0.002820,train_acc 0.823083,test_acc 0.833800
epoch 12, train_loss 0.002739,test_loss 0.002605,train_acc 0.832267,test_acc 0.842600
epoch 13, train_loss 0.002551,test_loss 0.002434,train_acc 0.840417,test_acc 0.850900
epoch 14, train_loss 0.002399,test_loss 0.002294,train_acc 0.847433,test_acc 0.858700
epoch 15, train_loss 0.002273,test_loss 0.002178,train_acc 0.853583,test_acc 0.862900
epoch 16, train_loss 0.002171,test_loss 0.002080,train_acc 0.858767,test_acc 0.866400
epoch 17, train_loss 0.002083,test_loss 0.001997,train_acc 0.863533,test_acc 0.870700
epoch 18, train_loss 0.002006,test_loss 0.001925,train_acc 0.867450,test_acc 0.874600
epoch 19, train_loss 0.001940,test_loss 0.001863,train_acc 0.870750,test_acc 0.876300
epoch 20, train_loss 0.001882,test_loss 0.001808,train_acc 0.873833,test_acc 0.879900
epoch 21, train_loss 0.001832,test_loss 0.001761,train_acc 0.876567,test_acc 0.883300
epoch 22, train_loss 0.001787,test_loss 0.001717,train_acc 0.878317,test_acc 0.885900
epoch 23, train_loss 0.001747,test_loss 0.001680,train_acc 0.880117,test_acc 0.886900
epoch 24, train_loss 0.001712,test_loss 0.001647,train_acc 0.881967,test_acc 0.888100
epoch 25, train_loss 0.001679,test_loss 0.001614,train_acc 0.883283,test_acc 0.889300
epoch 26, train_loss 0.001650,test_loss 0.001587,train_acc 0.884767,test_acc 0.890900
epoch 27, train_loss 0.001624,test_loss 0.001561,train_acc 0.885783,test_acc 0.892400
epoch 28, train_loss 0.001599,test_loss 0.001540,train_acc 0.887483,test_acc 0.893800
epoch 29, train_loss 0.001577,test_loss 0.001517,train_acc 0.888967,test_acc 0.894300
epoch 30, train_loss 0.001557,test_loss 0.001498,train_acc 0.890050,test_acc 0.895700
epoch 31, train_loss 0.001537,test_loss 0.001480,train_acc 0.891483,test_acc 0.895900
epoch 32, train_loss 0.001520,test_loss 0.001462,train_acc 0.892433,test_acc 0.897300
epoch 33, train_loss 0.001504,test_loss 0.001449,train_acc 0.893133,test_acc 0.897900
epoch 34, train_loss 0.001488,test_loss 0.001434,train_acc 0.894017,test_acc 0.899300
epoch 35, train_loss 0.001473,test_loss 0.001419,train_acc 0.895233,test_acc 0.899700
epoch 36, train_loss 0.001461,test_loss 0.001406,train_acc 0.895583,test_acc 0.900000
epoch 37, train_loss 0.001447,test_loss 0.001395,train_acc 0.896367,test_acc 0.900700
epoch 38, train_loss 0.001437,test_loss 0.001385,train_acc 0.897350,test_acc 0.901100
epoch 39, train_loss 0.001426,test_loss 0.001374,train_acc 0.897383,test_acc 0.901500
epoch 40, train_loss 0.001414,test_loss 0.001364,train_acc 0.898050,test_acc 0.901600
epoch 41, train_loss 0.001404,test_loss 0.001352,train_acc 0.898783,test_acc 0.901000
epoch 42, train_loss 0.001393,test_loss 0.001344,train_acc 0.899417,test_acc 0.902400
epoch 43, train_loss 0.001386,test_loss 0.001335,train_acc 0.899900,test_acc 0.903300
epoch 44, train_loss 0.001376,test_loss 0.001327,train_acc 0.900250,test_acc 0.902700
epoch 45, train_loss 0.001367,test_loss 0.001320,train_acc 0.900617,test_acc 0.903700
epoch 46, train_loss 0.001358,test_loss 0.001311,train_acc 0.901400,test_acc 0.904900
epoch 47, train_loss 0.001352,test_loss 0.001304,train_acc 0.901717,test_acc 0.904800
epoch 48, train_loss 0.001343,test_loss 0.001298,train_acc 0.902217,test_acc 0.905800
epoch 49, train_loss 0.001338,test_loss 0.001291,train_acc 0.902667,test_acc 0.906600
epoch 50, train_loss 0.001329,test_loss 0.001285,train_acc 0.903100,test_acc 0.907300
#结果可视化
x = np.linspace(0,len(train_loss),len(train_loss))  
plt.plot(x,train_loss,label="train_loss",linewidth=1.5)  
plt.plot(x,test_loss,label="test_loss",linewidth=1.5)  
plt.xlabel("epoch")  
plt.ylabel("loss")  
plt.legend()  
plt.show()  

png

4、利用torch.nn实现前馈神经网络解决回归问题

#导入必要的包
import torch 
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader,TensorDataset
from sklearn.model_selection import train_test_split
from collections import OrderedDict
from torch.nn import init
#创建数据集
num_input ,num_example = 500,10000
true_w = torch.ones(1,num_input)*0.0056
true_b = 0.028
x_data = torch.tensor(np.random.normal(0,0.001,size  = (num_example,num_input)),dtype = torch.float32)
y = torch.mm(x_data,true_w.t()) +true_b
y += torch.normal(0,0.001,y.shape)
train_x,test_x,train_y,test_y = train_test_split(x_data,y,shuffle= True,test_size=0.3)
#读取数据
batch_size = 50
train_dataset = TensorDataset(train_x,train_y)
train_iter = DataLoader(
    dataset = train_dataset,
    batch_size = batch_size,
    shuffle = True,
    num_workers = 0,
)
test_dataset = TensorDataset(test_x,test_y)
test_iter = DataLoader(
    dataset = test_dataset,
    batch_size = batch_size,
    shuffle = True,
    num_workers = 0,
)
#定义模型
model= nn.Sequential(OrderedDict([
    ('linear1',nn.Linear(num_input,256)),
    ('linear2',nn.Linear(256,128)),
    ('linear3',nn.Linear(128,1)),
])
)
#参数初始化
for param in model.parameters():
    init.normal_(param,mean = 0 ,std = 0.001)
#for param in model.state_dict():  #查看初始化参数
#    print(param)
#    print(model.state_dict()[param])
#定义学习率,损失函数,优化器
lr = 0.001
loss = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(),lr)
#定义训练函数
def train(model,train_iter,test_iter,loss,num_epochs,batch_size,lr):
    train_ls,test_ls = [],[]
    for epoch in range(num_epochs):
        train_ls_sum ,test_ls_sum = 0,0
        for x,y in train_iter:
            y_pred = model(x)
            l = loss(y_pred,y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_ls_sum += l.item()
        for x ,y in test_iter:
            y_pred = model(x)
            l = loss(y_pred,y)
            test_ls_sum +=l.item()
        train_ls.append(train_ls_sum)
        test_ls.append(test_ls_sum)
        print('epoch %d,train_loss %.6f,test_loss %f'%(epoch+1, train_ls[epoch],test_ls[epoch]))
    return train_ls,test_ls
#开始训练
num_epochs = 30
train_loss ,test_loss = train(model,train_iter,test_iter,loss,num_epochs,batch_size,lr)
epoch 1,train_loss 0.076484,test_loss 0.024390
epoch 2,train_loss 0.043719,test_loss 0.013944
epoch 3,train_loss 0.025016,test_loss 0.007983
epoch 4,train_loss 0.014340,test_loss 0.004580
epoch 5,train_loss 0.008245,test_loss 0.002639
epoch 6,train_loss 0.004766,test_loss 0.001531
epoch 7,train_loss 0.002780,test_loss 0.000898
epoch 8,train_loss 0.001647,test_loss 0.000538
epoch 9,train_loss 0.001000,test_loss 0.000332
epoch 10,train_loss 0.000631,test_loss 0.000215
epoch 11,train_loss 0.000420,test_loss 0.000148
epoch 12,train_loss 0.000300,test_loss 0.000110
epoch 13,train_loss 0.000231,test_loss 0.000089
epoch 14,train_loss 0.000192,test_loss 0.000076
epoch 15,train_loss 0.000170,test_loss 0.000069
epoch 16,train_loss 0.000157,test_loss 0.000065
epoch 17,train_loss 0.000150,test_loss 0.000063
epoch 18,train_loss 0.000146,test_loss 0.000062
epoch 19,train_loss 0.000143,test_loss 0.000061
epoch 20,train_loss 0.000142,test_loss 0.000061
epoch 21,train_loss 0.000141,test_loss 0.000060
epoch 22,train_loss 0.000141,test_loss 0.000060
epoch 23,train_loss 0.000140,test_loss 0.000060
epoch 24,train_loss 0.000140,test_loss 0.000060
epoch 25,train_loss 0.000140,test_loss 0.000060
epoch 26,train_loss 0.000140,test_loss 0.000060
epoch 27,train_loss 0.000140,test_loss 0.000060
epoch 28,train_loss 0.000140,test_loss 0.000060
epoch 29,train_loss 0.000140,test_loss 0.000060
epoch 30,train_loss 0.000140,test_loss 0.000060
#结果可视化
x = np.linspace(0,len(train_loss),len(train_loss))
plt.plot(x,train_loss,label="train_loss",linewidth=1.5)
plt.plot(x,test_loss,label="test_loss",linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()

png

5、利用torch.nn实现前馈神经网络解决二分类问题

#导入必要的包
import torch 
import torch.nn as nn
from torch.utils.data import TensorDataset,DataLoader
from torch.nn import init
import torch.optim as optim
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
#创建数据集
num_inputs,num_example = 200,10000
x1 = torch.normal(2,1,(num_example,num_inputs))
y1 = torch.ones((num_example,1))
x2 = torch.normal(-2,1,(num_example,num_inputs))
y2 = torch.zeros((num_example,1))
x_data = torch.cat((x1,x2),dim=0)
y_data = torch.cat((y1,y2),dim = 0)
train_x,test_x,train_y,test_y = train_test_split(x_data,y_data,shuffle=True,test_size=0.3,stratify=y_data)
#读取数据
batch_size = 256
train_dataset = TensorDataset(train_x,train_y)
train_iter = DataLoader(
    dataset = train_dataset,
    shuffle = True,
    num_workers = 0,
    batch_size = batch_size
)
test_dataset = TensorDataset(test_x,test_y)
test_iter = DataLoader(
    dataset = test_dataset,
    shuffle = True,
    num_workers = 0,
    batch_size = batch_size
)
#定义模型
num_input,num_hidden,num_output = 200,256,1
class net(nn.Module):
    def __init__(self,num_input,num_hidden,num_output):
        super(net,self).__init__()
        self.linear1 = nn.Linear(num_input,num_hidden,bias =False)
        self.linear2 = nn.Linear(num_hidden,num_output,bias=False)
    def forward(self,input):
        out = self.linear1(input)
        out = self.linear2(out)
        return out
model = net(num_input,num_hidden,num_output)
print(model)
net(
  (linear1): Linear(in_features=200, out_features=256, bias=False)
  (linear2): Linear(in_features=256, out_features=1, bias=False)
)
#参数初始化
for param in model.parameters():
    init.normal_(param,mean=0,std=0.001)
#定义训练函数
lr = 0.001 #学习率
loss = nn.BCEWithLogitsLoss() #损失函数
optimizer = optim.SGD(model.parameters(),lr) #优化器
def train(net,train_iter,test_iter,loss,num_epochs,batch_size):
    train_ls,test_ls,train_acc,test_acc = [],[],[],[]
    for epoch in range(num_epochs):
        train_ls_sum,train_acc_sum,n = 0,0,0
        for x,y in train_iter:
            y_pred = model(x)
            l = loss(y_pred,y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_ls_sum +=l.item()
            train_acc_sum += (((y_pred>0.5)==y)+0.0).sum().item()
            n += y_pred.shape[0]
        train_ls.append(train_ls_sum)
        train_acc.append(train_acc_sum/n)
        
        test_ls_sum,test_acc_sum,n = 0,0,0
        for x,y in test_iter:
            y_pred = model(x)
            l = loss(y_pred,y)
            test_ls_sum +=l.item()
            test_acc_sum += (((y_pred>0.5)==y)+0.0).sum().item()
            n += y_pred.shape[0]
        test_ls.append(test_ls_sum)
        test_acc.append(test_acc_sum/n)
        print('epoch %d, train_loss %.6f,test_loss %f, train_acc %.6f,test_acc %f'
              %(epoch+1, train_ls[epoch],test_ls[epoch], train_acc[epoch],test_acc[epoch]))
    return train_ls,test_ls,train_acc,test_acc
#训练次数和学习率
num_epochs = 10
train_loss,test_loss,train_acc,test_acc = train(model,train_iter,test_iter,loss,num_epochs,batch_size)
epoch 1, train_loss 37.945052,test_loss 16.447379, train_acc 0.500000,test_acc 0.500000
epoch 2, train_loss 37.093137,test_loss 15.753976, train_acc 0.500000,test_acc 0.500000
epoch 3, train_loss 33.783420,test_loss 13.207288, train_acc 0.500000,test_acc 0.500000
epoch 4, train_loss 24.596180,test_loss 7.912598, train_acc 0.800643,test_acc 1.000000
epoch 5, train_loss 12.851330,test_loss 3.719252, train_acc 1.000000,test_acc 1.000000
epoch 6, train_loss 6.192728,test_loss 1.927684, train_acc 1.000000,test_acc 1.000000
epoch 7, train_loss 3.453243,test_loss 1.172522, train_acc 1.000000,test_acc 1.000000
epoch 8, train_loss 2.221730,test_loss 0.799978, train_acc 1.000000,test_acc 1.000000
epoch 9, train_loss 1.576031,test_loss 0.590545, train_acc 1.000000,test_acc 1.000000
epoch 10, train_loss 1.194129,test_loss 0.459820, train_acc 1.000000,test_acc 1.000000
#结果可视化
x = np.linspace(0,len(train_loss),len(train_loss))
plt.plot(x,train_loss,label="train_loss",linewidth=1.5)
plt.plot(x,test_loss,label="test_loss",linewidth=1.5)

plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()

png

6、利用torch.nn实现前馈神经网络解决多分类问题

#导入必要的包
import torch
import numpy as np
from torch import nn
from torchvision.datasets import MNIST
import torchvision.transforms  as transforms
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from torch import nn
#读取数据
mnist_train = datasets.MNIST(root = './data',train = True,download = False,transform =transforms.ToTensor())
mnist_test = datasets.MNIST(root ='./data',train = False,download = False,transform = transforms.ToTensor())

#训练集
batch_size = 256
train_iter = DataLoader( 
    dataset = mnist_train,
    shuffle = True,
    batch_size = batch_size,
    num_workers = 0
)
#测试集
test_iter = DataLoader(
    dataset  = mnist_test,
    shuffle  =False,
    batch_size = batch_size,
    num_workers = 0
)
#定义模型
num_input,num_hidden1,num_hidden2,num_output = 28*28,512,256,10

class DNN(nn.Module):
    def __init__(self,num_input,num_hidden1,num_hidden2,num_output):
        super(DNN,self).__init__()
        self.linear1 = nn.Linear(num_input,num_hidden1)
        self.linear2 = nn.Linear(num_hidden1,num_hidden2)
        self.linear3 = nn.Linear(num_hidden2,num_output)
    def forward(self,input):
        input = input.view(-1,784)
        out = self.linear1(input)
        out = self.linear2(out)
        out = self.linear3(out)
        return out
#参数初始化
net = DNN(num_input,num_hidden1,num_hidden2,num_output)
for param in net.parameters():
    nn.init.normal_(param,mean=0,std=0.001)
#定义训练函数
def train(net,train_iter,test_iter,loss,num_epochs):
    train_ls,test_ls,train_acc,test_acc = [],[],[],[]
    for epoch in range(num_epochs):
        train_ls_sum,train_acc_sum,n = 0,0,0
        for x,y in train_iter:
            y_pred = net(x)
            l = loss(y_pred,y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_ls_sum +=l.item()
            train_acc_sum += (y_pred.argmax(dim = 1)==y).sum().item()
            n += y_pred.shape[0]
        train_ls.append(train_ls_sum)
        train_acc.append(train_acc_sum/n)
        
        test_ls_sum,test_acc_sum,n = 0,0,0
        for x,y in test_iter:
            y_pred = net(x)
            l = loss(y_pred,y)
            test_ls_sum +=l.item()
            test_acc_sum += (y_pred.argmax(dim = 1)==y).sum().item()
            n += y_pred.shape[0]
        test_ls.append(test_ls_sum)
        test_acc.append(test_acc_sum/n)
        print('epoch %d, train_loss %.6f,test_loss %f, train_acc %.6f,test_acc %f'
              %(epoch+1, train_ls[epoch],test_ls[epoch], train_acc[epoch],test_acc[epoch]))
    return train_ls,test_ls,train_acc,test_acc
#训练次数和学习率
num_epochs = 20
lr = 0.1
loss  = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(),lr=lr)
#训练模型
train_loss,test_loss,train_acc,test_acc = train(net,train_iter,test_iter,loss,num_epochs)
epoch 1, train_loss 540.799467,test_loss 92.040990, train_acc 0.112367,test_acc 0.113500
epoch 2, train_loss 540.764961,test_loss 92.034218, train_acc 0.112367,test_acc 0.113500
epoch 3, train_loss 536.228873,test_loss 82.101556, train_acc 0.135217,test_acc 0.210900
epoch 4, train_loss 343.734764,test_loss 44.310093, train_acc 0.428300,test_acc 0.589900
epoch 5, train_loss 248.494388,test_loss 41.040715, train_acc 0.618967,test_acc 0.647000
epoch 6, train_loss 188.948851,test_loss 24.100985, train_acc 0.741950,test_acc 0.823700
epoch 7, train_loss 129.901514,test_loss 20.229743, train_acc 0.837283,test_acc 0.848900
epoch 8, train_loss 117.726660,test_loss 20.402800, train_acc 0.855067,test_acc 0.853800
epoch 9, train_loss 113.338089,test_loss 18.655747, train_acc 0.862267,test_acc 0.866700
epoch 10, train_loss 110.835923,test_loss 18.521891, train_acc 0.865283,test_acc 0.867700
epoch 11, train_loss 109.573734,test_loss 20.751974, train_acc 0.867683,test_acc 0.844300
epoch 12, train_loss 108.155132,test_loss 18.779759, train_acc 0.869133,test_acc 0.864000
epoch 13, train_loss 107.236334,test_loss 18.818478, train_acc 0.869767,test_acc 0.861400
epoch 14, train_loss 104.952656,test_loss 17.523473, train_acc 0.872950,test_acc 0.877000
epoch 15, train_loss 95.933338,test_loss 15.370274, train_acc 0.884533,test_acc 0.889200
epoch 16, train_loss 89.656444,test_loss 15.603838, train_acc 0.892850,test_acc 0.886000
epoch 17, train_loss 87.530237,test_loss 14.871621, train_acc 0.895283,test_acc 0.894700
epoch 18, train_loss 86.691829,test_loss 14.633050, train_acc 0.895700,test_acc 0.893300
epoch 19, train_loss 86.209352,test_loss 14.794541, train_acc 0.897517,test_acc 0.895500
epoch 20, train_loss 85.574102,test_loss 15.747811, train_acc 0.897617,test_acc 0.889300
#结果可视化
x = np.linspace(0,len(train_loss),len(train_loss))
plt.plot(x,train_loss,label="train_loss",linewidth=1.5)
plt.plot(x,test_loss,label="test_loss",linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()

png

7、在多分类实验的基础上使用至少三种不同的激活函数

#导入必要的包
import torch
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset,DataLoader
import torchvision
from IPython import display
from torchvision import transforms
#读取数据
mnist_train = datasets.MNIST(root = './data',train = True,download = False,transform =transforms.ToTensor())
mnist_test = datasets.MNIST(root ='./data',train = False,download = False,transform = transforms.ToTensor())

#训练集
batch_size = 256
train_iter = DataLoader( 
    dataset = mnist_train,
    shuffle = True,
    batch_size = batch_size,
    num_workers = 0
)
#测试集
test_iter = DataLoader(
    dataset  = mnist_test,
    shuffle  =False,
    batch_size = batch_size,
    num_workers = 0
)
#平滑层
class FlattenLayer(torch.nn.Module):
    def __init__(self):
         super(FlattenLayer, self).__init__()
    def forward(self, x):
        return x.view(x.shape[0],784)
#定义模型选择函数,选用不同的激活函数
num_input,num_hidden1,num_hidden2,num_output = 28*28,512,256,10
def choose_model(model_type):
    if model_type =='ReLU':
        activation = nn.ReLU()
    elif model_type =='ELU':
        activation = nn.ELU()
    else:
        activation = nn.Sigmoid()
    model = nn.Sequential()
    model.add_module("flatten",FlattenLayer())
    model.add_module("linear1",nn.Linear(num_input,num_hidden1))
    model.add_module("activation1",activation)
    model.add_module("linear2",nn.Linear(num_hidden1,num_hidden2))
    model.add_module("activation2",activation)
    model.add_module("linear3",nn.Linear(num_hidden2,num_output))
    return model 

#选用RELU激活函数
model = choose_model('ReLU')
print(model)
Sequential(
  (flatten): FlattenLayer()
  (linear1): Linear(in_features=784, out_features=512, bias=True)
  (activation1): ReLU()
  (linear2): Linear(in_features=512, out_features=256, bias=True)
  (activation2): ReLU()
  (linear3): Linear(in_features=256, out_features=10, bias=True)
)
#参数初始化

# for param in model.parameters():
#     nn.init.normal_(param,mean=0,std=0.001)
    
for m in model.modules():
     if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            nn.init.constant_(m.bias, 0.1)
#定义训练函数
def train(net,train_iter,test_iter,loss,num_epochs):
    train_ls,test_ls,train_acc,test_acc = [],[],[],[]
    for epoch in range(num_epochs):
        train_ls_sum,train_acc_sum,n = 0,0,0
        for x,y in train_iter:
            y_pred = net(x)
            l = loss(y_pred,y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_ls_sum +=l.item()
            train_acc_sum += (y_pred.argmax(dim = 1)==y).sum().item()
            n += y_pred.shape[0]
        train_ls.append(train_ls_sum)
        train_acc.append(train_acc_sum/n)
        
        test_ls_sum,test_acc_sum,n = 0,0,0
        for x,y in test_iter:
            y_pred = net(x)
            l = loss(y_pred,y)
            test_ls_sum +=l.item()
            test_acc_sum += (y_pred.argmax(dim = 1)==y).sum().item()
            n += y_pred.shape[0]
        test_ls.append(test_ls_sum)
        test_acc.append(test_acc_sum/n)
        print('epoch %d, train_loss %.6f,test_loss %f, train_acc %.6f,test_acc %f'
              %(epoch+1, train_ls[epoch],test_ls[epoch], train_acc[epoch],test_acc[epoch]))
    return train_ls,test_ls,train_acc,test_acc_sum
#训练次数和学习率
num_epochs = 20
lr = 0.01
loss  = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(),lr=lr)
#开始训练
train_loss,test_loss,train_acc,test_acc = train(model,train_iter,test_iter,loss,num_epochs)
epoch 1, train_loss 308.616926,test_loss 28.868985, train_acc 0.710950,test_acc 0.850200
epoch 2, train_loss 139.798595,test_loss 19.022477, train_acc 0.860183,test_acc 0.883100
epoch 3, train_loss 105.662049,test_loss 15.683169, train_acc 0.883750,test_acc 0.894400
epoch 4, train_loss 91.232774,test_loss 13.972448, train_acc 0.895533,test_acc 0.904300
epoch 5, train_loss 83.192525,test_loss 12.967671, train_acc 0.903183,test_acc 0.909900
epoch 6, train_loss 77.351885,test_loss 12.226103, train_acc 0.908600,test_acc 0.914600
epoch 7, train_loss 73.213573,test_loss 11.575412, train_acc 0.913383,test_acc 0.919000
epoch 8, train_loss 69.823536,test_loss 11.106608, train_acc 0.916533,test_acc 0.922400
epoch 9, train_loss 67.026345,test_loss 10.721981, train_acc 0.919583,test_acc 0.923500
epoch 10, train_loss 64.553079,test_loss 10.369467, train_acc 0.922233,test_acc 0.926800
epoch 11, train_loss 62.387995,test_loss 10.069592, train_acc 0.925150,test_acc 0.927800
epoch 12, train_loss 60.515980,test_loss 9.744006, train_acc 0.927633,test_acc 0.930400
epoch 13, train_loss 58.596680,test_loss 9.493351, train_acc 0.930050,test_acc 0.932400
epoch 14, train_loss 57.004431,test_loss 9.271407, train_acc 0.932017,test_acc 0.933600
epoch 15, train_loss 55.353209,test_loss 9.078836, train_acc 0.933733,test_acc 0.934100
epoch 16, train_loss 53.951313,test_loss 8.839157, train_acc 0.935333,test_acc 0.936100
epoch 17, train_loss 52.599126,test_loss 8.598792, train_acc 0.937450,test_acc 0.938200
epoch 18, train_loss 51.258125,test_loss 8.417678, train_acc 0.938817,test_acc 0.940300
epoch 19, train_loss 50.106973,test_loss 8.243511, train_acc 0.940600,test_acc 0.941700
epoch 20, train_loss 48.901164,test_loss 8.081861, train_acc 0.941867,test_acc 0.943000
#结果可视化
x = np.linspace(0,len(train_loss),len(train_loss))
plt.plot(x,train_loss,label="train_loss",linewidth=1.5)
plt.plot(x,test_loss,label="test_loss",linewidth=1.5)

plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()

png

#选用ELU激活函数
model = choose_model('ELU')
print(model)
Sequential(
  (flatten): FlattenLayer()
  (linear1): Linear(in_features=784, out_features=512, bias=True)
  (activation1): ELU(alpha=1.0)
  (linear2): Linear(in_features=512, out_features=256, bias=True)
  (activation2): ELU(alpha=1.0)
  (linear3): Linear(in_features=256, out_features=10, bias=True)
)
#训练次数和学习率
num_epochs = 20
lr = 0.01
loss  = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(),lr=lr)
#开始训练
train_loss,test_loss,train_acc,test_acc = train(model,train_iter,test_iter,loss,num_epochs)
epoch 1, train_loss 482.716761,test_loss 68.637083, train_acc 0.584467,test_acc 0.716000
epoch 2, train_loss 315.647539,test_loss 39.290958, train_acc 0.740183,test_acc 0.795000
epoch 3, train_loss 193.674695,test_loss 26.591439, train_acc 0.814867,test_acc 0.841900
epoch 4, train_loss 144.241250,test_loss 21.229966, train_acc 0.848333,test_acc 0.863500
epoch 5, train_loss 120.993418,test_loss 18.377310, train_acc 0.866050,test_acc 0.878800
epoch 6, train_loss 107.924811,test_loss 16.673374, train_acc 0.877433,test_acc 0.885900
epoch 7, train_loss 99.529073,test_loss 15.524590, train_acc 0.884550,test_acc 0.892100
epoch 8, train_loss 93.736792,test_loss 14.728054, train_acc 0.889667,test_acc 0.894700
epoch 9, train_loss 89.554104,test_loss 14.152311, train_acc 0.893467,test_acc 0.898300
epoch 10, train_loss 86.393284,test_loss 13.685651, train_acc 0.896683,test_acc 0.901400
epoch 11, train_loss 83.750542,test_loss 13.313578, train_acc 0.898783,test_acc 0.903000
epoch 12, train_loss 81.587891,test_loss 13.009891, train_acc 0.900767,test_acc 0.904800
epoch 13, train_loss 79.762710,test_loss 12.747708, train_acc 0.902550,test_acc 0.907000
epoch 14, train_loss 78.298414,test_loss 12.494096, train_acc 0.904167,test_acc 0.908900
epoch 15, train_loss 77.074863,test_loss 12.312340, train_acc 0.905717,test_acc 0.910900
epoch 16, train_loss 75.805569,test_loss 12.130164, train_acc 0.907400,test_acc 0.911400
epoch 17, train_loss 74.646040,test_loss 11.989905, train_acc 0.908717,test_acc 0.912700
epoch 18, train_loss 73.747813,test_loss 11.816247, train_acc 0.910317,test_acc 0.913900
epoch 19, train_loss 72.848534,test_loss 11.713620, train_acc 0.911283,test_acc 0.914900
epoch 20, train_loss 71.955502,test_loss 11.603105, train_acc 0.912200,test_acc 0.916300
#结果可视化
x = np.linspace(0,len(train_loss),len(train_loss))
plt.plot(x,train_loss,label="train_loss",linewidth=1.5)
plt.plot(x,test_loss,label="test_loss",linewidth=1.5)

plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()

png

#选用Sigmoid激活函数
model = choose_model('')
print(model)
Sequential(
  (flatten): FlattenLayer()
  (linear1): Linear(in_features=784, out_features=512, bias=True)
  (activation1): Sigmoid()
  (linear2): Linear(in_features=512, out_features=256, bias=True)
  (activation2): Sigmoid()
  (linear3): Linear(in_features=256, out_features=10, bias=True)
)
#训练次数和学习率
num_epochs = 20
lr = 0.01
loss  = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(),lr=lr)
#开始训练
train_loss,test_loss,train_acc,test_acc = train(model,train_iter,test_iter,loss,num_epochs)
epoch 1, train_loss 541.170689,test_loss 91.991064, train_acc 0.110733,test_acc 0.113500
epoch 2, train_loss 540.343640,test_loss 91.920487, train_acc 0.113600,test_acc 0.113700
epoch 3, train_loss 539.844377,test_loss 91.834417, train_acc 0.117217,test_acc 0.113500
epoch 4, train_loss 539.274360,test_loss 91.718597, train_acc 0.121817,test_acc 0.113500
epoch 5, train_loss 538.739230,test_loss 91.636162, train_acc 0.118667,test_acc 0.127700
epoch 6, train_loss 538.151297,test_loss 91.507132, train_acc 0.129417,test_acc 0.113500
epoch 7, train_loss 537.456731,test_loss 91.408447, train_acc 0.132767,test_acc 0.153600
epoch 8, train_loss 536.737648,test_loss 91.275780, train_acc 0.147283,test_acc 0.160200
epoch 9, train_loss 535.899790,test_loss 91.084962, train_acc 0.148617,test_acc 0.129700
epoch 10, train_loss 534.948543,test_loss 90.935366, train_acc 0.154017,test_acc 0.412700
epoch 11, train_loss 533.920649,test_loss 90.733490, train_acc 0.188233,test_acc 0.217200
epoch 12, train_loss 532.630701,test_loss 90.485824, train_acc 0.196717,test_acc 0.259400
epoch 13, train_loss 531.136951,test_loss 90.219259, train_acc 0.217433,test_acc 0.377800
epoch 14, train_loss 529.271270,test_loss 89.863493, train_acc 0.253733,test_acc 0.287800
epoch 15, train_loss 527.057478,test_loss 89.447361, train_acc 0.287800,test_acc 0.291900
epoch 16, train_loss 524.282188,test_loss 88.860193, train_acc 0.304533,test_acc 0.443800
epoch 17, train_loss 520.802405,test_loss 88.207349, train_acc 0.342383,test_acc 0.308700
epoch 18, train_loss 516.413755,test_loss 87.353588, train_acc 0.363917,test_acc 0.305600
epoch 19, train_loss 510.790632,test_loss 86.216557, train_acc 0.381167,test_acc 0.321400
epoch 20, train_loss 503.588557,test_loss 84.776352, train_acc 0.395683,test_acc 0.524600
#结果可视化
x = np.linspace(0,len(train_loss),len(train_loss))
plt.plot(x,train_loss,label="train_loss",linewidth=1.5)
plt.plot(x,test_loss,label="test_loss",linewidth=1.5)

plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()

png

Relu 激活函数函数

优点:

  1. 使用ReLU的SGD算法的收敛速度比 sigmoid 和 tanh 快。
  2. 在x>0区域上,不会出现梯度饱和、梯度消失的问题。
  3. 计算复杂度低,不需要进行指数运算,只要一个阈值就可以得到激活值。

缺点:

  1. ReLU的输出不是0均值的。
  2. Dead ReLU Problem(神经元坏死现象):ReLU在负数区域被kill的现象叫做dead relu。ReLU在训练的时很“脆弱”。在x<0时,梯度为0。这个神经元及之后的神经元梯度永远为0,不再对任何数据有所响应,导致相应参数永远不会被更新。

产生这种现象的两个原因:参数初始化问题;learning rate太高导致在训练过程中参数更新太大。

解决方法:采用Xavier初始化方法,以及避免将learning rate设置太大或使用adagrad等自动调节learning rate的算法。

ELU 激活函数函数

指数线性单元(ELU):具有relu的优势,没有Dead ReLU问题,输出均值接近0,实际上PReLU和Leaky ReLU都有这一优点。有负数饱和区域,从而对噪声有一些鲁棒性。可以看做是介于ReLU和Leaky ReLU之间的一个函数。当然,这个函数也需要计算exp,从而计算量上更大一些。

Sigmoid 激活函数函数

优点:

Sigmoid函数的输出在(0,1)之间,输出范围有限,优化稳定,可以用作输出层。
连续函数,便于求导。

缺点:

  1. sigmoid函数在变量取绝对值非常大的正值或负值时会出现饱和现象,意味着函数会变得很平,并且对输入的微小改变会变得不敏感。

    在反向传播时,当梯度接近于0,权重基本不会更新,很容易就会出现梯度消失的情况,从而无法完成深层网络的训练。

  2. sigmoid函数的输出不是0均值的,会导致后层的神经元的输入是非0均值的信号,这会对梯度产生影响。

  3. 计算复杂度高,因为sigmoid函数是指数形式。

8、对多分类任务中的模型评估隐藏层层数和隐藏单元个数对实验结果的影响

#导入必要的包
import torch 
import torch.nn as nn
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader,TensorDataset
#读取数据
mnist_train = datasets.MNIST(root = './data',train = True,download = False,transform =transforms.ToTensor())
mnist_test = datasets.MNIST(root ='./data',train = False,download = False,transform = transforms.ToTensor())
#把数据放在GPU上
train_x = mnist_train.data.cuda().type(torch.float32)
train_y = mnist_train.targets.cuda()
test_x = mnist_test.data.cuda().type(torch.float32)
test_y = mnist_test.targets.cuda()
batch_size = 64
#训练集
train_data = TensorDataset(train_x,train_y)
train_iter = DataLoader(
    dataset = train_data,
    shuffle = True,
    batch_size = batch_size
)
#测试集
test_data = TensorDataset(test_x,test_y)
test_iter = DataLoader(
    dataset = test_data,
    shuffle = True,
    batch_size = batch_size
)
#平滑层
class flatten(nn.Module):
    def __init__(self):
        super(flatten,self).__init__()
    def forward(self,x):
        return x.view(x.shape[0],784)

#使用一个隐藏层的模型
class  Linear1(nn.Module ):
    def __init__(self,num_input,num_hidden,num_output):
        super(Linear1,self).__init__()
        self.linear1 = nn.Linear(num_input,num_hidden)
        self.linear2 = nn.Linear(num_hidden,num_output)
        self.flatten = flatten()
        self.relu = nn.ReLU()
    def forward(self,input):
        out = self.flatten(input)
        out = self.relu(self.linear1(out))
        out = self.linear2(out)
        return out
    
#使用两个隐藏层的模型
class  Linear2(nn.Module ):
    def __init__(self,num_input,num_hidden1,num_hidden2,num_output):
        super(Linear2,self).__init__()
        self.linear1 = nn.Linear(num_input,num_hidden1)
        self.linear2 = nn.Linear(num_hidden1,num_hidden2)
        self.linear3 = nn.Linear(num_hidden2,num_output)
        self.flatten = flatten()
        self.relu = nn.ReLU()
    def forward(self,input):
        out = self.flatten(input)
        out = self.relu(self.linear1(out))
        out = self.relu(self.linear2(out))
        out = self.linear3(out)
        return out
    
#使用三个隐藏层的模型
class  Linear3(nn.Module ):
    def __init__(self,num_input,num_hidden1,num_hidden2,num_hidden3,num_output):
        super(Linear3,self).__init__()
        self.linear1 = nn.Linear(num_input,num_hidden1)
        self.linear2 = nn.Linear(num_hidden1,num_hidden2)
        self.linear3 = nn.Linear(num_hidden2,num_hidden3)
        self.linear4 = nn.Linear(num_hidden3,num_output)
        self.flatten = flatten()
        self.relu = nn.ReLU()
    def forward(self,input):
        out = self.flatten(input)
        out = self.relu(self.linear1(out))
        out = self.relu(self.linear2(out))
        out = self.relu(self.linear3(out))
        out = self.linear4(out)
        return out
#使用一个隐藏层,隐藏层单元数为256
num_input,num_hidden,num_output = 784,256,10
lr = 0.001
net = Linear1(num_input,num_hidden,num_output)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 
net = net.to(device) # 移动模型到cuda
loss = nn.CrossEntropyLoss(reduction='mean')
# optimizer = torch.optim.Adam(net.parameters(),lr = lr)
optimizer = torch.optim.SGD(net.parameters(),lr = lr)
#定义训练函数
def train(net,train_iter,test_iter,loss,num_epochs,batch_size,optimizer):
    train_ls ,test_ls, train_acc,test_acc = [],[],[],[]
    for epoch in range(num_epochs):
        train_ls_sum,train_acc_sum,n = 0,0,0
        for x,y in train_iter:
            y_pred = net(x)
            l = loss(y_pred,y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_ls_sum +=l
            train_acc_sum += (y_pred.argmax(dim = 1) == y).sum().item()
            n += x.shape[0] 
        train_ls.append(train_ls_sum)
        train_acc.append(train_acc_sum/n)
        
        test_ls_sum,test_acc_sum ,n = 0,0,0
        for x,y in test_iter:
            y_pred = net(x)
            l = loss(y_pred,y)
            test_ls_sum +=l
            test_acc_sum += (y_pred.argmax(dim = 1) == y).sum().item()
            n += x.shape[0] 
        test_ls.append(test_ls_sum)
        test_acc.append(test_acc_sum/n)
        print('epoch: %d, train loss: %f, test loss: %f , train acc: %f, test acc: %f  '
              %(epoch+1,train_ls[-1],test_ls[-1],train_acc[-1],test_acc[-1]))
    return train_ls,test_ls
#开始训练
num_epochs = 40
train_ls,test_ls = train(net,train_iter,test_iter,loss,num_epochs,batch_size,optimizer)
epoch: 1, train loss: 621.601379, test loss: 38.185352 , train acc: 0.891433, test acc: 0.932000  
epoch: 2, train loss: 161.839584, test loss: 29.395357 , train acc: 0.949333, test acc: 0.947000  
epoch: 3, train loss: 111.308914, test loss: 26.386192 , train acc: 0.965283, test acc: 0.953500  
epoch: 4, train loss: 85.539139, test loss: 24.804693 , train acc: 0.973517, test acc: 0.956900  
epoch: 5, train loss: 67.362610, test loss: 23.844164 , train acc: 0.979233, test acc: 0.959600  
epoch: 6, train loss: 55.357849, test loss: 22.134243 , train acc: 0.982433, test acc: 0.964000  
epoch: 7, train loss: 45.193398, test loss: 22.299192 , train acc: 0.986150, test acc: 0.964200  
epoch: 8, train loss: 37.948372, test loss: 23.407461 , train acc: 0.988567, test acc: 0.962800  
epoch: 9, train loss: 31.578516, test loss: 22.971731 , train acc: 0.990950, test acc: 0.964100  
epoch: 10, train loss: 27.423489, test loss: 22.313839 , train acc: 0.992083, test acc: 0.965300  
epoch: 11, train loss: 23.371908, test loss: 22.942219 , train acc: 0.993250, test acc: 0.965000  
epoch: 12, train loss: 19.784626, test loss: 22.711353 , train acc: 0.994783, test acc: 0.965400  
epoch: 13, train loss: 16.590122, test loss: 22.317698 , train acc: 0.996267, test acc: 0.965500  
epoch: 14, train loss: 14.155085, test loss: 22.707125 , train acc: 0.996750, test acc: 0.965400  
epoch: 15, train loss: 12.591855, test loss: 22.448900 , train acc: 0.997300, test acc: 0.968200  
epoch: 16, train loss: 10.816659, test loss: 23.215742 , train acc: 0.997783, test acc: 0.966700  
epoch: 17, train loss: 9.363224, test loss: 22.951975 , train acc: 0.998600, test acc: 0.968600  
epoch: 18, train loss: 8.017034, test loss: 22.850584 , train acc: 0.998983, test acc: 0.968500  
epoch: 19, train loss: 7.078544, test loss: 23.094091 , train acc: 0.999050, test acc: 0.968700  
epoch: 20, train loss: 6.354239, test loss: 23.198242 , train acc: 0.999167, test acc: 0.968900  
epoch: 21, train loss: 5.512948, test loss: 23.960939 , train acc: 0.999467, test acc: 0.968500  
epoch: 22, train loss: 5.018165, test loss: 23.657450 , train acc: 0.999550, test acc: 0.969400  
epoch: 23, train loss: 4.442701, test loss: 23.721956 , train acc: 0.999783, test acc: 0.969700  
epoch: 24, train loss: 4.040270, test loss: 23.780748 , train acc: 0.999850, test acc: 0.969800  
epoch: 25, train loss: 3.706507, test loss: 23.895079 , train acc: 0.999817, test acc: 0.969300  
epoch: 26, train loss: 3.388275, test loss: 23.945677 , train acc: 0.999883, test acc: 0.969800  
epoch: 27, train loss: 3.099135, test loss: 24.599882 , train acc: 0.999933, test acc: 0.970000  
epoch: 28, train loss: 2.906347, test loss: 24.428415 , train acc: 0.999950, test acc: 0.970500  
epoch: 29, train loss: 2.673533, test loss: 24.164261 , train acc: 0.999950, test acc: 0.970600  
epoch: 30, train loss: 2.503164, test loss: 24.230101 , train acc: 0.999967, test acc: 0.970500  
epoch: 31, train loss: 2.325522, test loss: 25.083099 , train acc: 0.999983, test acc: 0.970100  
epoch: 32, train loss: 2.198251, test loss: 24.612061 , train acc: 0.999983, test acc: 0.970600  
epoch: 33, train loss: 2.071165, test loss: 24.733877 , train acc: 0.999983, test acc: 0.971400  
epoch: 34, train loss: 1.950522, test loss: 26.051252 , train acc: 0.999983, test acc: 0.970800  
epoch: 35, train loss: 1.837415, test loss: 24.839462 , train acc: 0.999983, test acc: 0.970600  
epoch: 36, train loss: 1.747751, test loss: 24.902990 , train acc: 1.000000, test acc: 0.971200  
epoch: 37, train loss: 1.664808, test loss: 24.845909 , train acc: 1.000000, test acc: 0.970700  
epoch: 38, train loss: 1.598066, test loss: 24.953428 , train acc: 1.000000, test acc: 0.970800  
epoch: 39, train loss: 1.525339, test loss: 25.574154 , train acc: 1.000000, test acc: 0.971000  
epoch: 40, train loss: 1.457463, test loss: 25.129980 , train acc: 1.000000, test acc: 0.970700  
#结果可视化
for i, v in enumerate(train_ls): train_ls[i] = v.cpu().item()
for i, v in enumerate(test_ls): test_ls[i] = v.cpu().item()
x = np.linspace(0,len(train_ls),len(train_ls))
plt.plot(x,train_ls,label="train_loss",linewidth=1.5)
plt.plot(x,test_ls,label="test_loss",linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()

png

#使用一个隐藏层,隐藏层单元数为10
num_input,num_hidden,num_output = 784,10,10
lr = 0.001
net = Linear1(num_input,num_hidden,num_output)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 
net = net.to(device) # 移动模型到cuda
loss = nn.CrossEntropyLoss(reduction='mean')
# optimizer = torch.optim.Adam(net.parameters(),lr = lr)
optimizer = torch.optim.SGD(net.parameters(),lr = lr)
#开始训练
num_epochs = 40
train_ls,test_ls = train(net,train_iter,test_iter,loss,num_epochs,batch_size,optimizer)
epoch: 1, train loss: 1882.923584, test loss: 292.056213 , train acc: 0.256517, test acc: 0.291500  
epoch: 2, train loss: 1707.555298, test loss: 286.507935 , train acc: 0.297083, test acc: 0.307000  
epoch: 3, train loss: 1657.658569, test loss: 263.167328 , train acc: 0.316033, test acc: 0.333800  
epoch: 4, train loss: 1513.728027, test loss: 251.886261 , train acc: 0.391833, test acc: 0.424600  
epoch: 5, train loss: 1464.693115, test loss: 248.435608 , train acc: 0.431783, test acc: 0.428400  
epoch: 6, train loss: 1428.319458, test loss: 237.969772 , train acc: 0.434833, test acc: 0.442300  
epoch: 7, train loss: 1411.860107, test loss: 237.456528 , train acc: 0.435233, test acc: 0.439100  
epoch: 8, train loss: 1395.036011, test loss: 232.131042 , train acc: 0.437733, test acc: 0.457100  
epoch: 9, train loss: 1380.810669, test loss: 230.611664 , train acc: 0.458817, test acc: 0.462800  
epoch: 10, train loss: 1366.782715, test loss: 227.642639 , train acc: 0.461250, test acc: 0.463500  
epoch: 11, train loss: 1355.928345, test loss: 235.830261 , train acc: 0.464433, test acc: 0.443800  
epoch: 12, train loss: 1346.336548, test loss: 223.974808 , train acc: 0.464033, test acc: 0.468800  
epoch: 13, train loss: 1339.983887, test loss: 224.731720 , train acc: 0.466100, test acc: 0.472600  
epoch: 14, train loss: 1331.307251, test loss: 226.499115 , train acc: 0.467883, test acc: 0.463700  
epoch: 15, train loss: 1325.929199, test loss: 223.289520 , train acc: 0.468933, test acc: 0.464300  
epoch: 16, train loss: 1320.538330, test loss: 222.412247 , train acc: 0.470617, test acc: 0.470400  
epoch: 17, train loss: 1316.345581, test loss: 221.446472 , train acc: 0.472567, test acc: 0.474800  
epoch: 18, train loss: 1308.953613, test loss: 220.888336 , train acc: 0.474867, test acc: 0.476300  
epoch: 19, train loss: 1303.424438, test loss: 220.824158 , train acc: 0.474967, test acc: 0.467500  
epoch: 20, train loss: 1298.293335, test loss: 216.120331 , train acc: 0.475783, test acc: 0.481500  
epoch: 21, train loss: 1295.377319, test loss: 218.624985 , train acc: 0.476017, test acc: 0.481000  
epoch: 22, train loss: 1291.513672, test loss: 225.369675 , train acc: 0.478117, test acc: 0.450200  
epoch: 23, train loss: 1285.035400, test loss: 216.369339 , train acc: 0.478933, test acc: 0.474700  
epoch: 24, train loss: 1282.567139, test loss: 213.740005 , train acc: 0.478467, test acc: 0.481600  
epoch: 25, train loss: 1276.716919, test loss: 212.172501 , train acc: 0.480150, test acc: 0.480600  
epoch: 26, train loss: 1271.666016, test loss: 211.772537 , train acc: 0.480517, test acc: 0.491100  
epoch: 27, train loss: 1268.365112, test loss: 219.147308 , train acc: 0.481100, test acc: 0.471400  
epoch: 28, train loss: 1264.611816, test loss: 213.096161 , train acc: 0.483333, test acc: 0.474700  
epoch: 29, train loss: 1261.807129, test loss: 213.399536 , train acc: 0.481050, test acc: 0.489700  
epoch: 30, train loss: 1256.287354, test loss: 210.990631 , train acc: 0.485167, test acc: 0.480800  
epoch: 31, train loss: 1252.884033, test loss: 214.785492 , train acc: 0.485600, test acc: 0.468900  
epoch: 32, train loss: 1250.899658, test loss: 208.248489 , train acc: 0.485467, test acc: 0.489600  
epoch: 33, train loss: 1245.671753, test loss: 210.261353 , train acc: 0.486917, test acc: 0.486100  
epoch: 34, train loss: 1223.911133, test loss: 221.004822 , train acc: 0.501617, test acc: 0.477700  
epoch: 35, train loss: 1089.320435, test loss: 167.636948 , train acc: 0.575200, test acc: 0.624300  
epoch: 36, train loss: 998.740234, test loss: 156.567474 , train acc: 0.630883, test acc: 0.661000  
epoch: 37, train loss: 948.370605, test loss: 164.442108 , train acc: 0.655750, test acc: 0.658900  
epoch: 38, train loss: 934.384460, test loss: 159.945007 , train acc: 0.661083, test acc: 0.660100  
epoch: 39, train loss: 914.243469, test loss: 149.445465 , train acc: 0.666967, test acc: 0.681800  
epoch: 40, train loss: 907.217957, test loss: 158.249619 , train acc: 0.667667, test acc: 0.673300  
#结果可视化
for i, v in enumerate(train_ls): train_ls[i] = v.cpu().item()
for i, v in enumerate(test_ls): test_ls[i] = v.cpu().item()
x = np.linspace(0,len(train_ls),len(train_ls))
plt.plot(x,train_ls,label="train_loss",linewidth=1.5)
plt.plot(x,test_ls,label="test_loss",linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()

png

#使用两个隐藏层,隐藏层单元数为256、128
num_input,num_hidden1,num_hidden2,num_output = 784,256,128,10
lr = 0.001
net = Linear2(num_input,num_hidden1,num_hidden2,num_output)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 
net = net.to(device) # 移动模型到cuda
loss = nn.CrossEntropyLoss(reduction='mean')
# optimizer = torch.optim.Adam(net.parameters(),lr = lr)
optimizer = torch.optim.SGD(net.parameters(),lr = lr)
#开始训练
num_epochs = 40
train_ls,test_ls = train(net,train_iter,test_iter,loss,num_epochs,batch_size,optimizer)
epoch: 1, train loss: 390.323212, test loss: 33.214958 , train acc: 0.886583, test acc: 0.937300  
epoch: 2, train loss: 158.090027, test loss: 24.187660 , train acc: 0.948783, test acc: 0.953100  
epoch: 3, train loss: 115.073936, test loss: 20.798477 , train acc: 0.962833, test acc: 0.960200  
epoch: 4, train loss: 89.472221, test loss: 19.384373 , train acc: 0.971600, test acc: 0.962900  
epoch: 5, train loss: 73.079132, test loss: 18.388588 , train acc: 0.976867, test acc: 0.965100  
epoch: 6, train loss: 61.154640, test loss: 17.342785 , train acc: 0.981000, test acc: 0.966200  
epoch: 7, train loss: 51.596523, test loss: 17.139698 , train acc: 0.983733, test acc: 0.966100  
epoch: 8, train loss: 43.513409, test loss: 17.951765 , train acc: 0.986733, test acc: 0.966700  
epoch: 9, train loss: 37.227768, test loss: 16.330507 , train acc: 0.988800, test acc: 0.969400  
epoch: 10, train loss: 31.859921, test loss: 16.202168 , train acc: 0.990517, test acc: 0.969400  
epoch: 11, train loss: 27.363804, test loss: 16.035997 , train acc: 0.992483, test acc: 0.970400  
epoch: 12, train loss: 23.427250, test loss: 15.378540 , train acc: 0.994517, test acc: 0.972700  
epoch: 13, train loss: 20.392759, test loss: 16.058687 , train acc: 0.995150, test acc: 0.972100  
epoch: 14, train loss: 17.521107, test loss: 15.464679 , train acc: 0.996417, test acc: 0.973300  
epoch: 15, train loss: 15.396051, test loss: 15.600229 , train acc: 0.997317, test acc: 0.972800  
epoch: 16, train loss: 13.394774, test loss: 15.679301 , train acc: 0.997667, test acc: 0.972100  
epoch: 17, train loss: 11.599453, test loss: 15.663043 , train acc: 0.998517, test acc: 0.972700  
epoch: 18, train loss: 10.544410, test loss: 15.570680 , train acc: 0.998650, test acc: 0.973700  
epoch: 19, train loss: 9.173516, test loss: 16.157354 , train acc: 0.999250, test acc: 0.972000  
epoch: 20, train loss: 8.200336, test loss: 15.672752 , train acc: 0.999283, test acc: 0.973800  
epoch: 21, train loss: 7.410593, test loss: 16.201338 , train acc: 0.999517, test acc: 0.972900  
epoch: 22, train loss: 6.666228, test loss: 15.725000 , train acc: 0.999683, test acc: 0.973100  
epoch: 23, train loss: 6.053044, test loss: 15.855933 , train acc: 0.999817, test acc: 0.973200  
epoch: 24, train loss: 5.571966, test loss: 16.054897 , train acc: 0.999817, test acc: 0.973600  
epoch: 25, train loss: 5.097965, test loss: 16.192806 , train acc: 0.999900, test acc: 0.973200  
epoch: 26, train loss: 4.673306, test loss: 16.048717 , train acc: 0.999917, test acc: 0.973600  
epoch: 27, train loss: 4.378810, test loss: 16.002748 , train acc: 0.999833, test acc: 0.973700  
epoch: 28, train loss: 4.049458, test loss: 16.066948 , train acc: 0.999933, test acc: 0.973900  
epoch: 29, train loss: 3.744870, test loss: 16.227142 , train acc: 0.999950, test acc: 0.973400  
epoch: 30, train loss: 3.556428, test loss: 16.264824 , train acc: 0.999950, test acc: 0.974000  
epoch: 31, train loss: 3.311225, test loss: 16.318981 , train acc: 0.999967, test acc: 0.974400  
epoch: 32, train loss: 3.112557, test loss: 16.247766 , train acc: 0.999967, test acc: 0.974100  
epoch: 33, train loss: 2.941828, test loss: 16.332848 , train acc: 1.000000, test acc: 0.974400  
epoch: 34, train loss: 2.765349, test loss: 16.701546 , train acc: 1.000000, test acc: 0.974000  
epoch: 35, train loss: 2.636833, test loss: 16.458916 , train acc: 1.000000, test acc: 0.974300  
epoch: 36, train loss: 2.515354, test loss: 16.530739 , train acc: 1.000000, test acc: 0.974400  
epoch: 37, train loss: 2.385297, test loss: 16.583447 , train acc: 1.000000, test acc: 0.974300  
epoch: 38, train loss: 2.285072, test loss: 16.682545 , train acc: 1.000000, test acc: 0.973800  
epoch: 39, train loss: 2.186315, test loss: 16.659136 , train acc: 1.000000, test acc: 0.974100  
epoch: 40, train loss: 2.093811, test loss: 16.794155 , train acc: 1.000000, test acc: 0.974100  
#结果可视化
for i, v in enumerate(train_ls): train_ls[i] = v.cpu().item()
for i, v in enumerate(test_ls): test_ls[i] = v.cpu().item()
x = np.linspace(0,len(train_ls),len(train_ls))
plt.plot(x,train_ls,label="train_loss",linewidth=1.5)
plt.plot(x,test_ls,label="test_loss",linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()

png

1.确定隐藏层的层数

对于一些很简单的数据集,一层甚至两层隐藏元都已经够了,隐藏层的层数不一定设置的越好,过多的隐藏层可能会导致数据过拟合。对于自然语言处理以及CV领域,则建议增加网络层数。

层数越深,理论上来说模型拟合函数的能力增强,效果会更好,但是实际上更深的层数可能会带来过拟合的问题,同时也会增加训练难度,使模型难以收敛。

因此这里给出的建议是,在使用神经网络时,最好可以参照已有的性能良好的模型。

尝试迁移和微调已有的预训练模型,能取得事半功倍的效果。

2.确定隐藏层中的神经元数量

在隐藏层中使用太少的神经元将导致欠拟合(underfitting)。

相反,使用过多的神经元同样会导致一些问题。首先,隐藏层中的神经元过多可能会导致过拟合(overfitting)。

当神经网络具有过多的节点时,训练集中包含的有限信息量不足以训练隐藏层中的所有神经元,因此就会导致过拟合。即使训练数据包含的信息量足够,隐藏层中过多的神经元会增加训练时间,从而难以达到预期的效果。显然,选择一个合适的隐藏层神经元数量是至关重要的。

通常对于某些数据集,拥有较大的第一层并在其后跟随较小的层将导致更好的性能,因为第一层可以学习很多低阶的特征,这些较低层的特征可以馈入后续层中,提取出较高阶特征。

需要注意的是,与在每一层中添加更多的神经元相比,添加层层数将获得更大的性能提升。因此,不要在一个隐藏层中加入过多的神经元。

按照经验来说,神经元数量可以由以下规则来确定:

还有另一种方法可供参考,神经元数量通常可以由一下几个原则大致确定: 
1 隐藏神经元的数量应在输入层的大小和输出层的大小之间。
2 隐藏神经元的数量应为输入层大小的2/3加上输出层大小的2/3。
3 隐藏神经元的数量应小于输入层大小的两倍。
总而言之,隐藏层神经元是最佳数量需要自己通过不断试验来进行微调,建议从一个较小数值比如1到3层和1到100个神经元开始。

如果欠拟合然后慢慢添加更多的层和神经元,如果过拟合就减小层数和神经元。此外,在实际过程中还可以考虑引入Batch Normalization, Dropout, 正则化等降低过拟合的方法。

9、在多分类任务实验中手动实现dropout

import torch
import torch.nn as nn
import numpy as np
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
#读取数据
mnist_train = torchvision.datasets.MNIST(root='./data', train=True, download=False, transform=transforms.ToTensor())  
mnist_test = torchvision.datasets.MNIST(root='./data', train=False,download=False, transform=transforms.ToTensor())  

  
batch_size = 256 
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True,num_workers=0)  
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False,num_workers=0)  
#初始化参数  
num_inputs,num_hiddens,num_outputs =784, 256,10
num_epochs=30
lr = 0.001
def init_param():
    W1 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens,num_inputs)), dtype=torch.float32)  
    b1 = torch.zeros(1, dtype=torch.float32)  
    W2 = torch.tensor(np.random.normal(0, 0.01, (num_outputs,num_hiddens)), dtype=torch.float32)  
    b2 = torch.zeros(1, dtype=torch.float32)  
    params =[W1,b1,W2,b2]
    for param in params:  
        param.requires_grad_(requires_grad=True)  
    return W1,b1,W2,b2
#手动定义dropout函数
def dropout(X, drop_prob): #drop_porb是一个概率值,介于0-1,代表要丢弃多少比例的神经元
    X = X.float()
    assert 0 <= drop_prob <= 1
    keep_prob = 1 - drop_prob
    if keep_prob == 0:
        return torch.zeros_like(X)
    mask = (torch.rand(X.shape) < keep_prob).float()
    return mask * X / keep_prob
#定义模型
def net(X, is_training=True):
    X = X.view(-1, num_inputs)
    H1 = (torch.matmul(X, W1.t()) + b1).relu()
    if is_training:      #只有在训练模式下才需要dropout,测试情况下不需要
        H1 = dropout(H1, drop_prob1)
    return (torch.matmul(H1,W2.t()) + b2).relu()
#定义训练函数
def train(net,train_iter,test_iter,loss,num_epochs,batch_size,lr=None,optimizer=None):
    train_ls, test_ls = [], []
    for epoch in range(num_epochs):
        ls, count = 0, 0
        for X,y in train_iter:
            l=loss(net(X),y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            ls += l.item()
            count += y.shape[0]
        train_ls.append(ls)
        ls, count = 0, 0
        for X,y in test_iter:
            l=loss(net(X,is_training=False),y)
            ls += l.item()
            count += y.shape[0]
        test_ls.append(ls)
        if(epoch+1)%10==0:
            print('epoch: %d, train loss: %f, test loss: %f'%(epoch+1,train_ls[-1],test_ls[-1]))
    return train_ls,test_ls
#定义drop从0到1,训练十次,观察不同drop对结果的影响
drop_probs = np.arange(0,1.1,0.1)
Train_ls, Test_ls = [], []
#开始训练
for drop_prob in drop_probs:
    drop_prob1 = drop_prob
    W1,b1,W2,b2 = init_param()
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD([W1,b1,W2,b2],lr = 0.001)
    train_ls, test_ls =  train(net,train_iter,test_iter,loss,num_epochs,batch_size,lr,optimizer)   
    Train_ls.append(train_ls)
    Test_ls.append(test_ls)
epoch: 10, train loss: 516.144661, test loss: 87.292030
epoch: 20, train loss: 409.631835, test loss: 67.948953
epoch: 30, train loss: 267.421560, test loss: 43.781419
epoch: 10, train loss: 523.720548, test loss: 88.764539
epoch: 20, train loss: 449.655348, test loss: 75.243959
epoch: 30, train loss: 309.441015, test loss: 50.588724
epoch: 10, train loss: 516.920736, test loss: 87.366389
epoch: 20, train loss: 413.409649, test loss: 68.334782
epoch: 30, train loss: 273.086134, test loss: 44.036746
epoch: 10, train loss: 512.581922, test loss: 86.573160
epoch: 20, train loss: 403.593178, test loss: 66.554746
epoch: 30, train loss: 270.923455, test loss: 43.437635
epoch: 10, train loss: 518.073110, test loss: 87.617643
epoch: 20, train loss: 419.529858, test loss: 69.366209
epoch: 30, train loss: 284.807317, test loss: 45.474226
epoch: 10, train loss: 517.799735, test loss: 87.550153
epoch: 20, train loss: 419.551413, test loss: 69.145667
epoch: 30, train loss: 288.334462, test loss: 45.601447
epoch: 10, train loss: 518.252947, test loss: 87.595468
epoch: 20, train loss: 425.340550, test loss: 70.020597
epoch: 30, train loss: 300.287416, test loss: 47.258004
epoch: 10, train loss: 521.961344, test loss: 88.277417
epoch: 20, train loss: 437.840800, test loss: 72.146243
epoch: 30, train loss: 318.444086, test loss: 49.818887
epoch: 10, train loss: 520.050051, test loss: 87.886063
epoch: 20, train loss: 438.702492, test loss: 71.917437
epoch: 30, train loss: 330.767534, test loss: 50.951549
epoch: 10, train loss: 525.508445, test loss: 88.853060
epoch: 20, train loss: 462.326507, test loss: 75.884879
epoch: 30, train loss: 372.319627, test loss: 57.118928
epoch: 10, train loss: 541.107560, test loss: 92.105321
epoch: 20, train loss: 541.107560, test loss: 92.105321
epoch: 30, train loss: 541.107560, test loss: 92.105321
#结果可视化
x = np.linspace(0,len(train_ls),len(train_ls))
plt.figure(figsize=(10,8))
for i in range(0,len(drop_probs)):
    plt.plot(x,Train_ls[i],label= 'drop_prob=%.1f'%(drop_probs[i]),linewidth=1.5)
    plt.xlabel('epoch')
    plt.ylabel('loss')
# plt.legend()
plt.legend(loc=2, bbox_to_anchor=(1.05,1.0),borderaxespad = 0.)
plt.title('train loss with dropout')
plt.show()

png

10、在多分类任务实验中用torch.nn实现dropout

#导入必要的包
import torch
import torch.nn as nn
import numpy as np
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
#读取数据
mnist_train = datasets.MNIST(root = './data',train = True,download = False,transform =transforms.ToTensor())
mnist_test = datasets.MNIST(root ='./data',train = False,download = False,transform = transforms.ToTensor())

batch_size = 256
train_iter = DataLoader( 
    dataset = mnist_train,
    shuffle = True,
    batch_size = batch_size,
    num_workers = 0
)
test_iter = DataLoader(
    dataset  = mnist_test,
    shuffle  =False,
    batch_size = batch_size,
    num_workers = 0
)
#定义模型
class LinearNet(nn.Module):
    def __init__(self,num_inputs, num_outputs, num_hiddens1, num_hiddens2, drop_prob1,drop_prob2):
        super(LinearNet,self).__init__()
        self.linear1 = nn.Linear(num_inputs,num_hiddens1)
        self.relu = nn.ReLU()
        self.drop1 = nn.Dropout(drop_prob1) #nn模块封装好了Dropout层,只需要输入dropout值即可
        self.linear2 = nn.Linear(num_hiddens1,num_hiddens2)
        self.drop2 = nn.Dropout(drop_prob2)
        self.linear3 = nn.Linear(num_hiddens2,num_outputs)
        self.flatten  = nn.Flatten()
    
    def forward(self,x):
        x = self.flatten(x)
        x = self.linear1(x)
        x = self.relu(x)
        x = self.drop1(x)
        x = self.linear2(x)
        x = self.relu(x)
        x = self.drop2(x)
        x = self.linear3(x)
        y = self.relu(x)
        return y
#定义训练函数
def train(net,train_iter,test_iter,loss,num_epochs,batch_size,params=None,lr=None,optimizer=None):
    train_ls, test_ls = [], []
    for epoch in range(num_epochs):
        ls, count = 0, 0
        for X,y in train_iter:
            l=loss(net(X),y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            ls += l.item()
            count += y.shape[0]
        train_ls.append(ls)
        ls, count = 0, 0
        for X,y in test_iter:
            l=loss(net(X),y)
            ls += l.item()
            count += y.shape[0]
        test_ls.append(ls)
        if(epoch+1)%5==0:
            print('epoch: %d, train loss: %f, test loss: %f'%(epoch+1,train_ls[-1],test_ls[-1]))
    return train_ls,test_ls
#初始化参数,定义隐藏层单元个数
num_inputs,num_hiddens1,num_hiddens2,num_outputs =784, 256,256,10
num_epochs=20
lr = 0.1
#drop从0至1,训练十次,观察不同drop对训练结果的影响
drop_probs = np.arange(0,1.1,0.1)
Train_ls, Test_ls = [], []
#开始训练
for drop_prob in drop_probs:
    net = LinearNet(num_inputs, num_outputs, num_hiddens1, num_hiddens2, drop_prob,drop_prob)
    for param in net.parameters():
        nn.init.normal_(param,mean=0, std= 0.01)
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(),lr)
    train_ls, test_ls = train(net,train_iter,test_iter,loss,num_epochs,batch_size,net.parameters,lr,optimizer)
    Train_ls.append(train_ls)
    Test_ls.append(test_ls)
epoch: 5, train loss: 371.406946, test loss: 62.016456
epoch: 10, train loss: 348.145985, test loss: 58.949540
epoch: 15, train loss: 340.822026, test loss: 57.836469
epoch: 20, train loss: 336.717795, test loss: 57.494935
epoch: 5, train loss: 135.957030, test loss: 22.122259
epoch: 10, train loss: 98.141299, test loss: 16.606467
epoch: 15, train loss: 67.886688, test loss: 5.280764
epoch: 20, train loss: 19.440876, test loss: 4.004938
epoch: 5, train loss: 75.315638, test loss: 11.426665
epoch: 10, train loss: 39.354027, test loss: 6.596288
epoch: 15, train loss: 27.068406, test loss: 4.839954
epoch: 20, train loss: 20.794823, test loss: 4.110217
epoch: 5, train loss: 78.580311, test loss: 12.166224
epoch: 10, train loss: 41.970184, test loss: 6.900298
epoch: 15, train loss: 29.562551, test loss: 5.278342
epoch: 20, train loss: 22.894741, test loss: 4.352063
epoch: 5, train loss: 113.997779, test loss: 14.573777
epoch: 10, train loss: 48.520414, test loss: 7.845017
epoch: 15, train loss: 34.590822, test loss: 5.907451
epoch: 20, train loss: 27.114159, test loss: 5.264598
epoch: 5, train loss: 84.999304, test loss: 12.645558
epoch: 10, train loss: 50.530234, test loss: 8.273804
epoch: 15, train loss: 37.306200, test loss: 6.677785
epoch: 20, train loss: 30.186274, test loss: 5.598818
epoch: 5, train loss: 98.425048, test loss: 15.513188
epoch: 10, train loss: 56.586863, test loss: 9.367530
epoch: 15, train loss: 44.069431, test loss: 7.759971
epoch: 20, train loss: 36.926898, test loss: 6.359529
epoch: 5, train loss: 119.258010, test loss: 18.354252
epoch: 10, train loss: 71.261536, test loss: 11.617584
epoch: 15, train loss: 55.960298, test loss: 9.555392
epoch: 20, train loss: 48.684063, test loss: 8.275167
epoch: 5, train loss: 136.057679, test loss: 21.188809
epoch: 10, train loss: 91.155276, test loss: 15.086043
epoch: 15, train loss: 77.483626, test loss: 13.030408
epoch: 20, train loss: 71.214636, test loss: 11.964710
epoch: 5, train loss: 219.661974, test loss: 34.779572
epoch: 10, train loss: 169.706442, test loss: 27.232380
epoch: 15, train loss: 153.952667, test loss: 25.108418
epoch: 20, train loss: 144.357405, test loss: 25.044181
epoch: 5, train loss: 541.107560, test loss: 92.103415
epoch: 10, train loss: 541.107560, test loss: 92.103415
epoch: 15, train loss: 541.107560, test loss: 92.103415
epoch: 20, train loss: 541.107560, test loss: 92.103415
#训练结果可视化
x = np.linspace(0,len(train_ls),len(train_ls))
plt.figure(figsize=(10,8))
for i in range(0,len(drop_probs)):
    plt.plot(x,Train_ls[i],label= 'drop_prob=%.1f'%(drop_probs[i]),linewidth=1.5)
    plt.xlabel('epoch')
    plt.ylabel('loss')
plt.legend(loc=2, bbox_to_anchor=(1.05,1.0),borderaxespad = 0.)
plt.title('train loss with dropout')
plt.show()

png

Dropout:通过在层之间加入噪音,达到正则化的目的,一般作用在隐藏全连接层的输出上,通过将输出项随机置0来控制模型复杂度。需要注意的是,并不是把节点删掉,因为下一次迭代很有可能置0的项又会被重启。

可以看到,若dropout值设置为1时,所有的神经元都被丢弃,那么结果就是loss不会有任何下降。而适当的dropouot值是能够使得模型的表现能力更好的。

11、在多分类任务实验中手动实现 𝑳𝟐 正则化

#导入必要的包
import torch  
import numpy as np  
import random  
from IPython import display  
from matplotlib import pyplot as plt  
import torchvision  
import torchvision.transforms as transforms   
#读取数据
mnist_train = torchvision.datasets.MNIST(root='./data', train=True, download=False, transform=transforms.ToTensor())  
mnist_test = torchvision.datasets.MNIST(root='./data', train=False,download=False, transform=transforms.ToTensor())  
batch_size = 256 
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True,num_workers=0)  
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False,num_workers=0)  
#初始化参数
num_inputs,num_hiddens,num_outputs =784, 256,10
def init_param():
    W1 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens,num_inputs)), dtype=torch.float32)  
    b1 = torch.zeros(1, dtype=torch.float32)  
    W2 = torch.tensor(np.random.normal(0, 0.01, (num_outputs,num_hiddens)), dtype=torch.float32)  
    b2 = torch.zeros(1, dtype=torch.float32)  
    params =[W1,b1,W2,b2]
    for param in params:  
        param.requires_grad_(requires_grad=True)  
    return W1,b1,W2,b2

#激活函数
def relu(x):  
    x = torch.max(input=x,other=torch.tensor(0.0))  
    return x  
#定义模型
def net(X):  
    X = X.view((-1,num_inputs))  
    H = relu(torch.matmul(X,W1.t())+b1)  
    return torch.matmul(H,W2.t())+b2  
#损失函数和优化器
loss = torch.nn.CrossEntropyLoss()  
def SGD(paras,lr):  
    for param in params:  
        param.data -= lr * param.grad  
def l2_penalty(w):
    return (w**2).sum()/2
#定义训练函数
def train(net,train_iter,test_iter,loss,num_epochs,batch_size,lr=None,optimizer=None,mylambda=0):  
    train_ls, test_ls = [], []
    for epoch in range(num_epochs):
        ls, count = 0, 0
        for X,y in train_iter :
            X = X.reshape(-1,num_inputs)
            l=loss(net(X),y)+ mylambda*l2_penalty(W1) + mylambda*l2_penalty(W2)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            ls += l.item()
            count += y.shape[0]
        train_ls.append(ls)
        ls, count = 0, 0
        for X,y in test_iter:
            X = X.reshape(-1,num_inputs)
            l=loss(net(X),y) + mylambda*l2_penalty(W1) + mylambda*l2_penalty(W2)
            ls += l.item()
            count += y.shape[0]
        test_ls.append(ls)
        if(epoch+1)%5==0:
            print('epoch: %d, train loss: %f, test loss: %f'%(epoch+1,train_ls[-1],test_ls[-1]))
    return train_ls,test_ls
#学习率和训练次数
lr = 0.01  
num_epochs = 20 

#初始化正则惩罚系数,训练五次,观察不同惩罚系数对结果的影响
Lamda = [0,0.1,0.2,0.3,0.4,0.5]
Train_ls, Test_ls = [], []

#开始训练
for lamda in Lamda:
    print("current lambda is %f"%lamda)
    W1,b1,W2,b2 = init_param()
    loss = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD([W1,b1,W2,b2],lr = 0.001)
    train_ls, test_ls = train(net,train_iter,test_iter,loss,num_epochs,batch_size,lr,optimizer,lamda)   
    Train_ls.append(train_ls)
    Test_ls.append(test_ls)
current lambda is 0.000000
epoch: 5, train loss: 532.320715, test loss: 90.313938
epoch: 10, train loss: 509.702439, test loss: 86.042535
epoch: 15, train loss: 461.247267, test loss: 77.027005
epoch: 20, train loss: 385.417703, test loss: 63.565145
current lambda is 0.100000
epoch: 5, train loss: 728.123517, test loss: 122.965642
epoch: 10, train loss: 676.406183, test loss: 114.178574
epoch: 15, train loss: 625.183748, test loss: 105.291808
epoch: 20, train loss: 568.570740, test loss: 95.395267
current lambda is 0.200000
epoch: 5, train loss: 847.950227, test loss: 141.772949
epoch: 10, train loss: 726.969373, test loss: 122.072018
epoch: 15, train loss: 648.771695, test loss: 109.252305
epoch: 20, train loss: 594.749826, test loss: 100.272672
current lambda is 0.300000
epoch: 5, train loss: 919.303117, test loss: 151.928670
epoch: 10, train loss: 724.889319, test loss: 121.115485
epoch: 15, train loss: 628.610794, test loss: 105.825349
epoch: 20, train loss: 579.749638, test loss: 98.021891
current lambda is 0.400000
epoch: 5, train loss: 947.593107, test loss: 154.892589
epoch: 10, train loss: 698.521280, test loss: 116.402616
epoch: 15, train loss: 601.780174, test loss: 101.440550
epoch: 20, train loss: 563.823775, test loss: 95.558612
current lambda is 0.500000
epoch: 5, train loss: 955.645427, test loss: 154.620111
epoch: 10, train loss: 668.631307, test loss: 111.333594
epoch: 15, train loss: 580.472812, test loss: 98.031945
epoch: 20, train loss: 553.217715, test loss: 93.916108
#结果可视化
x = np.linspace(0,len(Train_ls[1]),len(Train_ls[1]))
plt.figure(figsize=(10,8))
for i in range(0,len(Lamda)):
    plt.plot(x,Train_ls[i],label= f'L2_Regularization:{Lamda [i]}',linewidth=1.5)
    plt.xlabel('different epoch')
    plt.ylabel('loss')
plt.legend(loc=2, bbox_to_anchor=(1.1,1.0),borderaxespad = 0.)
plt.title('train loss with L2_penalty')
plt.show()

png

12、在多分类任务实验中用torch.nn实现 𝑳𝟐 正则化

#导入必要的包
import torch
import torch.nn as nn
import torchvision 
import torchvision.transforms as transforms
import torch.nn.functional as F
from torch.utils.data import DataLoader,TensorDataset 
import numpy as np
%matplotlib inline
#读取数据,将数据放在GPU上
train_dataset = torchvision.datasets.MNIST(
    root = './data',
    train = True,
    transform = transforms.ToTensor(),
    download = False,
)
test_dataset = torchvision.datasets.MNIST(
    root = './data',
    train = False,
    download = False,
    transform = transforms.ToTensor(),
)
print(train_dataset.data.shape)
print(train_dataset.targets.shape)
device='cuda:0'
train_loader = DataLoader(train_dataset,batch_size= 64,shuffle=False)
test_loader = DataLoader(test_dataset,batch_size= 64,shuffle= True)
torch.Size([60000, 28, 28])
torch.Size([60000])
#定义模型
class LinearNet(nn.Module):
    def __init__(self,num_input,num_hidden,num_output):
        super(LinearNet,self).__init__()
        self.linear1 = nn.Linear(num_input,num_hidden).to(device)
        self.linear2 =nn.Linear(num_hidden,num_output).to(device)
        self.relu = nn.ReLU()
        self.flatten = nn.Flatten()
    def forward(self,x):
        out = self.flatten(x)
        out = self.relu(self.linear1(out))
        out = self.linear2(out)
        return out  
#定义隐藏单元个数
num_input,num_hidden ,num_output = 784,256,10

#把模型移动到GPU上
net = LinearNet(num_input,num_hidden,num_output).to(device = 'cuda:0')

for param in net.state_dict():
    print(param)
loss = nn.CrossEntropyLoss()
num_epochs = 100
net = LinearNet(num_input,num_hidden,num_output)
param_w = [net.linear1.weight,net.linear2.weight]
param_b = [net.linear1.bias,net.linear2.bias]

#优化器中的weight_decay就是L2惩罚系数
optimzer_w = torch.optim.SGD(param_w,lr=0.001,weight_decay=0.01)
optimzer_b = torch.optim.Adam(param_b,lr=0.001)
linear1.weight
linear1.bias
linear2.weight
linear2.bias
#定义训练函数
def train(net,num_epochs):
    train_ls,test_ls = [],[]
    for epoch in range(num_epochs):
        ls = 0
        for x ,y in train_loader:
            x,y = x.cuda(),y.cuda()
            y_pred = net(x)
            l = loss(y_pred,y)
            optimzer_w.zero_grad()
            optimzer_b.zero_grad()
            l.backward()
            optimzer_w.step()
            optimzer_b.step()
            ls += l.item()
        train_ls.append(ls)
        
        ls = 0
        for x ,y in test_loader:
            x,y = x.cuda(),y.cuda()
            y_pred = net(x)
            l = loss(y_pred,y)
            l += l.item()
            ls += l.item()
        test_ls.append(ls)
        print('epoch: %d, train loss: %f, test loss: %f'%(epoch+1,train_ls[-1],test_ls[-1]))
#开始训练
train(net,num_epochs)
epoch: 1, train loss: 2001.697572, test loss: 621.575668
epoch: 2, train loss: 1707.547080, test loss: 514.371074
epoch: 3, train loss: 1395.036772, test loss: 413.423642
epoch: 4, train loss: 1132.538627, test loss: 337.378651
epoch: 5, train loss: 944.169029, test loss: 285.315160
epoch: 6, train loss: 814.725842, test loss: 249.710284
epoch: 7, train loss: 724.510131, test loss: 224.337959
epoch: 8, train loss: 659.504518, test loss: 205.223462
epoch: 9, train loss: 610.948320, test loss: 190.981990
epoch: 10, train loss: 573.513435, test loss: 180.106735
epoch: 11, train loss: 543.868605, test loss: 171.365534
epoch: 12, train loss: 519.854186, test loss: 164.037049
epoch: 13, train loss: 500.031345, test loss: 157.933060
epoch: 14, train loss: 483.410417, test loss: 153.038633
epoch: 15, train loss: 469.286025, test loss: 148.900287
epoch: 16, train loss: 457.143643, test loss: 145.033802
epoch: 17, train loss: 446.599180, test loss: 141.364731
epoch: 18, train loss: 437.360557, test loss: 138.547127
epoch: 19, train loss: 429.201274, test loss: 136.312577
epoch: 20, train loss: 421.944123, test loss: 134.226587
epoch: 21, train loss: 415.448416, test loss: 131.936863
epoch: 22, train loss: 409.601073, test loss: 130.154684
epoch: 23, train loss: 404.309634, test loss: 128.949102
epoch: 24, train loss: 399.498344, test loss: 127.715302
epoch: 25, train loss: 395.103066, test loss: 126.138051
epoch: 26, train loss: 391.074143, test loss: 124.326077
epoch: 27, train loss: 387.367641, test loss: 123.122752
epoch: 28, train loss: 383.945540, test loss: 122.555764
epoch: 29, train loss: 380.776078, test loss: 121.682829
epoch: 30, train loss: 377.832632, test loss: 120.116068
epoch: 31, train loss: 375.090383, test loss: 119.597140
epoch: 32, train loss: 372.529611, test loss: 118.574005
epoch: 33, train loss: 370.134571, test loss: 118.137687
epoch: 34, train loss: 367.888852, test loss: 116.894420
epoch: 35, train loss: 365.778600, test loss: 116.432212
epoch: 36, train loss: 363.790909, test loss: 115.922204
epoch: 37, train loss: 361.915718, test loss: 115.259059
epoch: 38, train loss: 360.144506, test loss: 114.800898
epoch: 39, train loss: 358.468123, test loss: 114.440953
epoch: 40, train loss: 356.879227, test loss: 113.634554
epoch: 41, train loss: 355.371290, test loss: 113.481968
epoch: 42, train loss: 353.938527, test loss: 113.227021
epoch: 43, train loss: 352.574335, test loss: 112.564234
epoch: 44, train loss: 351.274136, test loss: 112.184294
epoch: 45, train loss: 350.033080, test loss: 112.403445
epoch: 46, train loss: 348.847501, test loss: 111.757733
epoch: 47, train loss: 347.713230, test loss: 111.065694
epoch: 48, train loss: 346.627047, test loss: 111.117789
epoch: 49, train loss: 345.585414, test loss: 110.387768
epoch: 50, train loss: 344.585683, test loss: 110.150029
epoch: 51, train loss: 343.625813, test loss: 110.171010
epoch: 52, train loss: 342.704207, test loss: 109.423152
epoch: 53, train loss: 341.817121, test loss: 109.093156
epoch: 54, train loss: 340.962354, test loss: 109.098866
epoch: 55, train loss: 340.138734, test loss: 108.821617
epoch: 56, train loss: 339.344112, test loss: 108.661929
epoch: 57, train loss: 338.576571, test loss: 108.618817
epoch: 58, train loss: 337.835490, test loss: 108.396909
epoch: 59, train loss: 337.119735, test loss: 107.765113
epoch: 60, train loss: 336.427834, test loss: 107.451356
epoch: 61, train loss: 335.758314, test loss: 107.582443
epoch: 62, train loss: 335.110022, test loss: 107.200049
epoch: 63, train loss: 334.481738, test loss: 108.041694
epoch: 64, train loss: 333.873131, test loss: 107.059809
epoch: 65, train loss: 333.282529, test loss: 106.715729
epoch: 66, train loss: 332.708627, test loss: 106.774913
epoch: 67, train loss: 332.150660, test loss: 107.881839
epoch: 68, train loss: 331.608824, test loss: 106.950642
epoch: 69, train loss: 331.081160, test loss: 106.019694
epoch: 70, train loss: 330.567372, test loss: 106.086395
epoch: 71, train loss: 330.066805, test loss: 105.680815
epoch: 72, train loss: 329.577782, test loss: 105.780471
epoch: 73, train loss: 329.100767, test loss: 105.457635
epoch: 74, train loss: 328.635256, test loss: 105.388450
epoch: 75, train loss: 328.181088, test loss: 105.881846
epoch: 76, train loss: 327.737287, test loss: 105.371824
epoch: 77, train loss: 327.303382, test loss: 105.236389
epoch: 78, train loss: 326.879403, test loss: 104.821135
epoch: 79, train loss: 326.464428, test loss: 104.542021
epoch: 80, train loss: 326.058007, test loss: 104.577631
epoch: 81, train loss: 325.660404, test loss: 104.580984
epoch: 82, train loss: 325.271360, test loss: 104.404023
epoch: 83, train loss: 324.889993, test loss: 104.492519
epoch: 84, train loss: 324.516706, test loss: 104.298668
epoch: 85, train loss: 324.151005, test loss: 104.305095
epoch: 86, train loss: 323.792568, test loss: 103.965192
epoch: 87, train loss: 323.440790, test loss: 103.850653
epoch: 88, train loss: 323.095753, test loss: 103.602734
epoch: 89, train loss: 322.756359, test loss: 103.486373
epoch: 90, train loss: 322.423196, test loss: 104.198425
epoch: 91, train loss: 322.096453, test loss: 103.994724
epoch: 92, train loss: 321.775488, test loss: 103.392581
epoch: 93, train loss: 321.459277, test loss: 103.178122
epoch: 94, train loss: 321.148185, test loss: 103.413968
epoch: 95, train loss: 320.841737, test loss: 103.398924
epoch: 96, train loss: 320.539157, test loss: 103.068504
epoch: 97, train loss: 320.241019, test loss: 102.974626
epoch: 98, train loss: 319.946716, test loss: 103.158813
epoch: 99, train loss: 319.656679, test loss: 103.411706
epoch: 100, train loss: 319.371137, test loss: 102.594361

L2正则化的功效作用:

在深度学习中,用的比较多的正则化技术是L2正则化,其形式是在原先的损失函数后边再加多一项:12𝜆𝜃2𝑖12λθi2,那加上L2正则项的损失函数就可以表示为:𝐿(𝜃)=𝐿(𝜃)+𝜆∑𝑛𝑖𝜃2𝑖L(θ)=L(θ)+λ∑inθi2,其中𝜃θ就是网络层的待学习的参数,𝜆λ则控制正则项的大小,较大的取值将较大程度约束模型复杂度,反之亦然。

L2约束通常对稀疏的有尖峰的权重向量施加大的惩罚,而偏好于均匀的参数。这样的效果是鼓励神经单元利用上层的所有输入,而不是部分输入。所以L2正则项加入之后,权重的绝对值大小就会整体倾向于减少,尤其不会出现特别大的值(比如噪声),即网络偏向于学习比较小的权重。所以L2正则化在深度学习中还有个名字叫做“权重衰减”(weight decay),也有一种理解这种衰减是对权值的一种惩罚,所以有些书里把L2正则化的这一项叫做惩罚项(penalty)。

我们通过一个例子形象理解一下L2正则化的作用,考虑一个只有两个参数𝑤1w1和𝑤2w2的模型,其损失函数曲面如下图所示。从a可以看出,最小值所在是一条线,整个曲面看起来就像是一个山脊。那么这样的山脊曲面就会对应无数个参数组合,单纯使用梯度下降法难以得到确定解。但是这样的目标函数若加上一项0.1×(𝑤21+𝑤22)0.1×(w12+w22),则曲面就会变成b图的曲面,最小值所在的位置就会从一条山岭变成一个山谷了,此时我们搜索该目标函数的最小值就比先前容易了,所以L2正则化在机器学习中也叫做“岭回归”(ridge regression)。

13、对回归模型采用十折交叉验证评估

#导入必要的包
import torch 
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader,TensorDataset
from sklearn.model_selection import train_test_split
from collections import OrderedDict
from torch.nn import init
import torch.utils.data as Data  
#定义获取每折的训练集测试集数据的函数
def get_kfold_data(k, i, X, y):
    fold_size = X.shape[0] // k
    
    val_start = i * fold_size
    if i != k - 1:
        val_end = (i + 1) * fold_size
        X_valid, y_valid = X[val_start:val_end], y[val_start:val_end]
        X_train = torch.cat((X[0:val_start], X[val_end:]), dim = 0)
        y_train = torch.cat((y[0:val_start], y[val_end:]), dim = 0)
    else:
        X_valid, y_valid = X[val_start:], y[val_start:]
        X_train = X[0:val_start]
        y_train = y[0:val_start]
        
    return X_train, y_train, X_valid, y_valid
#定义多折交叉验证函数
def k_fold(k, X, y):
    
    train_loss_sum, valid_loss_sum = 0, 0
    train_acc_sum, valid_acc_sum = 0, 0
    
    data = []
    train_loss_to_data = []
    valid_loss_to_data = []
    train_acc_to_data = []
    valid_acc_to_data = []
    
    
    for i in range(k):
        print('第', i + 1,'折验证结果')
        X_train, y_train, X_valid, y_valid = get_kfold_data(k, i, X, y)
        dataset = Data.TensorDataset(X_train, y_train)  
        train_iter = Data.DataLoader(  
            dataset=dataset, # torch TensorDataset format  
            batch_size=batch_size, # mini batch size  
            shuffle=True, # 是否打乱数据 (训练集一般需要进行打乱)  
            num_workers=0, # 多线程来读数据, 注意在Windows下需要设置为0  
        )  
        # 将测试数据的特征和标签组合  
        dataset = Data.TensorDataset(X_valid, y_valid)  
        # 把 dataset 放入 DataLoader  
        test_iter = Data.DataLoader(  
            dataset=dataset, # torch TensorDataset format  
            batch_size=batch_size, # mini batch size  
            shuffle=True, # 是否打乱数据 
            num_workers=0, # 多线程来读数据, 注意在Windows下需要设置为0  
        )
        train_loss, val_loss, train_acc, val_acc = train(model,train_iter,test_iter,loss,num_epochs,batch_size,lr)
        
        train_loss_to_data.append(train_loss)
        valid_loss_to_data.append(val_loss)
        train_acc_to_data.append(train_acc.detach().numpy())
        valid_acc_to_data.append(val_acc.detach().numpy())
        
        train_loss_sum += train_loss
        valid_loss_sum += val_loss
        train_acc_sum += train_acc
        valid_acc_sum += val_acc
    
    print('\n','最终k折交叉验证结果:')
    
    print('average train loss:{:.4f}, average train accuracy:{:.3f}%%'.format(train_loss_sum/k, train_acc_sum/k*100))
    print('average valid loss:{:.4f}, average valid accuracy:{:.3f}%%'.format(valid_loss_sum/k, valid_acc_sum/k*100))
    
    data.append(train_loss_to_data)
    data.append(valid_loss_to_data)
    data.append(train_acc_to_data)
    data.append(valid_acc_to_data)
    
    return data
#定义训练函数
def train(model,train_iter,test_iter,loss,num_epochs,batch_size,lr):
    train_ls,test_ls = [],[]
    train_ac, test_ac = [],[]
    for epoch in range(num_epochs):
        train_ls_sum ,test_ls_sum = 0,0
        train_ac_sum ,test_ac_sum = 0,0
        n_train, n_test = 0,0
        for x,y in train_iter:
            y_pred = model(x)
            l = loss(y_pred,y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_ls_sum += l.item()
            ac = (1-abs(y_pred - y)/y).mean()
            train_ac_sum += ac
            n_train+=1
        for x ,y in test_iter:
            y_pred = model(x)
            l = loss(y_pred,y)
            test_ls_sum +=l.item()
            ac = (1-abs(y_pred - y)/y).mean()
            test_ac_sum += ac
            n_test+=1
        train_ls.append(train_ls_sum)
        test_ls.append(test_ls_sum)
        train_ac.append(train_ac_sum/n_train)
        test_ac.append(test_ac_sum/n_test)
        print('epoch %d,train_loss %.6f, train_acc %.6f %%, test_loss %f, test_acc %f %%'
              %(epoch+1, train_ls[epoch], train_ac[epoch]*100, test_ls[epoch], train_ac[epoch]*100))
    return train_ls[epoch], test_ls[epoch], train_ac[epoch], test_ac[epoch]
#定义数据集
num_input ,num_example = 500,10000
true_w = torch.ones(1,num_input)*0.0056
true_b = 0.028
x_data = torch.tensor(np.random.normal(0,0.001,size  = (num_example,num_input)),dtype = torch.float32)
y = torch.mm(x_data,true_w.t()) +true_b
y += torch.normal(0,0.001,y.shape)
#train_x,test_x,train_y,test_y = train_test_split(x_data,y,shuffle= True,test_size=0.3)
#定义模型
model= nn.Sequential(OrderedDict([
    ('linear1',nn.Linear(num_input,256)),
    ('linear2',nn.Linear(256,128)),
    ('linear3',nn.Linear(128,1)),
])
)
#初始化参数
for param in model.parameters():
    init.normal_(param,mean = 0 ,std = 0.001)
k = 10 #折数
lr = 0.001 #学习率
batch_size = 50 #批量大小
num_epochs = 10 #训练次数
loss = nn.MSELoss() #损失函数
optimizer = torch.optim.SGD(model.parameters(),lr) #优化器
#开始训练和验证
data = k_fold(k, x_data, y)
第 1 折验证结果
epoch 1,train_loss 0.000190, train_acc 97.057892 %, test_loss 0.000020, test_acc 97.057892 %
epoch 2,train_loss 0.000190, train_acc 97.057404 %, test_loss 0.000020, test_acc 97.057404 %
epoch 3,train_loss 0.000190, train_acc 97.057106 %, test_loss 0.000020, test_acc 97.057106 %
epoch 4,train_loss 0.000190, train_acc 97.056755 %, test_loss 0.000020, test_acc 97.056755 %
epoch 5,train_loss 0.000190, train_acc 97.056961 %, test_loss 0.000020, test_acc 97.056961 %
epoch 6,train_loss 0.000190, train_acc 97.057014 %, test_loss 0.000020, test_acc 97.057014 %
epoch 7,train_loss 0.000190, train_acc 97.056816 %, test_loss 0.000020, test_acc 97.056816 %
epoch 8,train_loss 0.000190, train_acc 97.056839 %, test_loss 0.000020, test_acc 97.056839 %
epoch 9,train_loss 0.000190, train_acc 97.056557 %, test_loss 0.000020, test_acc 97.056557 %
epoch 10,train_loss 0.000190, train_acc 97.056702 %, test_loss 0.000020, test_acc 97.056702 %
第 2 折验证结果
epoch 1,train_loss 0.000188, train_acc 97.075577 %, test_loss 0.000022, test_acc 97.075577 %
epoch 2,train_loss 0.000188, train_acc 97.075630 %, test_loss 0.000022, test_acc 97.075630 %
epoch 3,train_loss 0.000188, train_acc 97.075356 %, test_loss 0.000022, test_acc 97.075356 %
epoch 4,train_loss 0.000188, train_acc 97.075165 %, test_loss 0.000022, test_acc 97.075165 %
epoch 5,train_loss 0.000188, train_acc 97.075233 %, test_loss 0.000022, test_acc 97.075233 %
epoch 6,train_loss 0.000188, train_acc 97.075073 %, test_loss 0.000022, test_acc 97.075073 %
epoch 7,train_loss 0.000188, train_acc 97.075043 %, test_loss 0.000022, test_acc 97.075043 %
epoch 8,train_loss 0.000188, train_acc 97.075150 %, test_loss 0.000022, test_acc 97.075150 %
epoch 9,train_loss 0.000188, train_acc 97.075035 %, test_loss 0.000022, test_acc 97.075035 %
epoch 10,train_loss 0.000188, train_acc 97.074913 %, test_loss 0.000022, test_acc 97.074913 %
第 3 折验证结果
epoch 1,train_loss 0.000190, train_acc 97.053711 %, test_loss 0.000020, test_acc 97.053711 %
epoch 2,train_loss 0.000190, train_acc 97.053871 %, test_loss 0.000020, test_acc 97.053871 %
epoch 3,train_loss 0.000190, train_acc 97.053894 %, test_loss 0.000020, test_acc 97.053894 %
epoch 4,train_loss 0.000190, train_acc 97.054031 %, test_loss 0.000020, test_acc 97.054031 %
epoch 5,train_loss 0.000190, train_acc 97.054092 %, test_loss 0.000020, test_acc 97.054092 %
epoch 6,train_loss 0.000190, train_acc 97.054062 %, test_loss 0.000020, test_acc 97.054062 %
epoch 7,train_loss 0.000190, train_acc 97.054169 %, test_loss 0.000020, test_acc 97.054169 %
epoch 8,train_loss 0.000190, train_acc 97.054070 %, test_loss 0.000020, test_acc 97.054070 %
epoch 9,train_loss 0.000190, train_acc 97.054298 %, test_loss 0.000020, test_acc 97.054298 %
epoch 10,train_loss 0.000190, train_acc 97.054108 %, test_loss 0.000020, test_acc 97.054108 %
第 4 折验证结果
epoch 1,train_loss 0.000190, train_acc 97.057098 %, test_loss 0.000020, test_acc 97.057098 %
epoch 2,train_loss 0.000190, train_acc 97.057083 %, test_loss 0.000020, test_acc 97.057083 %
epoch 3,train_loss 0.000190, train_acc 97.057419 %, test_loss 0.000020, test_acc 97.057419 %
epoch 4,train_loss 0.000190, train_acc 97.057343 %, test_loss 0.000020, test_acc 97.057343 %
epoch 5,train_loss 0.000190, train_acc 97.057220 %, test_loss 0.000020, test_acc 97.057220 %
epoch 6,train_loss 0.000190, train_acc 97.057327 %, test_loss 0.000020, test_acc 97.057327 %
epoch 7,train_loss 0.000190, train_acc 97.057167 %, test_loss 0.000020, test_acc 97.057167 %
epoch 8,train_loss 0.000190, train_acc 97.057175 %, test_loss 0.000020, test_acc 97.057175 %
epoch 9,train_loss 0.000190, train_acc 97.057327 %, test_loss 0.000020, test_acc 97.057327 %
epoch 10,train_loss 0.000190, train_acc 97.057159 %, test_loss 0.000020, test_acc 97.057159 %
第 5 折验证结果
epoch 1,train_loss 0.000189, train_acc 97.059090 %, test_loss 0.000021, test_acc 97.059090 %
epoch 2,train_loss 0.000189, train_acc 97.058792 %, test_loss 0.000021, test_acc 97.058792 %
epoch 3,train_loss 0.000189, train_acc 97.058868 %, test_loss 0.000021, test_acc 97.058868 %
epoch 4,train_loss 0.000189, train_acc 97.058800 %, test_loss 0.000021, test_acc 97.058800 %
epoch 5,train_loss 0.000189, train_acc 97.058746 %, test_loss 0.000021, test_acc 97.058746 %
epoch 6,train_loss 0.000189, train_acc 97.058746 %, test_loss 0.000021, test_acc 97.058746 %
epoch 7,train_loss 0.000189, train_acc 97.058968 %, test_loss 0.000021, test_acc 97.058968 %
epoch 8,train_loss 0.000189, train_acc 97.058571 %, test_loss 0.000021, test_acc 97.058571 %
epoch 9,train_loss 0.000189, train_acc 97.058578 %, test_loss 0.000021, test_acc 97.058578 %
epoch 10,train_loss 0.000189, train_acc 97.058525 %, test_loss 0.000021, test_acc 97.058525 %
第 6 折验证结果
epoch 1,train_loss 0.000190, train_acc 97.062927 %, test_loss 0.000021, test_acc 97.062927 %
epoch 2,train_loss 0.000190, train_acc 97.062775 %, test_loss 0.000021, test_acc 97.062775 %
epoch 3,train_loss 0.000190, train_acc 97.062965 %, test_loss 0.000021, test_acc 97.062965 %
epoch 4,train_loss 0.000190, train_acc 97.063049 %, test_loss 0.000021, test_acc 97.063049 %
epoch 5,train_loss 0.000190, train_acc 97.062820 %, test_loss 0.000021, test_acc 97.062820 %
epoch 6,train_loss 0.000190, train_acc 97.063210 %, test_loss 0.000021, test_acc 97.063210 %
epoch 7,train_loss 0.000190, train_acc 97.062897 %, test_loss 0.000021, test_acc 97.062897 %
epoch 8,train_loss 0.000190, train_acc 97.062943 %, test_loss 0.000021, test_acc 97.062943 %
epoch 9,train_loss 0.000190, train_acc 97.063026 %, test_loss 0.000021, test_acc 97.063026 %
epoch 10,train_loss 0.000190, train_acc 97.062988 %, test_loss 0.000021, test_acc 97.062988 %
第 7 折验证结果
epoch 1,train_loss 0.000189, train_acc 97.065422 %, test_loss 0.000022, test_acc 97.065422 %
epoch 2,train_loss 0.000189, train_acc 97.065453 %, test_loss 0.000022, test_acc 97.065453 %
epoch 3,train_loss 0.000189, train_acc 97.065613 %, test_loss 0.000022, test_acc 97.065613 %
epoch 4,train_loss 0.000189, train_acc 97.065773 %, test_loss 0.000022, test_acc 97.065773 %
epoch 5,train_loss 0.000189, train_acc 97.065536 %, test_loss 0.000022, test_acc 97.065536 %
epoch 6,train_loss 0.000189, train_acc 97.066055 %, test_loss 0.000022, test_acc 97.066055 %
epoch 7,train_loss 0.000189, train_acc 97.065659 %, test_loss 0.000022, test_acc 97.065659 %
epoch 8,train_loss 0.000189, train_acc 97.065681 %, test_loss 0.000022, test_acc 97.065681 %
epoch 9,train_loss 0.000189, train_acc 97.066032 %, test_loss 0.000022, test_acc 97.066032 %
epoch 10,train_loss 0.000189, train_acc 97.065895 %, test_loss 0.000022, test_acc 97.065895 %
第 8 折验证结果
epoch 1,train_loss 0.000188, train_acc 97.064209 %, test_loss 0.000022, test_acc 97.064209 %
epoch 2,train_loss 0.000188, train_acc 97.064148 %, test_loss 0.000022, test_acc 97.064148 %
epoch 3,train_loss 0.000188, train_acc 97.064308 %, test_loss 0.000022, test_acc 97.064308 %
epoch 4,train_loss 0.000188, train_acc 97.064056 %, test_loss 0.000022, test_acc 97.064056 %
epoch 5,train_loss 0.000188, train_acc 97.064102 %, test_loss 0.000022, test_acc 97.064102 %
epoch 6,train_loss 0.000188, train_acc 97.064034 %, test_loss 0.000022, test_acc 97.064034 %
epoch 7,train_loss 0.000188, train_acc 97.064011 %, test_loss 0.000022, test_acc 97.064011 %
epoch 8,train_loss 0.000188, train_acc 97.063972 %, test_loss 0.000022, test_acc 97.063972 %
epoch 9,train_loss 0.000188, train_acc 97.064034 %, test_loss 0.000022, test_acc 97.064034 %
epoch 10,train_loss 0.000188, train_acc 97.063980 %, test_loss 0.000022, test_acc 97.063980 %
第 9 折验证结果
epoch 1,train_loss 0.000189, train_acc 97.060158 %, test_loss 0.000021, test_acc 97.060158 %
epoch 2,train_loss 0.000189, train_acc 97.060257 %, test_loss 0.000021, test_acc 97.060257 %
epoch 3,train_loss 0.000189, train_acc 97.060219 %, test_loss 0.000021, test_acc 97.060219 %
epoch 4,train_loss 0.000189, train_acc 97.060272 %, test_loss 0.000021, test_acc 97.060272 %
epoch 5,train_loss 0.000189, train_acc 97.060417 %, test_loss 0.000021, test_acc 97.060417 %
epoch 6,train_loss 0.000189, train_acc 97.060242 %, test_loss 0.000021, test_acc 97.060242 %
epoch 7,train_loss 0.000189, train_acc 97.060272 %, test_loss 0.000021, test_acc 97.060272 %
epoch 8,train_loss 0.000189, train_acc 97.060387 %, test_loss 0.000021, test_acc 97.060387 %
epoch 9,train_loss 0.000189, train_acc 97.060242 %, test_loss 0.000021, test_acc 97.060242 %
epoch 10,train_loss 0.000189, train_acc 97.060356 %, test_loss 0.000021, test_acc 97.060356 %
第 10 折验证结果
epoch 1,train_loss 0.000189, train_acc 97.064194 %, test_loss 0.000021, test_acc 97.064194 %
epoch 2,train_loss 0.000189, train_acc 97.064102 %, test_loss 0.000021, test_acc 97.064102 %
epoch 3,train_loss 0.000189, train_acc 97.063850 %, test_loss 0.000021, test_acc 97.063850 %
epoch 4,train_loss 0.000189, train_acc 97.063583 %, test_loss 0.000021, test_acc 97.063583 %
epoch 5,train_loss 0.000189, train_acc 97.063568 %, test_loss 0.000021, test_acc 97.063568 %
epoch 6,train_loss 0.000189, train_acc 97.063545 %, test_loss 0.000021, test_acc 97.063545 %
epoch 7,train_loss 0.000189, train_acc 97.063568 %, test_loss 0.000021, test_acc 97.063568 %
epoch 8,train_loss 0.000189, train_acc 97.063583 %, test_loss 0.000021, test_acc 97.063583 %
epoch 9,train_loss 0.000189, train_acc 97.063652 %, test_loss 0.000021, test_acc 97.063652 %
epoch 10,train_loss 0.000189, train_acc 97.063431 %, test_loss 0.000021, test_acc 97.063431 %

 最终k折交叉验证结果:
average train loss:0.0002, average train accuracy:97.062%%
average valid loss:0.0000, average valid accuracy:97.061%%
#导入表格需要的包
import pandas as pd
import numpy as np
import os
#定义数据框架
name = []
for i in range(k):
    name.append("第"+str(i+1)+"折")
dataframe = {"name": name,
        "train_loss": data[0],
        "valid_loss": data[1],
        "train_acc": data[2],
        "loss_acc": data[3],}
frame = pd.DataFrame(dataframe)
frame.to_csv("./前馈神经网络十折交叉验证模型_回归.csv", index=False)

#显示表格
pd.read_csv("./前馈神经网络十折交叉验证模型_回归.csv")
name train_loss valid_loss train_acc loss_acc
0 第1折 0.000190 0.000020 0.970567 0.971112
1 第2折 0.000188 0.000022 0.970749 0.969335
2 第3折 0.000190 0.000020 0.970541 0.971293
3 第4折 0.000190 0.000020 0.970572 0.971041
4 第5折 0.000189 0.000021 0.970585 0.970869
5 第6折 0.000190 0.000021 0.970630 0.970507
6 第7折 0.000189 0.000022 0.970659 0.970307
7 第8折 0.000188 0.000022 0.970640 0.970442
8 第9折 0.000189 0.000021 0.970603 0.970793
9 第10折 0.000189 0.000021 0.970634 0.970437

14、对二分类模型采用十折交叉验证评估

#导入必要的包
import torch 
import torch.nn as nn
from torch.utils.data import TensorDataset,DataLoader
from torch.nn import init
import torch.optim as optim
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
#创建数据集
num_inputs,num_example = 200,10000
x1 = torch.normal(2,2,(num_example,num_inputs))
y1 = torch.ones((num_example,1))
x2 = torch.normal(-2,2,(num_example,num_inputs))
y2 = torch.zeros((num_example,1))
x_data = torch.cat((x1,x2),dim=0)
y_data = torch.cat((y1,y2),dim = 0)
#train_x,test_x,train_y,test_y = train_test_split(x_data,y_data,shuffle=True,test_size=0.3,stratify=y_data)
#定义数据迭代器
batch_size = 256
train_dataset = TensorDataset(train_x,train_y)
train_iter = DataLoader(
    dataset = train_dataset,
    shuffle = True,
    num_workers = 0,
    batch_size = batch_size
)
test_dataset = TensorDataset(test_x,test_y)
test_iter = DataLoader(
    dataset = test_dataset,
    shuffle = True,
    num_workers = 0,
    batch_size = batch_size
)
#定义模型
num_input,num_hidden,num_output = 200,256,1
class net(nn.Module):
    def __init__(self,num_input,num_hidden,num_output):
        super(net,self).__init__()
        self.linear1 = nn.Linear(num_input,num_hidden,bias =False)
        self.linear2 = nn.Linear(num_hidden,num_output,bias=False)
    def forward(self,input):
        out = self.linear1(input)
        out = self.linear2(out)
        return out
model = net(num_input,num_hidden,num_output)
print(model)
net(
  (linear1): Linear(in_features=200, out_features=256, bias=False)
  (linear2): Linear(in_features=256, out_features=1, bias=False)
)
#初始化参数
for param in model.parameters():
    init.normal_(param,mean=0,std=0.001)
#定义训练函数
lr = 0.001
loss = nn.BCEWithLogitsLoss()
optimizer = optim.SGD(model.parameters(),lr)
def train(net,train_iter,test_iter,loss,num_epochs,batch_size):
    train_ls,test_ls,train_acc,test_acc = [],[],[],[]
    for epoch in range(num_epochs):
        train_ls_sum,train_acc_sum,n = 0,0,0
        for x,y in train_iter:
            y_pred = model(x)
            l = loss(y_pred,y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_ls_sum +=l.item()
            train_acc_sum += (((y_pred>0.5)==y)+0.0).sum().item()
            n += y_pred.shape[0]
        train_ls.append(train_ls_sum)
        train_acc.append(train_acc_sum/n)
        
        test_ls_sum,test_acc_sum,n = 0,0,0
        for x,y in test_iter:
            y_pred = model(x)
            l = loss(y_pred,y)
            test_ls_sum +=l.item()
            test_acc_sum += (((y_pred>0.5)==y)+0.0).sum().item()
            n += y_pred.shape[0]
        test_ls.append(test_ls_sum)
        test_acc.append(test_acc_sum/n)
        print('epoch %d, train_loss %.6f,test_loss %f, train_acc %.6f,test_acc %f'
              %(epoch+1, train_ls[epoch],test_ls[epoch], train_acc[epoch],test_acc[epoch]))
    return train_ls[epoch],test_ls[epoch],train_acc[epoch],test_acc[epoch]
#定义获取每折的训练集测试集数据的函数
def get_kfold_data(k, i, X, y):
    fold_size = X.shape[0] // k
    
    val_start = i * fold_size
    if i != k - 1:
        val_end = (i + 1) * fold_size
        X_valid, y_valid = X[val_start:val_end], y[val_start:val_end]
        X_train = torch.cat((X[0:val_start], X[val_end:]), dim = 0)
        y_train = torch.cat((y[0:val_start], y[val_end:]), dim = 0)
    else:
        X_valid, y_valid = X[val_start:], y[val_start:]
        X_train = X[0:val_start]
        y_train = y[0:val_start]
        
    return X_train, y_train, X_valid, y_valid
#定义多折交叉验证函数
def k_fold(k, X, y):
    
    train_loss_sum, valid_loss_sum = 0, 0
    train_acc_sum, valid_acc_sum = 0, 0
    
    data = []
    train_loss_to_data = []
    valid_loss_to_data = []
    train_acc_to_data = []
    valid_acc_to_data = []
    
    
    for i in range(k):
        print('第', i + 1,'折验证结果')
        X_train, y_train, X_valid, y_valid = get_kfold_data(k, i, X, y)
        dataset = Data.TensorDataset(X_train, y_train)  
        train_iter = Data.DataLoader(  
            dataset=dataset, # torch TensorDataset format  
            batch_size=batch_size, # mini batch size  
            shuffle=True, # 是否打乱数据 (训练集一般需要进行打乱)  
            num_workers=0, # 多线程来读数据, 注意在Windows下需要设置为0  
        )  
        # 将测试数据的特征和标签组合  
        dataset = Data.TensorDataset(X_valid, y_valid)  
        # 把 dataset 放入 DataLoader  
        test_iter = Data.DataLoader(  
            dataset=dataset, # torch TensorDataset format  
            batch_size=batch_size, # mini batch size  
            shuffle=True, # 是否打乱数据 
            num_workers=0, # 多线程来读数据, 注意在Windows下需要设置为0  
        )
        train_loss, val_loss, train_acc, val_acc = train(model,train_iter,test_iter,loss,num_epochs,batch_size)
        
        train_loss_to_data.append(train_loss)
        valid_loss_to_data.append(val_loss)
        train_acc_to_data.append(train_acc)
        valid_acc_to_data.append(val_acc)
        
        train_loss_sum += train_loss
        valid_loss_sum += val_loss
        train_acc_sum += train_acc
        valid_acc_sum += val_acc
    
    print('\n','最终k折交叉验证结果:')
    
    print('average train loss:{:.4f}, average train accuracy:{:.3f}%%'.format(train_loss_sum/k, train_acc_sum/k*100))
    print('average valid loss:{:.4f}, average valid accuracy:{:.3f}%%'.format(valid_loss_sum/k, valid_acc_sum/k*100))
    
    data.append(train_loss_to_data)
    data.append(valid_loss_to_data)
    data.append(train_acc_to_data)
    data.append(valid_acc_to_data)
    
    return data
#训练次数和学习率
num_epochs = 10
k = 10

#开始十折交叉验证
data = k_fold(k, x_data, y_data)
第 1 折验证结果
epoch 1, train_loss 48.897869,test_loss 5.451379, train_acc 0.555556,test_acc 0.000000
epoch 2, train_loss 46.698489,test_loss 4.915746, train_acc 0.555556,test_acc 0.000000
epoch 3, train_loss 36.215635,test_loss 2.965694, train_acc 0.701222,test_acc 1.000000
epoch 4, train_loss 17.239373,test_loss 1.144778, train_acc 1.000000,test_acc 1.000000
epoch 5, train_loss 6.928696,test_loss 0.521981, train_acc 1.000000,test_acc 1.000000
epoch 6, train_loss 3.529307,test_loss 0.300938, train_acc 1.000000,test_acc 1.000000
epoch 7, train_loss 2.189813,test_loss 0.200819, train_acc 1.000000,test_acc 1.000000
epoch 8, train_loss 1.529666,test_loss 0.146688, train_acc 1.000000,test_acc 1.000000
epoch 9, train_loss 1.150546,test_loss 0.113659, train_acc 1.000000,test_acc 1.000000
epoch 10, train_loss 0.910318,test_loss 0.091781, train_acc 1.000000,test_acc 1.000000
第 2 折验证结果
epoch 1, train_loss 0.746089,test_loss 0.076778, train_acc 1.000000,test_acc 1.000000
epoch 2, train_loss 0.628100,test_loss 0.065452, train_acc 1.000000,test_acc 1.000000
epoch 3, train_loss 0.540279,test_loss 0.056781, train_acc 1.000000,test_acc 1.000000
epoch 4, train_loss 0.472010,test_loss 0.049975, train_acc 1.000000,test_acc 1.000000
epoch 5, train_loss 0.417988,test_loss 0.044532, train_acc 1.000000,test_acc 1.000000
epoch 6, train_loss 0.374184,test_loss 0.040043, train_acc 1.000000,test_acc 1.000000
epoch 7, train_loss 0.337760,test_loss 0.036285, train_acc 1.000000,test_acc 1.000000
epoch 8, train_loss 0.307335,test_loss 0.033167, train_acc 1.000000,test_acc 1.000000
epoch 9, train_loss 0.281762,test_loss 0.030509, train_acc 1.000000,test_acc 1.000000
epoch 10, train_loss 0.259887,test_loss 0.028225, train_acc 1.000000,test_acc 1.000000
第 3 折验证结果
epoch 1, train_loss 0.240362,test_loss 0.026453, train_acc 1.000000,test_acc 1.000000
epoch 2, train_loss 0.223868,test_loss 0.024671, train_acc 1.000000,test_acc 1.000000
epoch 3, train_loss 0.209306,test_loss 0.023142, train_acc 1.000000,test_acc 1.000000
epoch 4, train_loss 0.196324,test_loss 0.021730, train_acc 1.000000,test_acc 1.000000
epoch 5, train_loss 0.184565,test_loss 0.020465, train_acc 1.000000,test_acc 1.000000
epoch 6, train_loss 0.174488,test_loss 0.019368, train_acc 1.000000,test_acc 1.000000
epoch 7, train_loss 0.164845,test_loss 0.018354, train_acc 1.000000,test_acc 1.000000
epoch 8, train_loss 0.156407,test_loss 0.017442, train_acc 1.000000,test_acc 1.000000
epoch 9, train_loss 0.148731,test_loss 0.016596, train_acc 1.000000,test_acc 1.000000
epoch 10, train_loss 0.141675,test_loss 0.015826, train_acc 1.000000,test_acc 1.000000
第 4 折验证结果
epoch 1, train_loss 0.135614,test_loss 0.014766, train_acc 1.000000,test_acc 1.000000
epoch 2, train_loss 0.129716,test_loss 0.014131, train_acc 1.000000,test_acc 1.000000
epoch 3, train_loss 0.124239,test_loss 0.013543, train_acc 1.000000,test_acc 1.000000
epoch 4, train_loss 0.119192,test_loss 0.013004, train_acc 1.000000,test_acc 1.000000
epoch 5, train_loss 0.114463,test_loss 0.012501, train_acc 1.000000,test_acc 1.000000
epoch 6, train_loss 0.110152,test_loss 0.012019, train_acc 1.000000,test_acc 1.000000
epoch 7, train_loss 0.106125,test_loss 0.011579, train_acc 1.000000,test_acc 1.000000
epoch 8, train_loss 0.102304,test_loss 0.011179, train_acc 1.000000,test_acc 1.000000
epoch 9, train_loss 0.098767,test_loss 0.010782, train_acc 1.000000,test_acc 1.000000
epoch 10, train_loss 0.095374,test_loss 0.010418, train_acc 1.000000,test_acc 1.000000
第 5 折验证结果
epoch 1, train_loss 0.092196,test_loss 0.010176, train_acc 1.000000,test_acc 1.000000
epoch 2, train_loss 0.089223,test_loss 0.009860, train_acc 1.000000,test_acc 1.000000
epoch 3, train_loss 0.086408,test_loss 0.009583, train_acc 1.000000,test_acc 1.000000
epoch 4, train_loss 0.083749,test_loss 0.009284, train_acc 1.000000,test_acc 1.000000
epoch 5, train_loss 0.081330,test_loss 0.009013, train_acc 1.000000,test_acc 1.000000
epoch 6, train_loss 0.079039,test_loss 0.008751, train_acc 1.000000,test_acc 1.000000
epoch 7, train_loss 0.076824,test_loss 0.008523, train_acc 1.000000,test_acc 1.000000
epoch 8, train_loss 0.074709,test_loss 0.008286, train_acc 1.000000,test_acc 1.000000
epoch 9, train_loss 0.072698,test_loss 0.008065, train_acc 1.000000,test_acc 1.000000
epoch 10, train_loss 0.070734,test_loss 0.007853, train_acc 1.000000,test_acc 1.000000
第 6 折验证结果
epoch 1, train_loss 0.068885,test_loss 0.007750, train_acc 1.000000,test_acc 1.000000
epoch 2, train_loss 0.067168,test_loss 0.007554, train_acc 1.000000,test_acc 1.000000
epoch 3, train_loss 0.065473,test_loss 0.007366, train_acc 1.000000,test_acc 1.000000
epoch 4, train_loss 0.063943,test_loss 0.007202, train_acc 1.000000,test_acc 1.000000
epoch 5, train_loss 0.062484,test_loss 0.007024, train_acc 1.000000,test_acc 1.000000
epoch 6, train_loss 0.061057,test_loss 0.006876, train_acc 1.000000,test_acc 1.000000
epoch 7, train_loss 0.059606,test_loss 0.006715, train_acc 1.000000,test_acc 1.000000
epoch 8, train_loss 0.058306,test_loss 0.006574, train_acc 1.000000,test_acc 1.000000
epoch 9, train_loss 0.057007,test_loss 0.006441, train_acc 1.000000,test_acc 1.000000
epoch 10, train_loss 0.055810,test_loss 0.006304, train_acc 1.000000,test_acc 1.000000
第 7 折验证结果
epoch 1, train_loss 0.054742,test_loss 0.006054, train_acc 1.000000,test_acc 1.000000
epoch 2, train_loss 0.053635,test_loss 0.005924, train_acc 1.000000,test_acc 1.000000
epoch 3, train_loss 0.052557,test_loss 0.005818, train_acc 1.000000,test_acc 1.000000
epoch 4, train_loss 0.051594,test_loss 0.005705, train_acc 1.000000,test_acc 1.000000
epoch 5, train_loss 0.050521,test_loss 0.005587, train_acc 1.000000,test_acc 1.000000
epoch 6, train_loss 0.049503,test_loss 0.005481, train_acc 1.000000,test_acc 1.000000
epoch 7, train_loss 0.048566,test_loss 0.005377, train_acc 1.000000,test_acc 1.000000
epoch 8, train_loss 0.047719,test_loss 0.005273, train_acc 1.000000,test_acc 1.000000
epoch 9, train_loss 0.046821,test_loss 0.005177, train_acc 1.000000,test_acc 1.000000
epoch 10, train_loss 0.045976,test_loss 0.005087, train_acc 1.000000,test_acc 1.000000
第 8 折验证结果
epoch 1, train_loss 0.045206,test_loss 0.004964, train_acc 1.000000,test_acc 1.000000
epoch 2, train_loss 0.044472,test_loss 0.004885, train_acc 1.000000,test_acc 1.000000
epoch 3, train_loss 0.043670,test_loss 0.004793, train_acc 1.000000,test_acc 1.000000
epoch 4, train_loss 0.042918,test_loss 0.004711, train_acc 1.000000,test_acc 1.000000
epoch 5, train_loss 0.042202,test_loss 0.004635, train_acc 1.000000,test_acc 1.000000
epoch 6, train_loss 0.041503,test_loss 0.004555, train_acc 1.000000,test_acc 1.000000
epoch 7, train_loss 0.040824,test_loss 0.004483, train_acc 1.000000,test_acc 1.000000
epoch 8, train_loss 0.040203,test_loss 0.004417, train_acc 1.000000,test_acc 1.000000
epoch 9, train_loss 0.039556,test_loss 0.004344, train_acc 1.000000,test_acc 1.000000
epoch 10, train_loss 0.038931,test_loss 0.004272, train_acc 1.000000,test_acc 1.000000
第 9 折验证结果
epoch 1, train_loss 0.038194,test_loss 0.004317, train_acc 1.000000,test_acc 1.000000
epoch 2, train_loss 0.037644,test_loss 0.004253, train_acc 1.000000,test_acc 1.000000
epoch 3, train_loss 0.037111,test_loss 0.004185, train_acc 1.000000,test_acc 1.000000
epoch 4, train_loss 0.036552,test_loss 0.004127, train_acc 1.000000,test_acc 1.000000
epoch 5, train_loss 0.036027,test_loss 0.004071, train_acc 1.000000,test_acc 1.000000
epoch 6, train_loss 0.035492,test_loss 0.004009, train_acc 1.000000,test_acc 1.000000
epoch 7, train_loss 0.035019,test_loss 0.003955, train_acc 1.000000,test_acc 1.000000
epoch 8, train_loss 0.034479,test_loss 0.003900, train_acc 1.000000,test_acc 1.000000
epoch 9, train_loss 0.034035,test_loss 0.003844, train_acc 1.000000,test_acc 1.000000
epoch 10, train_loss 0.033507,test_loss 0.003790, train_acc 1.000000,test_acc 1.000000
第 10 折验证结果
epoch 1, train_loss 0.033081,test_loss 0.003724, train_acc 1.000000,test_acc 1.000000
epoch 2, train_loss 0.032712,test_loss 0.003667, train_acc 1.000000,test_acc 1.000000
epoch 3, train_loss 0.032234,test_loss 0.003623, train_acc 1.000000,test_acc 1.000000
epoch 4, train_loss 0.031801,test_loss 0.003575, train_acc 1.000000,test_acc 1.000000
epoch 5, train_loss 0.031426,test_loss 0.003530, train_acc 1.000000,test_acc 1.000000
epoch 6, train_loss 0.031012,test_loss 0.003484, train_acc 1.000000,test_acc 1.000000
epoch 7, train_loss 0.030604,test_loss 0.003443, train_acc 1.000000,test_acc 1.000000
epoch 8, train_loss 0.030250,test_loss 0.003401, train_acc 1.000000,test_acc 1.000000
epoch 9, train_loss 0.029886,test_loss 0.003358, train_acc 1.000000,test_acc 1.000000
epoch 10, train_loss 0.029544,test_loss 0.003311, train_acc 1.000000,test_acc 1.000000

 最终k折交叉验证结果:
average train loss:0.1682, average train accuracy:100.000%%
average valid loss:0.0177, average valid accuracy:100.000%%
#导入绘制表格需要的包
import pandas as pd
import numpy as np
import os
#定义数据框架
name = []
for i in range(k):
    name.append("第"+str(i+1)+"折")
dataframe = {"name": name,
        "train_loss": data[0],
        "valid_loss": data[1],
        "train_acc": data[2],
        "loss_acc": data[3],}
frame = pd.DataFrame(dataframe)
frame.to_csv("./前馈神经网络十折交叉验证模型_二分类.csv", index=False)

#显示表格
pd.read_csv("./前馈神经网络十折交叉验证模型_二分类.csv")
name train_loss valid_loss train_acc loss_acc
0 第1折 0.910318 0.091781 1.0 1.0
1 第2折 0.259887 0.028225 1.0 1.0
2 第3折 0.141675 0.015826 1.0 1.0
3 第4折 0.095374 0.010418 1.0 1.0
4 第5折 0.070734 0.007853 1.0 1.0
5 第6折 0.055810 0.006304 1.0 1.0
6 第7折 0.045976 0.005087 1.0 1.0
7 第8折 0.038931 0.004272 1.0 1.0
8 第9折 0.033507 0.003790 1.0 1.0
9 第10折 0.029544 0.003311 1.0 1.0

15、对多分类模型采用十折交叉验证评估

#导入必要的包
import torch
import torch.nn as nn
import numpy as np
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import random
from pandas import *
%matplotlib inline
#获取数据集
train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(),download=False)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform = transforms.ToTensor(),download=False)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False)

#将数据集整合,便于做十折交叉验证
mnist_features = torch.cat((train_loader.dataset.data,test_loader.dataset.data),dim=0)
mnist_labels = torch.cat((train_loader.dataset.train_labels,test_loader.dataset.test_labels))
mnist_features = mnist_features.float()
mnist_labels = mnist_labels.long()
#定义数据迭代器
def get_data_iter(X_train, y_train, X_valid, y_valid,batch_size):
    train_dataset = torch.utils.data.TensorDataset(X_train.cuda(),y_train.cuda())
    test_dataset = torch.utils.data.TensorDataset(X_valid.cuda(),y_valid.cuda())
    train_iter = torch.utils.data.DataLoader(train_dataset,batch_size=batch_size,shuffle=True)
    test_iter = torch.utils.data.DataLoader(test_dataset,batch_size=batch_size,shuffle=False)
    return train_iter, test_iter
#定义网络
class LinearNet(nn.Module):
    def __init__(self,num_inputs, num_outputs, num_hiddens):
        super(LinearNet,self).__init__()
        self.linear1 = nn.Linear(num_inputs,num_hiddens)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(num_hiddens,num_outputs)
    
    def forward(self,x):
        x = self.linear1(x)
        x = self.relu(x)
        x = self.linear2(x)
        y = self.relu(x)
        return y
#模型训练
def train(train_iter,test_iter,if_reshape,num_epochs,num_inputs,net,loss):
    optimizer = torch.optim.SGD(net.parameters(),lr=0.001)
    train_ls, test_ls = [], []
    for epoch in range(num_epochs):
        ls, count = 0, 0
        if if_reshape ==False:
            for X,y in train_iter:
                l=loss(net(X),y.view(-1,1))
                optimizer.zero_grad()
                l.backward()
                optimizer.step()
                ls += l.item()
                count += y.shape[0]
            train_ls.append(ls/count)
            ls, count = 0, 0
            for X,y in test_iter:
                l=loss(net(X),y.view(-1,1))
                ls += l.item()
                count += y.shape[0]
        else:
            for X,y in train_iter:
                X = X.reshape(-1,num_inputs)
                l=loss(net(X),y).sum()
                optimizer.zero_grad()
                l.backward()
                optimizer.step()
                ls += l.item()
                count += y.shape[0]
            train_ls.append(ls/count)
            ls, count = 0, 0
            for X,y in test_iter:
                X = X.reshape(-1,num_inputs)
                l=loss(net(X),y).sum()
                ls += l.item()
                count += y.shape[0]
        test_ls.append(ls/count)
        if(epoch+1)%5==0:
            print('epoch: %d, train loss: %f, valid loss: %f'%(epoch+1,train_ls[-1],test_ls[-1]))
    return train_ls,test_ls
#定义获取每折的训练集测试集数据的函数
def get_kfold_data(k, i, X, y):
    fold_size = X.shape[0]//k
    val_start = i * fold_size
    if i  != k - 1:
        val_end = (i + 1) * fold_size
        X_valid, y_valid = X[val_start:val_end],y[val_start:val_end]
        X_train = torch.cat((X[0:val_start],X[val_end:]),dim=0)
        y_train = torch.cat((y[0:val_start],y[val_end:]),dim=0)
    else:
        X_valid,y_valid = X[val_start:], y[val_start:]
        X_train = X[0:val_start]
        y_train = y[0:val_start]
    
    return X_train, y_train, X_valid, y_valid
#定义多折验证函数
def k_fold(k, X_train, y_train,if_reshape,num_epochs,num_inputs,net,loss):
    my_k_train_ls, my_k_valid_ls = [], []
    train_loss_sum, valid_loss_sum = 0, 0
    for i in range(k):
        print('第', i+1, '折验证结果')
        X_train, y_train, X_valid, y_valid = get_kfold_data(k, i, X_train, y_train)
        train_iter, valid_iter = get_data_iter(X_train, y_train, X_valid, y_valid,batch_size=100)
        train_loss, val_loss = train(train_iter,valid_iter,if_reshape,num_epochs,num_inputs,net,loss)
        
        my_k_train_ls.append(train_loss)
        my_k_valid_ls.append(val_loss)
        train_loss_sum += train_loss[-1]
        valid_loss_sum += val_loss[-1]
    
    print("最终平均k折交叉验证结果")
    
    print(f'average train loss: {train_loss_sum/k}')
    print(f'average valid loss: {valid_loss_sum/k}')
    
    return my_k_train_ls, my_k_valid_ls
k=10 #折数
mynum_epochs= 20 #训练次数
mynet=LinearNet(784, 10, 100).cuda() #网络

#开始十折交叉验证
my_k_train_ls, my_k_valid_ls = k_fold(k, mnist_features, mnist_labels,if_reshape=True, num_epochs=mynum_epochs, num_inputs = 784, net =mynet ,loss=nn.CrossEntropyLoss())
第 1 折验证结果
epoch: 5, train loss: 0.008705, valid loss: 0.008887
epoch: 10, train loss: 0.008101, valid loss: 0.008695
epoch: 15, train loss: 0.007858, valid loss: 0.008651
epoch: 20, train loss: 0.005777, valid loss: 0.006437
第 2 折验证结果
epoch: 5, train loss: 0.005411, valid loss: 0.005872
epoch: 10, train loss: 0.005279, valid loss: 0.005866
epoch: 15, train loss: 0.005202, valid loss: 0.005910
epoch: 20, train loss: 0.005149, valid loss: 0.005931
第 3 折验证结果
epoch: 5, train loss: 0.005105, valid loss: 0.005334
epoch: 10, train loss: 0.005077, valid loss: 0.005362
epoch: 15, train loss: 0.005065, valid loss: 0.005404
epoch: 20, train loss: 0.005049, valid loss: 0.005425
第 4 折验证结果
epoch: 5, train loss: 0.005050, valid loss: 0.005044
epoch: 10, train loss: 0.005040, valid loss: 0.005060
epoch: 15, train loss: 0.005033, valid loss: 0.005065
epoch: 20, train loss: 0.005030, valid loss: 0.005074
第 5 折验证结果
epoch: 5, train loss: 0.005036, valid loss: 0.005039
epoch: 10, train loss: 0.005033, valid loss: 0.005043
epoch: 15, train loss: 0.005031, valid loss: 0.005049
epoch: 20, train loss: 0.005029, valid loss: 0.005053
第 6 折验证结果
epoch: 5, train loss: 0.005022, valid loss: 0.005063
epoch: 10, train loss: 0.005050, valid loss: 0.005066
epoch: 15, train loss: 0.005019, valid loss: 0.005067
epoch: 20, train loss: 0.005018, valid loss: 0.005071
第 7 折验证结果
epoch: 5, train loss: 0.005042, valid loss: 0.004946
epoch: 10, train loss: 0.005041, valid loss: 0.004947
epoch: 15, train loss: 0.005041, valid loss: 0.004949
epoch: 20, train loss: 0.005040, valid loss: 0.004948
第 8 折验证结果
epoch: 5, train loss: 0.005045, valid loss: 0.005073
epoch: 10, train loss: 0.005051, valid loss: 0.005075
epoch: 15, train loss: 0.005048, valid loss: 0.005075
epoch: 20, train loss: 0.005052, valid loss: 0.005078
第 9 折验证结果
epoch: 5, train loss: 0.005021, valid loss: 0.005410
epoch: 10, train loss: 0.005021, valid loss: 0.005410
epoch: 15, train loss: 0.005024, valid loss: 0.005412
epoch: 20, train loss: 0.005023, valid loss: 0.005412
第 10 折验证结果
epoch: 5, train loss: 0.005028, valid loss: 0.005024
epoch: 10, train loss: 0.005038, valid loss: 0.005024
epoch: 15, train loss: 0.005059, valid loss: 0.005023
epoch: 20, train loss: 0.005047, valid loss: 0.005024
最终平均k折交叉验证结果
average train loss: 0.005121417903248786
average valid loss: 0.005345280727046977
# 绘图
train_loss, valid_loss = [], []
for i in range(len(my_k_train_ls)):
    train_loss.append(my_k_train_ls[i][-1])
    valid_loss.append(my_k_valid_ls[i][-1])
    
x = np.linspace(0,len(my_k_train_ls),len(my_k_train_ls))
plt.plot(x,train_loss,'o-',label='train_loss',linewidth=1.5)
plt.plot(x,valid_loss,'o-',label='valid_loss',linewidth=1.5)
plt.xlabel('K value')
plt.ylabel('loss')
plt.legend()
plt.show()

png

# 绘制表格
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['SimHei'] # 指定默认字体
mpl.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题
randn = np.random.randn
idx = []
for i in range(1,21):
    idx.append(f'epoch {i}')

data_train, data_valid = np.zeros((10,20)),np.zeros((10,20))
for i in range(10):
    for j in range(20):
        data_train[i,j], data_valid[i,j] = my_k_train_ls[i][j], my_k_valid_ls[i][j] 
       
    
df = DataFrame(data_train.T, index=idx, columns=['第1折', '第2折', '第3折', '第4折', '第5折',
                                                '第6折', '第7折', '第8折', '第9折', '第10折'])

vals = np.around(df.values,7)
fig = plt.figure(figsize=(8,3))
ax = fig.add_subplot(111, frameon=False, xticks=[], yticks=[])
the_table=plt.table(cellText=vals, rowLabels=df.index, colLabels=df.columns,
                    colWidths = [0.1]*vals.shape[1], loc='center',cellLoc='center')
the_table.set_fontsize(20)

the_table.scale(2.5,2.58)

png

交叉验证:

交叉验证是在机器学习建立模型和验证模型参数时常用的办法,一般被用于评估一个机器学习模型的表现。更多的情况下,我们也用交叉验证来进行模型选择(model selection)。

交叉验证,顾名思义,就是重复的使用数据,把得到的样本数据进行切分,组合为不同的训练集和测试集,用训练集来训练模型,用测试集来评估模型预测的好坏。在此基础上可以得到多组不同的训练集和测试集,某次训练集中的某样本在下次可能成为测试集中的样本,即所谓“交叉”。

那么什么时候才需要交叉验证呢?交叉验证用在数据不是很充足的时候。如果数据样本量小于一万条,我们就会采用交叉验证来训练优化选择模型。如果样本大于一万条的话,我们一般随机的把数据分成三份,一份为训练集(Training Set),一份为验证集(Validation Set),最后一份为测试集(Test Set)。用训练集来训练模型,用验证集来评估模型预测的好坏和选择模型及其对应的参数。把最终得到的模型再用于测试集,最终决定使用哪个模型以及对应参数。

总结来说,交叉验证有以下优点:

  1. 交叉验证可以有效评估模型的质量
  2. 交叉验证可以有效选择在数据集上表现最好的模型
  3. 交叉验证可以有效避免过拟合和欠拟合
posted @ 2022-10-23 18:38  cyberbase  阅读(596)  评论(0编辑  收藏  举报