Implementing a Feedforward Neural Network for Binary Classification with torch.nn

1 Import Packages

import torch 
import torch.nn as nn
from torch.utils.data import TensorDataset,DataLoader
from torch.nn import init
import torch.optim as optim
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt

2 Generate the Data

num_inputs,num_examples = 200,10000   # 200 features, 10000 samples per class
x1 = torch.normal(2,1,(num_examples,num_inputs))    # positive class: features drawn from N(2,1)
y1 = torch.ones((num_examples,1))
x2 = torch.normal(-2,1,(num_examples,num_inputs))   # negative class: features drawn from N(-2,1)
y2 = torch.zeros((num_examples,1))
x_data = torch.cat((x1,x2),dim=0)
y_data = torch.cat((y1,y2),dim=0)
# 70/30 train/test split, stratified so both classes stay balanced
train_x,test_x,train_y,test_y = train_test_split(x_data,y_data,shuffle=True,test_size=0.3,stratify=y_data)
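
A quick sanity check on shapes and class balance (a sketch, assuming the tensors defined above are in scope):

print(x_data.shape, y_data.shape)    # torch.Size([20000, 200]) torch.Size([20000, 1])
print(train_x.shape, test_x.shape)   # 14000/6000 rows after the 70/30 split
print(train_y.mean().item())         # ~0.5, since stratify=y_data keeps the classes balanced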

3 Load the Data

batch_size = 256
train_dataset = TensorDataset(train_x,train_y)
train_iter = DataLoader(
    dataset = train_dataset,
    shuffle = True,    # reshuffle the training set every epoch
    num_workers = 0,
    batch_size = batch_size
)
test_dataset = TensorDataset(test_x,test_y)
test_iter = DataLoader(
    dataset = test_dataset,
    shuffle = False,   # no need to shuffle the test set
    num_workers = 0,
    batch_size = batch_size
)
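
To confirm the loaders yield batches of the expected shape, one can peek at a single batch (a sketch, assuming train_iter from above):

xb, yb = next(iter(train_iter))
print(xb.shape, yb.shape)   # torch.Size([256, 200]) torch.Size([256, 1])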

4 Define the Model

num_input,num_hidden,num_output = 200,256,1
class Net(nn.Module):
    def __init__(self,num_input,num_hidden,num_output):
        super(Net,self).__init__()
        self.linear1 = nn.Linear(num_input,num_hidden,bias=False)
        self.relu = nn.ReLU()   # nonlinearity between the layers; without it the two linear layers collapse into one linear map
        self.linear2 = nn.Linear(num_hidden,num_output,bias=False)
    def forward(self,input):
        out = self.linear1(input)
        out = self.relu(out)
        out = self.linear2(out)   # raw logit; BCEWithLogitsLoss applies the sigmoid internally
        return out
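
For reference, the same architecture can be written more compactly with nn.Sequential (a sketch, not used in the rest of this post):

model_seq = nn.Sequential(
    nn.Linear(num_input, num_hidden, bias=False),
    nn.ReLU(),
    nn.Linear(num_hidden, num_output, bias=False),
)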

5 Initialize the Model Parameters

model = Net(num_input,num_hidden,num_output)
print(model)
for param in model.parameters():
    init.normal_(param,mean=0,std=0.001)   # re-initialize all weights with small random values
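
To verify the re-initialization took effect, one can print per-parameter statistics (a sketch):

for name, param in model.named_parameters():
    print(name, tuple(param.shape), param.std().item())   # std should be close to 0.001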

6 Define the Training Function

lr = 0.001
loss = nn.BCEWithLogitsLoss()
optimizer = optim.SGD(model.parameters(),lr)
def train(net,train_iter,test_iter,loss,num_epochs,batch_size):
    # uses the optimizer defined above
    train_ls,test_ls,train_acc,test_acc = [],[],[],[]
    for epoch in range(num_epochs):
        net.train()
        train_ls_sum,train_acc_sum,n,batches = 0,0,0,0
        for x,y in train_iter:
            y_pred = net(x)
            l = loss(y_pred,y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_ls_sum += l.item()
            # the model outputs logits, so threshold at 0 (sigmoid(0) = 0.5)
            train_acc_sum += ((y_pred > 0) == y).float().sum().item()
            n += y_pred.shape[0]
            batches += 1
        train_ls.append(train_ls_sum/batches)   # mean loss per batch
        train_acc.append(train_acc_sum/n)

        net.eval()
        test_ls_sum,test_acc_sum,n,batches = 0,0,0,0
        with torch.no_grad():   # no gradients needed for evaluation
            for x,y in test_iter:
                y_pred = net(x)
                l = loss(y_pred,y)
                test_ls_sum += l.item()
                test_acc_sum += ((y_pred > 0) == y).float().sum().item()
                n += y_pred.shape[0]
                batches += 1
        test_ls.append(test_ls_sum/batches)
        test_acc.append(test_acc_sum/n)
        print('epoch %d, train_loss %.6f, test_loss %.6f, train_acc %.6f, test_acc %.6f'
              %(epoch+1, train_ls[epoch], test_ls[epoch], train_acc[epoch], test_acc[epoch]))
    return train_ls,test_ls,train_acc,test_acc
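
Note that the accuracy check thresholds the raw logits at 0 rather than probabilities at 0.5; since sigmoid(0) = 0.5 the two are equivalent, which a minimal check confirms:

z = torch.tensor([-1.0, 0.5, 2.0])
print((z > 0) == (torch.sigmoid(z) > 0.5))   # tensor([True, True, True])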
       

7 Train

# number of training epochs (the learning rate was set above)
num_epochs = 10
train_loss,test_loss,train_acc,test_acc = train(model,train_iter,test_iter,loss,num_epochs,batch_size)
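
After training, the model can be applied to an individual sample; the point below is drawn from the positive-class distribution purely for illustration (a sketch):

model.eval()
with torch.no_grad():
    sample = torch.normal(2, 1, (1, num_input))   # hypothetical new point near the positive class
    prob = torch.sigmoid(model(sample))           # convert the logit to a probability
    print(prob.item(), int(prob.item() > 0.5))    # probability and predicted label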

8 Visualization

x = np.arange(1,len(train_loss)+1)   # epoch indices 1..num_epochs
plt.plot(x,train_loss,label="train_loss",linewidth=1.5)
plt.plot(x,test_loss,label="test_loss",linewidth=1.5)

plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()
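
The accuracy curves returned by train can be plotted the same way (a sketch reusing the x axis defined above):

plt.plot(x,train_acc,label="train_acc",linewidth=1.5)
plt.plot(x,test_acc,label="test_acc",linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.legend()
plt.show()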

 
