PyTorch 实现多层感知机
1 导入实验所需要的包
from torch.autograd import Variable from torch.utils.data import TensorDataset,DataLoader import torch import numpy as np import matplotlib.pyplot as plt from torch.autograd import Variable import torch.nn.functional as F import pandas as pd from torch import nn from sklearn.model_selection import train_test_split #魔法函数 %matplotlib inline
2 加载原始数据集
data = pd.read_csv("./dataset/HR.csv") #加载csv数据 data = data.join(pd.get_dummies(data.salary)) #将salary 转换成类似one-hot形式加到数据列中 del data['salary'] #删除原有的salary这列 data = data.join(pd.get_dummies(data.part)) #将 part 转换成类似one-hot形式加到数据列中 del data['part'] #删除原有的part这列
3 原始数据集转换
Y_data = data.left.values.reshape(-1,1) #获取标签数据 Y = torch.from_numpy(Y_data).type(torch.FloatTensor).cuda() #转换到cuda上 #不加.values X_data 的数据类型是pandas.core.frame.DataFrame;加上变成numpy.ndarray X_data = data[ [c for c in data.columns if c!='left'] ].values X= torch.from_numpy(X_data).type(torch.FloatTensor).cuda()
4 定义模型
#模型创建方法 class Model(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(20,64) self.linear2 = nn.Linear(64,128) self.linear3 = nn.Linear(128,1) def forward(self,input): x = F.relu( self.linear1(input)) x = F.relu(self.linear2(x)) # y = F.sigmid(self.linear3(x)) y = torch.sigmoid(self.linear3(x)) return y
5 定义优化器和设置模型
def get_model(lr): model = Model().cuda() optimizer = torch.optim.Adam(model.parameters(),lr=lr) return model,optimizer model,optimizer = get_model(lr=0.0001)
6 定义损失函数
loss_fn = nn.BCELoss()
7 定义超参数
batch_size = 64
num_epoch = 1000
8 划分训练集和测试集
train_x,test_x,train_y,test_y = train_test_split(X,Y,train_size=0.8,shuffle= True) #划分数据集 hr_train_dataset = TensorDataset(train_x,train_y) #转换数据集到数据加载器 hr_train_iter = DataLoader(hr_train_dataset,batch_size=64,shuffle = True) hr_test_dataset = TensorDataset(test_x,test_y) hr_test_iter = DataLoader(hr_test_dataset,batch_size=64)
9 定义准确率函数
def accurancy(y_pred,y): y_pred=(y_pred >0.5).type(torch.int32) acc = (y_pred==y).float().mean() return acc
10 训练
for epoch in range(num_epoch): for x,y in hr_train_iter: y_hat = model(x) loss = loss_fn(y_hat,y) optimizer.zero_grad() loss.backward() optimizer.step() with torch.no_grad(): train_acc = accurancy(model(train_x),train_y) test_acc = accurancy(model(test_x),test_y) print("epoch : ",epoch,"train_loss: ",loss_fn(model(train_x),train_y).data.item()," train_acc :",train_acc.data.item(), " train_loss: ",loss_fn(model(test_x),test_y).data.item()," test_acc :",test_acc.data.item() )
因上求缘,果上努力~~~~ 作者:图神经网络,转载请注明原文链接:https://www.cnblogs.com/BlairGrowing/p/15439493.html