Chapter 2, Section 2.5: Saving and Loading a PyTorch Model to/from a File

Train the model and save its parameters to a file

# https://github.com/PacktPublishing/Modern-Computer-Vision-with-PyTorch

###################  Chapter Two #######################################

import torch
import torch.nn as nn
########################################################################
# Toy dataset: each target is the sum of its two inputs
x = [[1,2],[3,4],[5,6],[7,8]]
y = [[3],[7],[11],[15]]

X = torch.tensor(x).float()
Y = torch.tensor(y).float()
########################################################################
device = 'cuda' if torch.cuda.is_available() else 'cpu'
X = X.to(device)
Y = Y.to(device)
print(device)

########################################################################
# Build the neural network by defining a class (alternative to nn.Sequential below)
# class MyNeuralNet(nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.input_to_hidden_layer = nn.Linear(2,8)
#         self.hidden_layer_activation = nn.ReLU()
#         self.hidden_to_output_layer = nn.Linear(8,1)
#     def forward(self, x):
#         x = self.input_to_hidden_layer(x)
#         x = self.hidden_layer_activation(x)
#         x = self.hidden_to_output_layer(x)
#         return x
# mynet = MyNeuralNet().to(device)
########################################################################
# Build the neural network with nn.Sequential
mynet = nn.Sequential(
    nn.Linear(2, 8),
    nn.ReLU(),
    nn.Linear(8, 1)
).to(device)

# Print a summary of the network
# (needs the torch_summary package installed, which accepts a sample input tensor)
from torchsummary import summary

summary(mynet, torch.zeros(1, 2))

########################################################################
loss_func = nn.MSELoss()

_Y = mynet(X)
loss_value = loss_func(_Y, Y)
print("Loss value before training:", loss_value)

# Custom loss function, equivalent to nn.MSELoss
def my_loss_fun(_y, y):
    loss = (_y - y)**2
    loss = loss.mean()
    return loss
loss_func = my_loss_fun
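
As a quick sanity check (not from the book, just an illustrative sketch assuming mynet, X, and Y from above), the custom loss should agree with the built-in nn.MSELoss on the same predictions:

# Illustrative sanity check: custom MSE vs. built-in MSE
with torch.no_grad():
    pred = mynet(X)
    print(nn.MSELoss()(pred, Y).item())  # built-in MSE
    print(my_loss_fun(pred, Y).item())   # custom MSE defined above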

########################################################################
from torch.optim import SGD
opt = SGD(mynet.parameters(), lr=0.001)  # stochastic gradient descent, learning rate 0.001

########################################################################
loss_history = []
for _ in range(100):                        # train for 100 epochs
    opt.zero_grad()                         # reset gradients from the previous step
    loss_value = loss_func(mynet(X), Y)     # forward pass and loss
    loss_value.backward()                   # backpropagate
    opt.step()                              # update parameters
    loss_history.append(loss_value.item())
########################################################################
import matplotlib.pyplot as plt
# %matplotlib inline  (Jupyter magic; uncomment when running in a notebook)
plt.plot(loss_history)
plt.title('Loss variation over increasing epochs')
plt.xlabel('epochs')
plt.ylabel('loss value')
plt.show()
########################################################################
_Y = mynet(X)
loss_value = loss_func(_Y, Y)
print("Loss value after training:", loss_value)

val_x = [[7,8]]
val_x = torch.tensor(val_x).float().to(device)
print("Prediction for test input [[7,8]]:", mynet(val_x))

# Save the model's parameters (state_dict) to a file
save_path = 'mynet.pth'
torch.save(mynet.state_dict(), save_path)
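
The state_dict approach above stores only the parameter tensors, so the same architecture must be rebuilt before loading. As an alternative sketch (not from the book; the file name 'mynet_full.pth' is only an example), torch.save can also serialize the entire module object, at the cost of pickling the class definition itself:

# Alternative (illustrative): save and reload the whole module object
torch.save(mynet, 'mynet_full.pth')
# mynet_restored = torch.load('mynet_full.pth', weights_only=False)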

# # Load the model
#
# load_path = save_path
# mynet.load_state_dict(torch.load(load_path, weights_only=True))
# # Test
# val = [[8,9],[10,11],[1.5,2.5]]
# val = torch.tensor(val).float()
#
# print("Test predictions:", mynet(val.to(device)))

 

Load the model parameters from the file

import torch
import torch.nn as nn

device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Rebuild the same network architecture with nn.Sequential
mynet = nn.Sequential(
    nn.Linear(2, 8),
    nn.ReLU(),
    nn.Linear(8, 1)
).to(device)

save_path = 'mynet.pth'
# Load the saved parameters into the new model instance

load_path = save_path
mynet.load_state_dict(torch.load(load_path, weights_only=True))
# Test on new inputs
val = [[8,9],[10,11],[1.5,2.5]]
val = torch.tensor(val).float()

print("测试",mynet(val.to(device)))

 

posted @ 2024-12-12 19:12  辛河