PyTorch Deep Learning Practice, Lecture 4
import torch
# x and y are 3x1 matrices: 3 samples in total, each with a single feature
x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0]])
# design model using class
"""
Our model class should inherit from nn.Module, the base class for all neural
network modules. The member methods __init__() and forward() must be implemented.
nn.Linear contains two member Tensors: weight and bias.
nn.Module implements the magic method __call__(), which lets an instance of the
class be called just like a function; the call normally dispatches to forward().
(See the sketches after this listing.)
"""
class LinearModel(torch.nn.Module):
    def __init__(self):
        super(LinearModel, self).__init__()
        # (1, 1) are the feature dimensions of the input x and the output y;
        # in this dataset both are 1-dimensional.
        # The layer's learnable parameters are w and b, accessible via
        # linear.weight and linear.bias respectively.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred
model = LinearModel()

# construct loss and optimizer
# size_average is deprecated; pass reduction instead:
# criterion = torch.nn.MSELoss(size_average=False)
criterion = torch.nn.MSELoss(reduction='sum')  # sum the squared errors over the batch
# model.parameters() collects all learnable parameters of the model (here w and b)
# and hands them to the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
for epoch in range(100):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())

    loss.backward()        # autograd: compute the gradients automatically
    optimizer.step()       # update the parameters, i.e. the values of w and b
    optimizer.zero_grad()  # gradients computed by backward() accumulate, so
                           # zero them before the next backward pass
print('w= ', model.linear.weight.item())
print('b= ', model.linear.bias.item())
x_test = torch.tensor([[4.0]])
y_test = model(x_test)
print('y_pred = ', y_test.data)
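As the docstring notes, calling the model goes through nn.Module's __call__, which dispatches to forward(), and the learnable tensors live on model.linear. A minimal check of both claims, assuming the script above has just run (probe is a hypothetical input, not from the lecture):

probe = torch.tensor([[5.0]])  # hypothetical test input
print(model(probe))            # goes through nn.Module.__call__, which calls forward()
print(model.forward(probe))    # same result when forward() is invoked directly
print(model.linear.weight.shape, model.linear.bias.shape)  # torch.Size([1, 1]) torch.Size([1])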
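On reduction: 'sum' adds up the squared errors, while 'mean' (the default) divides by the number of elements. A quick check using the same numbers as x_data and y_data above (pred is a hypothetical unfitted prediction):

pred = torch.tensor([[1.0], [2.0], [3.0]])
target = torch.tensor([[2.0], [4.0], [6.0]])
print(torch.nn.MSELoss(reduction='sum')(pred, target))   # tensor(14.) = 1 + 4 + 9
print(torch.nn.MSELoss(reduction='mean')(pred, target))  # tensor(4.6667) = 14 / 3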
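And on zero_grad(): backward() adds into .grad rather than overwriting it, which is why the training loop zeros the gradients every step. A standalone sketch of that accumulation (w here is a throwaway tensor, not the model's weight):

w = torch.tensor(1.0, requires_grad=True)
(2 * w).backward()
print(w.grad)   # tensor(2.)
(2 * w).backward()
print(w.grad)   # tensor(4.) -- the second backward() added to the first
w.grad.zero_()  # optimizer.zero_grad() does this for every registered parameter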
Homework:
import torch
import numpy as np
import matplotlib.pyplot as plt
x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0]])
class LinearModel(torch.nn.Module):
    def __init__(self):
        super(LinearModel, self).__init__()
        # Construct the layer with the input and output feature dimensions;
        # the third argument (bias) defaults to True, meaning b is used.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred
criterion = torch.nn.MSELoss(reduction='sum')
epoch_list = np.arange(0, 100, 1)
optimizer_list = ['Adagrad', 'Adam', 'Adamax', 'ASGD', 'RMSprop', 'Rprop', 'SGD']
for opt_name in optimizer_list:
    # Re-create the model for every optimizer so each run starts from freshly
    # initialized parameters; with a single shared model, every optimizer after
    # the first would continue from already-trained weights.
    model = LinearModel()
    if opt_name == 'Adagrad':
        optimizer = torch.optim.Adagrad(model.parameters(), lr=0.01)
    elif opt_name == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    elif opt_name == 'Adamax':
        optimizer = torch.optim.Adamax(model.parameters(), lr=0.01)
    elif opt_name == 'ASGD':
        optimizer = torch.optim.ASGD(model.parameters(), lr=0.01)
    elif opt_name == 'RMSprop':
        optimizer = torch.optim.RMSprop(model.parameters(), lr=0.01)
    elif opt_name == 'Rprop':
        optimizer = torch.optim.Rprop(model.parameters(), lr=0.01)
    elif opt_name == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    loss_list = []
    for epoch in range(100):
        y_pred = model(x_data)
        loss = criterion(y_pred, y_data)
        loss_list.append(loss.item())
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

    # one figure per optimizer: its loss curve over the 100 epochs
    plt.figure()
    plt.plot(epoch_list, loss_list)
    plt.title(opt_name)
    plt.show()
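The if/elif chain works, but the optimizer class can also be looked up by name on torch.optim. A sketch of that variant, reusing optimizer_list and LinearModel from above:

for opt_name in optimizer_list:
    model = LinearModel()  # fresh parameters per run, as above
    optimizer_cls = getattr(torch.optim, opt_name)  # e.g. torch.optim.Adam
    optimizer = optimizer_cls(model.parameters(), lr=0.01)
    # ... training and plotting loop unchanged ...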