《PyTorch深度学习实践》 (PyTorch Deep Learning Practice), 刘二大人, Lecture 5: Linear Regression with PyTorch

import torch

# 1. prepare dataset
# x and y are 3x1 matrices: 3 samples in total, each with a single feature
x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0]])
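# Quick shape check: both tensors are 2-D, one row per sample and one
# column per feature:
# x_data.shape  -> torch.Size([3, 1])
# y_data.shape  -> torch.Size([3, 1])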
# 2. design model using class
"""
Our model class should inherit from nn.Module, which is the base class for
all neural network modules. The member methods __init__() and forward() have
to be implemented. The class nn.Linear contains two member Tensors: weight
and bias. nn.Linear also implements the magic method __call__(), which makes
an instance of the class callable just like a function; calling the instance
normally invokes forward().
"""
class LinearModel(torch.nn.Module):
    def __init__(self):
        super(LinearModel, self).__init__()
        # (1, 1) are the feature dimensions of the input x and the output y;
        # both are 1-dimensional in this dataset.
        # The learnable parameters of this layer are w and b, obtained via
        # linear.weight and linear.bias respectively.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred

model = LinearModel()

# 3. construct loss and optimizer
# criterion = torch.nn.MSELoss(size_average=False)  # size_average is deprecated; use reduction instead
criterion = torch.nn.MSELoss(reduction='sum')
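# Sketch of what the criterion computes: with reduction='sum' it is the sum of
# squared errors, equivalent to ((y_pred - y_data) ** 2).sum(); the default
# reduction='mean' would divide that sum by the number of elements.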
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # model.parameters() recursively collects the learnable parameters (here w and b of the linear layer) for the optimizer
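# Sketch: inspecting what the optimizer receives; for Linear(1, 1) this is a
# weight of shape [1, 1] and a bias of shape [1]:
# for name, p in model.named_parameters():
#     print(name, p.shape)  # linear.weight, linear.bias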

# 4. training cycle: forward, backward, update
for epoch in range(100):
    y_pred = model(x_data)  # forward: predict
    loss = criterion(y_pred, y_data)  # forward: compute the loss
    print(epoch, loss.item())

    optimizer.zero_grad()  # gradients computed by .backward() accumulate, so zero them before each backward pass
    loss.backward()  # backward: autograd computes the gradients automatically
    optimizer.step()  # update the parameters, i.e. the values of w and b
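    # Sketch: after loss.backward(), each parameter stores its gradient in
    # .grad (e.g. model.linear.weight.grad has the weight's shape); for plain
    # SGD, optimizer.step() then applies w <- w - lr * w.grad.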

print('w = ', model.linear.weight.item())
print('b = ', model.linear.bias.item())

x_test = torch.tensor([[4.0]])
y_test = model(x_test)
print('y_pred = ', y_test.data)
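For inference it is common to disable gradient tracking; here is a minimal variant of the same prediction under torch.no_grad(), reading the scalar with .item() instead of the legacy .data attribute:

with torch.no_grad():  # no autograd graph is built for pure inference
    y_test = model(torch.tensor([[4.0]]))
print('y_pred = ', y_test.item())  # .item() extracts the Python float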

 

posted @ 2022-10-20 19:57  silvan_happy