《PyTorch深度学习实践》 (PyTorch Deep Learning Practice), 刘二大人, Lecture 6: Logistic Regression

import torch
import torch.nn.functional as F

# 1. prepare dataset
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[0], [0], [1]])

# 2. design model using class
class LogisticRegressionModel(torch.nn.Module):
    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        # one input feature -> one output score
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # y_pred = F.sigmoid(self.linear(x))  # F.sigmoid is deprecated; use torch.sigmoid
        # squash the linear output into a probability in (0, 1)
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred


model = LogisticRegressionModel()

# 3. construct loss and optimizer
# By default the loss is averaged over the elements; with size_average=False it is summed instead.
# Newer PyTorch versions replace size_average=False with reduction='sum'.
# BCELoss is a special case of CrossEntropyLoss: it only handles binary classification,
# while CrossEntropyLoss works for binary as well as multi-class problems.
# (both points are illustrated with small numeric checks after the script)
criterion = torch.nn.BCELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# 4. training cycle: forward, backward, update
for epoch in range(1000):
    y_pred = model(x_data)              # forward pass
    loss = criterion(y_pred, y_data)    # binary cross-entropy loss
    print(epoch, loss.item())

    optimizer.zero_grad()               # clear accumulated gradients
    loss.backward()                     # backward pass
    optimizer.step()                    # update w and b

print('w = ', model.linear.weight.item())
print('b = ', model.linear.bias.item())

x_test = torch.Tensor([[4.0]])
y_test = model(x_test)
print('y_pred = ', y_test.data)
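
The printed y_pred is a probability, not a class label. A minimal usage sketch, assuming it is appended after the training loop above so that model is already trained; the 0.5 cut-off is a common convention chosen here for illustration, not something the model prescribes:

# assumes `model` has already been trained by the script above
x_new = torch.Tensor([[4.0]])
prob = model(x_new)                # predicted probability of the positive class
label = int(prob.item() > 0.5)     # hypothetical 0.5 decision threshold
print('probability =', prob.item(), 'predicted class =', label)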
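
To back up the comment in part 3 about the reduction argument, here is a minimal sketch (toy probabilities and labels chosen only for illustration) comparing the default reduction='mean' with reduction='sum':

import torch

p = torch.tensor([[0.9], [0.2], [0.7]])   # toy predicted probabilities
t = torch.tensor([[1.0], [0.0], [1.0]])   # toy binary labels

loss_mean = torch.nn.BCELoss()(p, t)                 # default: averaged over elements
loss_sum = torch.nn.BCELoss(reduction='sum')(p, t)   # summed over elements

# the summed loss should equal the averaged loss times the number of elements (3 here)
print(loss_mean.item(), loss_sum.item(), torch.isclose(loss_sum, loss_mean * 3).item())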
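
The comment in part 3 also states that BCELoss is a special case of CrossEntropyLoss. That can be checked numerically: if the single logit z is treated as the score of class 1 and the score of class 0 is fixed at zero, softmax reduces to the sigmoid and the two losses coincide. A minimal sketch with toy values (not taken from the model above):

import torch

z = torch.tensor([[0.5], [-1.2], [2.0]])   # toy raw outputs (logits)
y = torch.tensor([[1.0], [0.0], [1.0]])    # toy binary labels

# binary cross-entropy on sigmoid(z)
bce = torch.nn.BCELoss()(torch.sigmoid(z), y)

# cross-entropy on two-class logits [0, z]; softmax([0, z]) gives [1 - sigmoid(z), sigmoid(z)]
logits = torch.cat([torch.zeros_like(z), z], dim=1)
ce = torch.nn.CrossEntropyLoss()(logits, y.squeeze(1).long())

print(bce.item(), ce.item())   # the two values should match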

 

posted @ 2022-10-20 19:57  silvan_happy