《PyTorch 深度学习实践》 (PyTorch Deep Learning Practice), by 刘二大人, Lecture 10

In-class exercise:

import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'  # workaround for the duplicate-OpenMP-runtime crash on some conda/Windows setups

# prepare dataset
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
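# ToTensor converts the PIL image to a (1, 28, 28) float tensor in [0, 1];
# Normalize then standardizes each pixel with the widely used MNIST
# statistics (mean 0.1307, std 0.3081), so inputs are roughly zero-mean.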

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)

# design model using class
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        # input x has shape (n, 1, 28, 28)
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))  # conv: 28 -> 24, pool: 24 -> 12 => (n, 10, 12, 12)
        x = F.relu(self.pooling(self.conv2(x)))  # conv: 12 -> 8, pool: 8 -> 4 => (n, 20, 4, 4)
        x = x.view(batch_size, -1)  # flatten to (n, 320); -1 lets view infer 20 * 4 * 4 = 320
        x = self.fc(x)  # raw logits; softmax is folded into the loss below
        return x

model = Net()
# use GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
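# Module.to() moves the parameters in place, so the optimizer created
# below already sees the (possibly CUDA) parameter tensors.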

# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
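# CrossEntropyLoss combines LogSoftmax and NLLLoss internally,
# which is why forward() returns raw logits without a softmax.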
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

# training cycle: forward, backward, update
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()

        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:  # print the average loss every 300 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs, dim=1)  # index of the max logit per row
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %% ' % (100 * correct / total))
    return correct / total

if __name__ == '__main__':
    epoch_list = []
    acc_list = []

    for epoch in range(10):
        train(epoch)
        acc = test()
        epoch_list.append(epoch)
        acc_list.append(acc)

    plt.plot(epoch_list, acc_list)
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.show()
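
Where does the 320 in self.fc come from? Each 5x5 convolution (no padding) trims 4 pixels off each spatial dimension and each 2x2 max-pool halves it, so 28 -> 24 -> 12 -> 8 -> 4, and the second conv outputs 20 channels, giving 20 * 4 * 4 = 320 features. A minimal sketch (not part of the lecture listing) that checks this with a dummy batch:

import torch
import torch.nn.functional as F

conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
pooling = torch.nn.MaxPool2d(2)

x = torch.randn(1, 1, 28, 28)   # one fake MNIST image
x = F.relu(pooling(conv1(x)))   # -> (1, 10, 12, 12)
x = F.relu(pooling(conv2(x)))   # -> (1, 20, 4, 4)
print(x.view(1, -1).shape)      # torch.Size([1, 320])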

Result: the error rate is about one third lower than in the previous assignment (it's all in how you phrase it).
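To see why "one third" is mostly phrasing: if the fully connected model from the previous assignment reached about 97% accuracy (3% error) and this CNN reaches 98% (2% error), the relative error reduction is (3 - 2) / 3 ≈ 33%, even though accuracy itself only improved by one percentage point.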

……

accuracy on test set: 98 %
[9, 300] loss: 0.037
[9, 600] loss: 0.043
[9, 900] loss: 0.042
accuracy on test set: 98 %
[10, 300] loss: 0.038
[10, 600] loss: 0.038
[10, 900] loss: 0.035
accuracy on test set: 98 %
