PyTorch Learning Notes
Reference: 莫烦Python (Morvan Python) tutorials:
CNN:
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt

EPOCH = 1
BATCH_SIZE = 50
LR = 0.01
DOWNLOAD_MNIST = False

train_data = torchvision.datasets.MNIST(
    root='./minst/',
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST
)
# preview one training image:
# plt.imshow(train_data.train_data[0].numpy(), cmap='gray')
# plt.title('%i' % train_data.train_labels[0])
# plt.show()

train_loader = Data.DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)

test_data = torchvision.datasets.MNIST(root='./minst/', train=False)
# shape from (2000, 28, 28) to (2000, 1, 28, 28), values scaled into range (0, 1)
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000] / 255.
test_y = test_data.test_labels[:2000]

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(          # input shape (1, 28, 28)
            nn.Conv2d(
                in_channels=1,
                out_channels=16,
                kernel_size=5,
                stride=1,
                padding=2,                   # padding=(kernel_size-1)/2 keeps 28x28
            ),                               # output shape (16, 28, 28)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),     # output shape (16, 14, 14)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),      # output shape (32, 14, 14)
            nn.ReLU(),
            nn.MaxPool2d(2),                 # output shape (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)            # flatten to (batch_size, 32*7*7)
        output = self.out(x)
        return output

cnn = CNN()
print(cnn)

optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)   # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss()                       # the target label is not one-hotted

# training and testing
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):    # gives batch data; x is normalized when iterating train_loader
        output = cnn(b_x)               # cnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients

        if step % 50 == 0:
            test_output = cnn(test_x)
            pred_y = torch.max(test_output, 1)[1].data.squeeze()
            accuracy = (pred_y == test_y).sum().item() / float(test_y.size(0))
            print('Epoch:', epoch, '| train loss: %.4f' % loss.item(), '| accuracy: %.4f' % accuracy)

# print 10 predictions from test data
test_output = cnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')
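A quick way to verify the shape comments above is to push a dummy tensor through each stage; this is an illustrative sketch (the dummy tensor and variable names are my own, not from the original notes):

import torch

# hypothetical shape check for the CNN defined above
cnn = CNN()
dummy = torch.zeros(1, 1, 28, 28)   # (batch, channel, height, width)
feat1 = cnn.conv1(dummy)            # expect (1, 16, 14, 14) after conv + pool
feat2 = cnn.conv2(feat1)            # expect (1, 32, 7, 7)
print(feat1.shape, feat2.shape)
print(cnn(dummy).shape)             # expect (1, 10): one logit per digit class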
RNN (classification):
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision

EPOCH = 1
BATCH_SIZE = 64
TIME_STEP = 28      # rnn time step / image height
INPUT_SIZE = 28     # rnn input size / image width
LR = 0.01
DOWNLOAD_MNIST = False

train_data = torchvision.datasets.MNIST(
    root='./minst/',
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST
)
train_loader = Data.DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)

test_data = torchvision.datasets.MNIST(root='./minst/', train=False)
# shape (2000, 28, 28), values scaled into range (0, 1)
test_x = test_data.test_data.type(torch.FloatTensor)[:2000] / 255.
test_y = test_data.test_labels[:2000]

class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.LSTM(
            input_size=INPUT_SIZE,
            hidden_size=64,
            num_layers=1,
            batch_first=True,   # input & output tensors are (batch, time_step, input_size)
        )
        self.out = nn.Linear(64, 10)

    def forward(self, x):
        # x shape (batch, time_step, input_size)
        # r_out shape (batch, time_step, hidden_size)
        # h_n, h_c shape (n_layers, batch, hidden_size)
        r_out, (h_n, h_c) = self.rnn(x, None)   # None means zero initial hidden state
        out = self.out(r_out[:, -1, :])         # classify on the output at the last time step
        return out

rnn = RNN()
print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all rnn parameters
loss_func = nn.CrossEntropyLoss()                       # the target label is not one-hotted

# training and testing
for epoch in range(EPOCH):
    for step, (x, b_y) in enumerate(train_loader):      # gives batch data
        b_x = x.view(-1, 28, 28)        # reshape x to (batch, time_step, input_size)
        output = rnn(b_x)               # rnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients

        if step % 50 == 0:
            test_output = rnn(test_x.view(-1, 28, 28))
            pred_y = torch.max(test_output, 1)[1].data.squeeze()
            accuracy = (pred_y == test_y).sum().item() / float(test_y.size(0))
            print('Epoch:', epoch, '| train loss: %.4f' % loss.item(), '| accuracy: %.4f' % accuracy)

# print 10 predictions from test data
test_output = rnn(test_x[:10].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')
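The LSTM here reads each MNIST image row by row: 28 time steps of 28 pixels each, and only the last step's output feeds the classifier. A small sanity-check sketch (the dummy tensor and names are illustrative assumptions, not from the original):

import torch

# hypothetical check: r_out holds the hidden state at every time step,
# and its last slice equals the final hidden state h_n for a 1-layer LSTM
rnn = RNN()
dummy = torch.zeros(2, 28, 28)                   # (batch, time_step, input_size)
r_out, (h_n, h_c) = rnn.rnn(dummy, None)
print(r_out.shape)                               # (2, 28, 64)
print(torch.allclose(r_out[:, -1, :], h_n[0]))   # True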
RNN (regression):
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

INPUT_SIZE = 1      # rnn input size: one sin value per time step
LR = 0.02

# preview the input (sin) and target (cos) curves:
# steps = np.linspace(0, np.pi * 2, 100, dtype=np.float32)
# x_np = np.sin(steps)
# y_np = np.cos(steps)
# plt.plot(steps, y_np, 'r-', label='target (cos)')
# plt.plot(steps, x_np, 'b-', label='input (sin)')
# plt.legend(loc='best')
# plt.show()

class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.RNN(
            input_size=INPUT_SIZE,
            hidden_size=32,
            num_layers=1,
            batch_first=True,   # (batch, time_step, input_size)
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        # x shape (batch, time_step, input_size)
        # r_out shape (batch, time_step, hidden_size)
        # h_state shape (n_layers, batch, hidden_size)
        r_out, h_state = self.rnn(x, h_state)
        outs = []   # apply the linear layer at every time step
        for time_step in range(r_out.size(1)):
            outs.append(self.out(r_out[:, time_step, :]))
        return torch.stack(outs, dim=1), h_state

rnn = RNN()
print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all rnn parameters
loss_func = nn.MSELoss()

h_state = None      # None means zero initial hidden state

plt.ion()           # interactive plotting

for step in range(100):
    start, end = step * np.pi, (step + 1) * np.pi   # time range of this window
    # use sin to predict cos
    steps = np.linspace(start, end, 10, dtype=np.float32)   # float32 converts to torch FloatTensor
    x_np = np.sin(steps)
    y_np = np.cos(steps)

    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])   # shape (batch, time_step, input_size)
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])

    prediction, h_state = rnn(x, h_state)   # rnn prediction for every step, plus the final h_state

    # !! this next step is important !!
    h_state = h_state.data   # detach the hidden state from the old graph before the next
                             # iteration, otherwise backward() raises a freed-graph error

    loss = loss_func(prediction, y)   # MSE loss
    optimizer.zero_grad()             # clear gradients for this training step
    loss.backward()                   # backpropagation, compute gradients
    optimizer.step()                  # apply gradients

    if step % 5 == 0:
        tmp = torch.squeeze(prediction)
        plt.plot(steps, y_np, 'r-', label='target (cos)')
        plt.plot(steps, tmp.detach().numpy(), 'b-', label='output')
        plt.legend(loc='best')
        plt.pause(1)
        plt.close()
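The h_state = h_state.data line is the core trick here (truncated backpropagation through time): it detaches the hidden state from the previous window's graph, so each backward() only runs through the current 10 steps. A minimal standalone sketch of the same idea, using .detach() (the modern spelling of .data); the module and loop below are illustrative assumptions, not the original code:

import torch
import torch.nn as nn

# minimal sketch, assuming a 1-layer nn.RNN like the one above
rnn_cell = nn.RNN(input_size=1, hidden_size=32, num_layers=1, batch_first=True)
h = None
for i in range(3):
    x = torch.randn(1, 10, 1)   # (batch, time_step, input_size)
    out, h = rnn_cell(x, h)
    out.sum().backward()        # gradients stop at the detached hidden state
    h = h.detach()              # without this, the next backward() would try to
                                # backprop through the already-freed previous graph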