Assignment 1: Deep Learning Fundamentals
2. Coding Exercises
2.1 Basic Image Processing Exercises
1. Download and display an image
!wget https://raw.githubusercontent.com/summitgao/ImageGallery/master/yeast_colony_array.jpg
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import skimage
from skimage import data
from skimage import io
colony = io.imread('yeast_colony_array.jpg')
print(type(colony))
print(colony.shape)
<class 'numpy.ndarray'>
(406, 604, 3)
# Plot all channels of a real image
plt.subplot(121)
plt.imshow(colony[:,:,:])
plt.title('3-channel image')
plt.axis('off')
# Plot one channel only
plt.subplot(122)
plt.imshow(colony[:,:,0])
plt.title('1-channel image')
plt.axis('off');
2. Read and modify image pixel values
# Get the pixel value at row 10, column 20
camera = data.camera()
print(camera[10, 20])
153
# Set a region to black
camera[30:100, 10:100] = 0
plt.imshow(camera, 'gray')
# Set the first ten rows to black
camera = data.camera()
camera[:10] = 0
plt.imshow(camera, 'gray')
# Set pixels where the mask is True to white (255)
camera = data.camera()
mask = camera < 80
camera[mask] = 255
plt.imshow(camera, 'gray')
# Change colors in a real (RGB) image
cat = data.chelsea()
plt.imshow(cat)
# Set brighter pixels to red
red_cat = cat.copy()
reddish = cat[:, :, 0] > 160
red_cat[reddish] = [255, 0, 0]
plt.imshow(red_cat)
# Reverse the channel order from RGB to BGR (OpenCV's convention)
BGR_cat = cat[:, :, ::-1]
plt.imshow(BGR_cat)
3. Convert image data types
from skimage import img_as_float, img_as_ubyte
float_cat = img_as_float(cat)
uint_cat = img_as_ubyte(float_cat)
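As a quick sanity check (an addition, not part of the original assignment), the dtypes and value ranges can be printed before and after conversion: img_as_float maps uint8 values in [0, 255] to floats in [0, 1], and img_as_ubyte maps back.
# Sanity check: img_as_float rescales uint8 [0, 255] to float in [0.0, 1.0],
# and img_as_ubyte converts back to uint8 [0, 255]
print(cat.dtype, cat.min(), cat.max())
print(float_cat.dtype, float_cat.min(), float_cat.max())
print(uint_cat.dtype, uint_cat.min(), uint_cat.max())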
4. Display an image histogram
img = data.camera()
plt.hist(img.ravel(), bins=256, histtype='step', color='black');
5. Image segmentation
# Use colony image for segmentation
colony = io.imread('yeast_colony_array.jpg')
# Plot histogram
img = skimage.color.rgb2gray(colony)
plt.hist(img.ravel(), bins=256, histtype='step', color='black');
# Use thresholding
plt.imshow(img>0.5)
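The 0.5 cutoff above is hand-picked. A common alternative, sketched here as an addition, is to let Otsu's method (skimage.filters.threshold_otsu) derive the threshold from the histogram:
# Sketch: choose the threshold automatically with Otsu's method
# instead of the hand-tuned 0.5
from skimage.filters import threshold_otsu
thresh = threshold_otsu(img)
print(thresh)
plt.imshow(img > thresh, 'gray')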
6. Edge detection with the Canny operator
from skimage.feature import canny
from scipy import ndimage as ndi
img_edges = canny(img)
img_filled = ndi.binary_fill_holes(img_edges)
# Plot
plt.figure(figsize=(18, 12))
plt.subplot(121)
plt.imshow(img_edges, 'gray')
plt.subplot(122)
plt.imshow(img_filled, 'gray')
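As a follow-up sketch (own addition), the filled mask can be labeled with skimage.measure.label to estimate how many colonies were segmented; the 50-pixel area cutoff is an arbitrary choice to drop specks.
# Sketch: count connected regions in the filled mask.
# The estimate is only as good as the edge detection and hole filling above.
from skimage.measure import label, regionprops
labels = label(img_filled)
regions = [r for r in regionprops(labels) if r.area > 50]  # 50 px is an arbitrary cutoff
print('approximate colony count:', len(regions))
plt.imshow(labels, cmap='nipy_spectral')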
7. Adjusting image contrast
# Load an example image
img = data.camera()
plt.imshow(img, 'gray')
# Contrast stretching
from skimage import exposure
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
plt.imshow(img_rescale, 'gray')
# Equalization
img_eq = exposure.equalize_hist(img)
plt.imshow(img_eq, 'gray')
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
plt.imshow(img_adapteq, 'gray')
# Display results
def plot_img_and_hist(img, axes, bins=256):
    """Plot an image along with its histogram and cumulative histogram."""
    img = img_as_float(img)
    ax_img, ax_hist = axes
    ax_cdf = ax_hist.twinx()
    # Display image
    ax_img.imshow(img, cmap=plt.cm.gray)
    ax_img.set_axis_off()
    ax_img.set_adjustable('box')
    # Display histogram
    ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
    ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
    ax_hist.set_xlabel('Pixel intensity')
    ax_hist.set_xlim(0, 1)
    ax_hist.set_yticks([])
    # Display cumulative distribution
    img_cdf, bins = exposure.cumulative_distribution(img, bins)
    ax_cdf.plot(bins, img_cdf, 'r')
    ax_cdf.set_yticks([])
    return ax_img, ax_hist, ax_cdf
fig = plt.figure(figsize=(16, 8))
axes = np.zeros((2, 4), dtype=object)  # use the builtin object; np.object is removed in newer NumPy
axes[0, 0] = fig.add_subplot(2, 4, 1)
for i in range(1, 4):
    axes[0, i] = fig.add_subplot(2, 4, 1 + i, sharex=axes[0, 0], sharey=axes[0, 0])
for i in range(0, 4):
    axes[1, i] = fig.add_subplot(2, 4, 5 + i)
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
fig.tight_layout()
plt.show()
2.2 Basic PyTorch Exercises
1. Defining data
import torch
# A tensor can hold a single number (a scalar)
x = torch.tensor(666)
print(x)
tensor(666)
# ... or a one-dimensional array (a vector)
x = torch.tensor([1,2,3,4,5,6])
print(x)
tensor([1, 2, 3, 4, 5, 6])
# ... or a two-dimensional array (a matrix)
x = torch.ones(2,3)
print(x)
tensor([[1., 1., 1.],
        [1., 1., 1.]])
# ... or an array of any number of dimensions (a tensor)
x = torch.ones(2,3,4)
print(x)
tensor([[[1., 1., 1., 1.],
         [1., 1., 1., 1.],
         [1., 1., 1., 1.]],

        [[1., 1., 1., 1.],
         [1., 1., 1., 1.],
         [1., 1., 1., 1.]]])
# Create an uninitialized tensor (its contents are whatever happened to be in memory)
x = torch.empty(5,3)
print(x)
tensor([[8.3910e-34, 0.0000e+00, 3.3631e-44],
        [0.0000e+00,        nan, 0.0000e+00],
        [1.1578e+27, 1.1362e+30, 7.1547e+22],
        [4.5828e+30, 1.2121e+04, 7.1846e+22],
        [9.2198e-39, 7.0374e+22, 5.0948e-14]])
# Create a randomly initialized tensor
x = torch.rand(5,3)
print(x)
tensor([[0.5759, 0.2526, 0.5377],
        [0.6939, 0.4574, 0.4032],
        [0.2699, 0.4377, 0.0578],
        [0.2470, 0.3134, 0.8089],
        [0.1589, 0.5190, 0.9532]])
# Create an all-zero tensor with dtype long
x = torch.zeros(5,3,dtype=torch.long)
print(x)
tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])
# Create a new tensor based on an existing one, reusing
# properties of the original such as dtype, device and size
y = x.new_ones(5,3)  # the tensor new_* methods reuse the original tensor's dtype and device
print(y)
tensor([[1, 1, 1],
        [1, 1, 1],
        [1, 1, 1],
        [1, 1, 1],
        [1, 1, 1]])
z = torch.randn_like(x, dtype=torch.float)  # keep the original tensor's size, but override the dtype
print(z)
tensor([[ 1.0534, -1.1744, 0.1066],
[ 1.3747, 0.4671, -0.4245],
[ 0.4190, -0.4131, 0.0283],
[-0.0643, 0.2453, 0.1471],
[-1.1857, 1.7673, 0.8483]])
2. Defining operations
import torch
m = torch.Tensor([[2, 5, 3, 7],
                  [4, 2, 1, 9]])
print(m.size(0), m.size(1), m.size(), sep=' -- ')
2 -- 4 -- torch.Size([2, 4])
print(m.numel())
8
# Return the element at row 0, column 2
print(m[0][2])
tensor(3.)
# Return all elements of column 1
print(m[:, 1])
tensor([5., 2.])
# Return all elements of row 0
print(m[0, :])
tensor([2., 5., 3., 7.])
# Create a tensor of consecutive integers
# Note: arange excludes the endpoint, so the result runs from 1 to 4, without 5
v = torch.arange(1, 5)
print(v)
tensor([1, 2, 3, 4])
m = torch.tensor([[2, 5, 3, 7],
                  [4, 2, 1, 9]])
m @ v
tensor([49, 47])
m[[0], :] @ v
tensor([49])
# Add a random tensor of size 2x4 to m
m + torch.rand(2, 4)
tensor([[2.3945, 5.0113, 3.1480, 7.9408],
        [4.8637, 2.2879, 1.7211, 9.2655]])
# Transpose, turning 2x4 into 4x2
print(m.t())
# transpose(0, 1) achieves the same result; see the PyTorch docs for details
print(m.transpose(0, 1))
tensor([[2, 4],
        [5, 2],
        [3, 1],
        [7, 9]])
tensor([[2, 4],
        [5, 2],
        [3, 1],
        [7, 9]])
# Return a 1-D tensor of 20 evenly spaced points between start=3 and end=8
torch.linspace(3, 8, 20)
tensor([3.0000, 3.2632, 3.5263, 3.7895, 4.0526, 4.3158, 4.5789, 4.8421, 5.1053, 5.3684, 5.6316, 5.8947, 6.1579, 6.4211, 6.6842, 6.9474, 7.2105, 7.4737, 7.7368, 8.0000])
from matplotlib import pyplot as plt
# matplotlib can only plot numpy data, so convert the tensor before plotting
# Note: randn draws numbers with mean 0 and variance 1 (unlike rand, which is uniform)
# Below: generate 1000 random numbers and plot a histogram with 100 bins
plt.hist(torch.randn(1000).numpy(), 100);
# With far more samples, the normal-distribution shape becomes very clear
plt.hist(torch.randn(10**6).numpy(), 100);
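A quick numerical check (own addition) that randn indeed draws from a distribution with mean 0 and standard deviation 1:
# Empirical check: with 10**6 samples, the sample mean and std
# come out very close to the theoretical 0 and 1
samples = torch.randn(10**6)
print(samples.mean().item(), samples.std().item())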
# Create two 1x4 tensors
a = torch.Tensor([[1, 2, 3, 4]])
b = torch.Tensor([[5, 6, 7, 8]])
# Concatenate along dimension 0 (vertically), giving a 2x4 matrix
print( torch.cat((a,b), 0))
tensor([[1., 2., 3., 4.], [5., 6., 7., 8.]])
# Concatenate along dimension 1 (horizontally), giving a 1x8 matrix
print( torch.cat((a,b), 1))
tensor([[1., 2., 3., 4., 5., 6., 7., 8.]])
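A related operation worth contrasting (own sketch): torch.stack joins tensors along a new dimension rather than an existing one, so stacking two 1x4 tensors gives a 2x1x4 tensor, not 2x4:
# torch.cat joins along an existing dimension; torch.stack inserts a new one
print(torch.cat((a, b), 0).shape)    # torch.Size([2, 4])
print(torch.stack((a, b), 0).shape)  # torch.Size([2, 1, 4])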
2.3 Spiral Data Classification
!wget https://raw.githubusercontent.com/Atcold/pytorch-Deep-Learning/master/res/plot_lib.py
import random
import torch
from torch import nn, optim
import math
from IPython import display
from plot_lib import plot_data, plot_model, set_default
# Colab supports GPUs, so torch will run on the GPU when one is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('device: ', device)
# Initialize the random seed. Neural network parameters are randomly initialized,
# and different initializations often give different results. When we get a good result
# we usually want it to be reproducible, so in PyTorch we fix the random seed.
seed = 12345
random.seed(seed)
torch.manual_seed(seed)
N = 1000  # number of samples per class
D = 2  # feature dimension of each sample
C = 3  # number of classes
H = 100  # number of hidden units in the network
X = torch.zeros(N * C, D).to(device)
Y = torch.zeros(N * C, dtype=torch.long).to(device)
for c in range(C):
    index = 0
    t = torch.linspace(0, 1, N)  # N evenly spaced numbers in [0, 1], assigned to t
    # The formula below generates three classes of samples that together form a spiral;
    # the details are not important here.
    # torch.randn(N) draws N numbers with mean 0 and variance 1 (not to be confused with rand)
    inner_var = torch.linspace((2 * math.pi / C) * c, (2 * math.pi / C) * (2 + c), N) + torch.randn(N) * 0.2
    # Each sample's (x, y) coordinates are stored in X;
    # Y stores each sample's class label, one of [0, 1, 2]
    for ix in range(N * c, N * (c + 1)):
        X[ix] = t[index] * torch.FloatTensor((math.sin(inner_var[index]), math.cos(inner_var[index])))
        Y[ix] = c
        index += 1
print("Shapes:")
print("X:", X.size())
print("Y:", Y.size())
plot_data(X, Y)
device:  cuda:0
Shapes:
X: torch.Size([3000, 2])
Y: torch.Size([3000])
learning_rate = 1e-3
lambda_l2 = 1e-5
# Use the nn package to build a linear model;
# each linear layer holds its own weight and bias
model = nn.Sequential(
    nn.Linear(D, H),
    nn.Linear(H, C)
)
model.to(device)  # move the model to the GPU
# nn provides many loss functions; here we use cross-entropy loss
criterion = torch.nn.CrossEntropyLoss()
# Use the optim package for stochastic gradient descent (SGD) optimization
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=lambda_l2)
# Start training
for t in range(1000):
    # Feed the data through the model to get predictions
    y_pred = model(X)
    # Compute the loss and accuracy
    loss = criterion(y_pred, Y)
    score, predicted = torch.max(y_pred, 1)
    acc = (Y == predicted).sum().float() / len(Y)
    print('[EPOCH]: %i, [LOSS]: %.6f, [ACCURACY]: %.3f' % (t, loss.item(), acc))
    display.clear_output(wait=True)
    # Zero the gradients before the backward pass
    optimizer.zero_grad()
    # Backward pass
    loss.backward()
    # Update all parameters
    optimizer.step()
print(y_pred.shape)
print(y_pred[10, :])
print(score[10])
print(predicted[10])
# Plot trained model
print(model)
plot_model(X, Y, model)
torch.Size([3000, 3])
tensor([-0.1566, -0.1720, -0.1466], device='cuda:0', grad_fn=<SliceBackward>)
tensor(-0.1466, device='cuda:0', grad_fn=<SelectBackward>)
tensor(2, device='cuda:0')
Sequential(
  (0): Linear(in_features=2, out_features=100, bias=True)
  (1): Linear(in_features=100, out_features=3, bias=True)
)
print(y_pred.shape) shows that the model's predictions form a [3000, 3] matrix. Each row holds the three scores for one sample, and the largest of the three marks the class the model predicts for that sample.
score, predicted = torch.max(y_pred, 1) takes the maximum along dimension 1 (across each row). The maximum value itself is stored in score, and its column index, i.e. the predicted class, is stored in predicted.
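If only the class index is needed, torch.argmax returns the same thing as the second value from torch.max; a small check (own addition, assuming y_pred and predicted are still in scope):
# The predicted indices from torch.max(y_pred, 1) equal argmax along dim 1
print(torch.equal(predicted, torch.argmax(y_pred, dim=1)))  # True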
Printing the model with print(model) shows its two layers:
- the first layer takes 2 inputs (the feature dimension) and produces 100 outputs;
- the second layer takes 100 inputs (the previous layer's outputs) and produces 3 (the number of classes).
As the plot shows, the linear model reaches at best around 50% accuracy; for a data distribution this complex, a linear model cannot classify the points accurately.
learning_rate = 1e-3
lambda_l2 = 1e-5
# Unlike the model above, a ReLU activation is inserted between the two layers
model = nn.Sequential(
    nn.Linear(D, H),
    nn.ReLU(),
    nn.Linear(H, C)
)
model.to(device)
# The code below is identical to before and needs no further explanation
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=lambda_l2) # built-in L2
# Train the model; the code is exactly the same as before
for t in range(1000):
    y_pred = model(X)
    loss = criterion(y_pred, Y)
    score, predicted = torch.max(y_pred, 1)
    acc = ((Y == predicted).sum().float() / len(Y))
    print("[EPOCH]: %i, [LOSS]: %.6f, [ACCURACY]: %.3f" % (t, loss.item(), acc))
    display.clear_output(wait=True)
    # zero the gradients before running the backward pass.
    optimizer.zero_grad()
    # Backward pass to compute the gradient
    loss.backward()
    # Update params
    optimizer.step()
print(model)
plot_model(X, Y, model)
Sequential(
  (0): Linear(in_features=2, out_features=100, bias=True)
  (1): ReLU()
  (2): Linear(in_features=100, out_features=3, bias=True)
)
Compared with other activation functions, ReLU has clear advantages. Against purely linear layers, it adds the nonlinearity that gives the network its expressive power, which matters most in deep networks. Against other nonlinearities, its gradient is a constant 1 on the positive interval, so it does not suffer from the vanishing gradient problem, and convergence proceeds at a steady rate.
Vanishing gradients: when a layer's gradient factor is below 1, the error signal between prediction and ground truth shrinks by that factor at every layer it propagates through. With sigmoid activations in a deep model this effect is especially pronounced, and convergence can stall entirely.
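A minimal numeric illustration of this (an added sketch, not part of the original assignment): the sigmoid derivative never exceeds 0.25, so chaining it across 20 layers shrinks the gradient by roughly twelve orders of magnitude, while the ReLU derivative stays exactly 1 on the positive side.
# sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)) peaks at 0.25 (at x = 0),
# so 20 chained layers scale the gradient by at most 0.25**20 ~ 9e-13
s = torch.sigmoid(torch.zeros(1))
sig_grad = s * (1 - s)               # tensor([0.2500])
print((sig_grad ** 20).item())       # ~9.1e-13: the signal has vanished
print((torch.ones(1) ** 20).item())  # ReLU' = 1 for x > 0: no shrinkage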
2.4 Regression Analysis
!wget https://raw.githubusercontent.com/Atcold/pytorch-Deep-Learning/master/res/plot_lib.py
import random
import torch
from torch import nn, optim
import math
from IPython import display
from plot_lib import plot_data, plot_model, set_default
from matplotlib import pyplot as plt
set_default()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
seed = 1
random.seed(seed)
torch.manual_seed(seed)
N = 1000  # number of samples
D = 1  # feature dimension of each sample
C = 1  # output dimension
H = 100  # number of hidden units
X = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1).to(device)
y = X.pow(3) + 0.3 * torch.rand(X.size()).to(device)
plt.figure(figsize=(6, 6))
plt.scatter(X.cpu().numpy(), y.cpu().numpy())
plt.axis('equal');
learning_rate = 1e-3
lambda_l2 = 1e-5
# Build the neural network model
model = nn.Sequential(
    nn.Linear(D, H),
    nn.Linear(H, C)
)
model.to(device)  # move the model to the GPU
# For a regression problem, use the MSE loss
criterion = torch.nn.MSELoss()
# Define the optimizer; use SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=lambda_l2) # built-in L2
# Start training
for t in range(1000):
    # Feed the data through the model to get predictions
    y_pred = model(X)
    # Compute the MSE loss
    loss = criterion(y_pred, y)
    print("[EPOCH]: %i, [LOSS or MSE]: %.6f" % (t, loss.item()))
    display.clear_output(wait=True)
    # Zero the gradients before the backward pass
    optimizer.zero_grad()
    # Backward pass
    loss.backward()
    # Update the parameters
    optimizer.step()
print(model)
plt.figure(figsize=(6,6))
plt.scatter(X.data.cpu().numpy(), y.data.cpu().numpy())
plt.plot(X.data.cpu().numpy(), y_pred.data.cpu().numpy(), 'r-', lw=5)
plt.axis('equal');
Sequential(
  (0): Linear(in_features=1, out_features=100, bias=True)
  (1): Linear(in_features=100, out_features=1, bias=True)
)
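A side note (own addition): two Linear layers with no activation in between compose into a single affine map, which is why the fit above is a straight line. A quick check against the trained model:
# Two stacked Linear layers without a nonlinearity collapse to one affine map:
# W2 (W1 x + b1) + b2 = (W2 W1) x + (W2 b1 + b2)
W1, b1 = model[0].weight, model[0].bias
W2, b2 = model[1].weight, model[1].bias
x0 = torch.randn(1, D).to(device)
with torch.no_grad():
    print(torch.allclose(model(x0), x0 @ (W2 @ W1).t() + (W2 @ b1 + b2), atol=1e-5))  # True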
# Define two networks, relu_model and tanh_model,
# which differ only in their activation function
relu_model = nn.Sequential(
    nn.Linear(D, H),
    nn.ReLU(),
    nn.Linear(H, C)
)
relu_model.to(device)
tanh_model = nn.Sequential(
    nn.Linear(D, H),
    nn.Tanh(),
    nn.Linear(H, C)
)
tanh_model.to(device)
# MSE loss
criterion = torch.nn.MSELoss()
# Define the optimizers, using Adam; sticking with SGD here would give noticeably worse results
optimizer_relumodel = torch.optim.Adam(relu_model.parameters(), lr=learning_rate, weight_decay=lambda_l2)
optimizer_tanhmodel = torch.optim.Adam(tanh_model.parameters(), lr=learning_rate, weight_decay=lambda_l2)
# Start training
for t in range(1000):
    y_pred_relumodel = relu_model(X)
    y_pred_tanhmodel = tanh_model(X)
    # Compute the losses
    loss_relumodel = criterion(y_pred_relumodel, y)
    loss_tanhmodel = criterion(y_pred_tanhmodel, y)
    print(f"[MODEL]: relu_model, [EPOCH]: {t}, [LOSS]: {loss_relumodel.item():.6f}")
    print(f"[MODEL]: tanh_model, [EPOCH]: {t}, [LOSS]: {loss_tanhmodel.item():.6f}")
    display.clear_output(wait=True)
    optimizer_relumodel.zero_grad()
    optimizer_tanhmodel.zero_grad()
    loss_relumodel.backward()
    loss_tanhmodel.backward()
    optimizer_relumodel.step()
    optimizer_tanhmodel.step()
[MODEL]: relu_model, [EPOCH]: 999, [LOSS]: 0.006468
[MODEL]: tanh_model, [EPOCH]: 999, [LOSS]: 0.006947
Comparing the two training runs: the ReLU network converges quickly, while the Tanh network starts slower but then also converges rapidly to a good fit.
As discussed in 2.3, ReLU avoids the vanishing gradient problem because its gradient is exactly 1 on the positive side, and for the same reason it does not amplify gradients either. This is why the ReLU network converges noticeably faster than the Tanh one here.
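The derivative magnitudes back this up (own sketch): tanh'(x) = 1 - tanh(x)^2 drops below 1 everywhere except x = 0, while ReLU passes gradients through unchanged for positive inputs:
# tanh'(x) = 1 - tanh(x)^2 < 1 away from 0, so gradients shrink layer by layer;
# ReLU'(x) = 1 for x > 0, so they pass through unchanged
xs = torch.linspace(-3, 3, 7)
print(1 - torch.tanh(xs) ** 2)  # <= 1 everywhere, far below 1 for large |x|
print((xs > 0).float())         # the ReLU derivative is just 0 or 1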
plt.figure(figsize=(12, 6))
def dense_prediction(model, non_linearity):
    plt.subplot(1, 2, 1 if non_linearity == 'ReLU' else 2)
    X_new = torch.unsqueeze(torch.linspace(-1, 1, 1001), dim=1).to(device)
    with torch.no_grad():
        y_pred = model(X_new)
    plt.plot(X_new.cpu().numpy(), y_pred.cpu().numpy(), 'r-', lw=1)
    plt.scatter(X.cpu().numpy(), y.cpu().numpy(), label='data')
    plt.axis('square')
    plt.title(non_linearity + ' models')
dense_prediction(relu_model, 'ReLU')
dense_prediction(tanh_model, 'Tanh')