Andrew Ng's Deep Learning Specialization: Course 4, Week 1 programming assignment

Reference: https://blog.csdn.net/u013733326/article/details/80086090

The goal is to get a rough feel for how a convolutional neural network is implemented; in practice you would simply call the corresponding library functions.
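
As a quick illustration of that point, the same convolution + pooling building blocks can be expressed in a few lines with a framework. The sketch below is not part of the original assignment and assumes TensorFlow 2.x is available; the layer sizes are chosen only for illustration.

import tensorflow as tf

# One conv layer followed by max pooling, mirroring conv_forward + pool_forward implemented below.
layer_conv = tf.keras.layers.Conv2D(filters=8, kernel_size=2, strides=1, padding="same")
layer_pool = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)
images = tf.random.normal((10, 4, 4, 3))        # a batch in (m, n_H, n_W, n_C) layout
print(layer_pool(layer_conv(images)).shape)     # (10, 2, 2, 8)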

# coding=utf-8
import matplotlib
import numpy as np
import h5py
import matplotlib.pyplot as plt

# %matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

# IPython is handy, but a module that has already been imported must be reloaded after it is edited;
# the autoreload extension below re-imports extensions and modules before running user code.
# %load_ext autoreload
# autoreload 2: reload all modules not excluded via %aimport.
# %autoreload 2

np.random.seed(1)      # fix the random seed for reproducibility



def zero_pad(x, pad):
    # Zero-pad only the height and width axes of a batch of images with shape (m, n_H, n_W, n_C).
    x_padded = np.pad(x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0)
    return x_padded
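
# A quick, hedged sanity check (sizes are illustrative, not the assignment's official test):
# padding a (4, 3, 3, 2) batch by 2 on the spatial axes should give (4, 7, 7, 2).
_x_demo = np.random.randn(4, 3, 3, 2)
print("zero_pad:", zero_pad(_x_demo, 2).shape)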


def conv_single_step(a_slice_prev, W, b):
    # One filter applied to one (f, f, n_C_prev) slice: element-wise product, sum, then the bias
    # added once (adding b before the sum would count it f*f*n_C_prev times and disagree with db in conv_backward).
    s = np.multiply(a_slice_prev, W)
    Z = np.sum(s) + float(b)
    return Z
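
# Minimal check of a single step: one (4, 4, 3) slice against one (4, 4, 3) filter (shapes chosen here for illustration).
_a_s = np.random.randn(4, 4, 3)
_W_s = np.random.randn(4, 4, 3)
_b_s = np.random.randn(1, 1, 1)
print("conv_single_step:", conv_single_step(_a_s, _W_s, _b_s[0, 0, 0]))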

def conv_forward(A_prev, W, b, hparameters):
    # dimensions of the previous layer's activations
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    # weight tensor: the first two dims are the filter size (f, f), the third matches the
    # number of input channels, and the last is the number of filters in this layer
    (f, f, n_C_prev, n_C) = W.shape
    # stride of the filter
    stride = hparameters["stride"]

    pad = hparameters["pad"]

    n_H = int((n_H_prev - f + 2 * pad) / stride) + 1
    n_W = int((n_W_prev - f + 2 * pad) / stride) + 1

    Z = np.zeros((m, n_H, n_W, n_C))

    A_prev_pad = zero_pad(A_prev, pad)

    for i in range(m):
        a_prev_pad = A_prev_pad[i]  # select the i-th training example
        for h in range(n_H):
            for w in range(n_W):
                for c in range(n_C):
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f
                    a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
                    # single convolution step with the c-th filter
                    Z[i, h, w, c] = conv_single_step(a_slice_prev, W[:, :, :, c], b[0, 0, 0, c])
    assert Z.shape == (m, n_H, n_W, n_C)

    cache = A_prev, W, b, hparameters

    return Z, cache
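
# Usage sketch for conv_forward (the sizes below are illustrative, not the assignment's official test cell):
_A_demo = np.random.randn(10, 4, 4, 3)
_W_demo = np.random.randn(2, 2, 3, 8)
_b_demo = np.random.randn(1, 1, 1, 8)
_hparams_demo = {"pad": 2, "stride": 1}
_Z_demo, _cache_conv = conv_forward(_A_demo, _W_demo, _b_demo, _hparams_demo)
print("conv_forward:", _Z_demo.shape)   # (10, 7, 7, 8): (4 - 2 + 2*2)/1 + 1 = 7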


def pool_forward(A_prev, hparameters, mode = "max"):
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape

    f = hparameters["f"]
    stride = hparameters["stride"]

    n_H = int((n_H_prev - f) / stride) + 1
    n_W = int((n_W_prev - f) / stride) + 1
    n_C = n_C_prev

    A = np.zeros((m, n_H, n_W, n_C))

    for i in range(m):
        for h in range(n_H):
            for w in range(n_W):
                for c in range(n_C):
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f
                    a_slice_prev = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]
                    if mode == "max":
                        A[i, h, w, c] = np.max(a_slice_prev)
                    elif mode == "average":
                        A[i, h, w, c] = np.mean(a_slice_prev)
    assert A.shape == (m, n_H, n_W, n_C)

    cache = (A_prev, hparameters)

    return A, cache
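
# Usage sketch for pool_forward in both modes (again with illustrative sizes):
_A_pool_in = np.random.randn(2, 4, 4, 3)
_A_max, _ = pool_forward(_A_pool_in, {"f": 3, "stride": 2}, mode="max")
_A_avg, _ = pool_forward(_A_pool_in, {"f": 3, "stride": 2}, mode="average")
print("pool_forward:", _A_max.shape)    # (2, 1, 1, 3): (4 - 3)/2 + 1 = 1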

def conv_backward(dZ, cache):
    (A_prev, W, b, hparameters) = cache
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    (m, n_H, n_W, n_C) = dZ.shape
    (f, f, n_C_prev, n_C) = W.shape
    pad = hparameters["pad"]
    stride = hparameters["stride"]

    dA_prev = np.zeros((m, n_H_prev, n_W_prev, n_C_prev))
    dW = np.zeros((f, f, n_C_prev, n_C))
    db = np.zeros((1, 1, 1, n_C))

    A_prev_pad = zero_pad(A_prev, pad)
    dA_prev_pad = zero_pad(dA_prev, pad)

    for i in range(m):
        # the i-th example's padded activations and the matching slice of the padded gradient
        a_prev_pad = A_prev_pad[i]
        da_prev_pad = dA_prev_pad[i]

        for h in range(n_H):
            for w in range(n_W):
                for c in range(n_C):
                    # corners of the window that produced Z[i, h, w, c]
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f

                    a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]

                    da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:, :, :, c] * dZ[i, h, w, c]
                    dW[:, :, :, c] += a_slice * dZ[i, h, w, c]
                    db[:, :, :, c] += dZ[i, h, w, c]
        # strip the padding to recover the gradient with respect to the unpadded input
        dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]

    assert dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev)

    return dA_prev, dW, db
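
# Usage sketch for conv_backward, reusing the cache produced by the conv_forward example above:
_dZ_demo = np.random.randn(*_Z_demo.shape)
_dA_demo, _dW_demo, _db_demo = conv_backward(_dZ_demo, _cache_conv)
print("conv_backward:", _dA_demo.shape, _dW_demo.shape, _db_demo.shape)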


def create_mask_from_window(x):
    # Boolean mask that is True exactly where the window x attains its maximum (used for max-pool backward).
    mask = x == np.max(x)
    return mask


def distribute_value(dz, shape):
    # Spread the gradient dz evenly over an (n_H, n_W) window (used for average-pool backward).
    (n_H, n_W) = shape
    average = dz / (n_H * n_W)
    a = np.ones(shape) * average
    return a
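
# Tiny checks for the two pooling-backward helpers (example values chosen here):
print("mask:\n", create_mask_from_window(np.array([[1.0, 3.0], [2.0, 0.5]])))   # True only where the max (3.0) sits
print("distribute:\n", distribute_value(2.0, (2, 2)))                           # each entry is 2.0 / 4 = 0.5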


def pool_backward(dA, cache, mode = "max"):
    (A_prev, hparameters) = cache
    f = hparameters["f"]
    stride = hparameters["stride"]
    (m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
    (m, n_H, n_W, n_C) = dA.shape

    dA_prev = np.zeros_like(A_prev)

    for i in range(m):
        a_prev = A_prev[i]
        for h in range(n_H):
            for w in range(n_W):
                for c in range(n_C):
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f

                    if mode == "max":
                        a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]
                        mask = create_mask_from_window(a_prev_slice)
                        dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += np.multiply(mask, dA[i, h, w, c])

                    elif mode == "average":
                        da = dA[i, h, w, c]
                        shape = (f, f)
                        dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += distribute_value(da, shape)
    assert dA_prev.shape == A_prev.shape
    return dA_prev
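
# Usage sketch for pool_backward: run pool_forward first so the cache matches, then backpropagate a random dA.
_A_pb = np.random.randn(5, 5, 3, 2)
_A_out, _cache_pool = pool_forward(_A_pb, {"f": 2, "stride": 1}, mode="max")
_dA_out = np.random.randn(*_A_out.shape)
print("pool_backward:", pool_backward(_dA_out, _cache_pool, mode="max").shape)   # matches _A_pb.shape: (5, 5, 3, 2)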