Over the past two weeks I have been working through Professor Cao Jian's TensorFlow Notes course from Peking University. The teaching is quite good and well suited to anyone who wants to get a related project up and running in a short time; the course is available both on Bilibili and on the MOOC platform.

In the convolutional neural network section, the course uses LeNet-5 as an example and provides the complete code, walking through model construction, training on a reasonably large dataset, and testing. The code is not complicated, the architecture is complete, and it is clean and well written, so I plan to implement the later networks such as AlexNet following this same code structure.

Below is the LeNet-5 implementation; the dataset is again MNIST.

forward.py

#coding:utf-8
import tensorflow as tf
IMAGE_SIZE = 28
NUM_CHANNELS = 1
CONV1_SIZE = 5
CONV1_KERNEL_NUM = 32
CONV2_SIZE = 5
CONV2_KERNEL_NUM = 64
FC_SIZE = 512
OUTPUT_NODE = 10


def get_weight(shape, regularizer):  # args: shape of the tensor to create, regularization weight
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w


def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))
    return b


def conv2d(x, w):  # args: input image x and convolution kernel w, both rank-4 tensors
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


def forward(x, train, regularizer):
    conv1_w = get_weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_KERNEL_NUM], regularizer)
    conv1_b = get_bias([CONV1_KERNEL_NUM])
    conv1 = conv2d(x, conv1_w)
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b))
    pool1 = max_pool_2x2(relu1)

    conv2_w = get_weight([CONV2_SIZE, CONV2_SIZE, CONV1_KERNEL_NUM, CONV2_KERNEL_NUM], regularizer)
    conv2_b = get_bias([CONV2_KERNEL_NUM])
    conv2 = conv2d(pool1, conv2_w)
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b))
    pool2 = max_pool_2x2(relu2)
    # pool2 is the output of the second convolutional block; flatten it from a
    # 4-D tensor [batch, height, width, channels] into a 2-D tensor for the FC layers

    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    # pool_shape[0] is the batch size; [1], [2], [3] are the feature map's height,
    # width and depth, whose product is the total number of feature points
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])  # flatten to 2-D

    fc1_w = get_weight([nodes, FC_SIZE], regularizer)
    fc1_b = get_bias([FC_SIZE])
    fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b)  # feed the flattened features into the FC layer
    if train: fc1 = tf.nn.dropout(fc1, 0.5)  # during training, drop 50% of this layer's activations (keep_prob=0.5)

    fc2_w = get_weight([FC_SIZE, OUTPUT_NODE], regularizer)
    fc2_b = get_bias([OUTPUT_NODE])
    y = tf.matmul(fc1, fc2_w) + fc2_b
    return y
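
As a quick sanity check (my own addition, not part of the course code), the shapes work out as follows: with SAME padding and two rounds of 2x2/stride-2 pooling, the 28x28 input shrinks to 14x14 and then 7x7, so the flattened vector has 7 * 7 * 64 = 3136 elements. A minimal sketch that builds the graph once and prints the output shape:

import tensorflow as tf
import forward

# Build the graph with a single dummy example and inspect the logits' shape.
x = tf.placeholder(tf.float32, [1, forward.IMAGE_SIZE, forward.IMAGE_SIZE, forward.NUM_CHANNELS])
y = forward.forward(x, False, None)  # eval mode, no regularizer
print(y.get_shape())  # expect (1, 10)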

backward.py

#coding:utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os
import forward
import numpy as np

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.005
LEARNING_RATE_DECAY = 0.99
REGULARIZER = 0.0001
STEPS = 50000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH="./model/"
MODEL_NAME="mnist_model"

def backward(mnist):
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE,
        forward.IMAGE_SIZE,
        forward.IMAGE_SIZE,
        forward.NUM_CHANNELS
    ])
    y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
    y = forward.forward(x, True, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True
    )

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

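    # Maintain exponential moving averages of all trainable weights; bundle
    # the gradient step and the EMA update into a single training op.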
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

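        # If a checkpoint already exists, resume training from it.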
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        for i in range(STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs, (
                BATCH_SIZE,
                forward.IMAGE_SIZE,
                forward.IMAGE_SIZE,
                forward.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})
            if i % 100 == 0:
                print("After %d training step(s), loss an training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main():
    mnist = input_data.read_data_sets("data", one_hot=True)
    backward(mnist)

if __name__=='__main__':
    main()
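
One detail worth spelling out: with staircase=True, the learning rate decays by a factor of LEARNING_RATE_DECAY once per epoch, because decay_steps = mnist.train.num_examples / BATCH_SIZE = 55000 / 100 = 550 (55000 is the size of the MNIST training split loaded by input_data). A pure-Python sketch of the same formula (my own illustration, not course code):

# Equivalent of tf.train.exponential_decay with staircase=True:
# lr = base * decay ** floor(step / decay_steps)
def decayed_lr(step, base=0.005, decay=0.99, decay_steps=550):
    return base * decay ** (step // decay_steps)

for s in (0, 550, 5500, 55000):
    print(s, decayed_lr(s))  # 0.005, 0.00495, ~0.00452, ~0.00183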

test.py

# coding:utf-8
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import forward
import backward
import numpy as np

TEST_INTERVAL_SECS = 5


def evaluate(mnist):
    with tf.Graph().as_default() as g:  # rebuild the inference graph
        x = tf.placeholder(tf.float32, [
            mnist.test.num_examples,
            forward.IMAGE_SIZE,
            forward.IMAGE_SIZE,
            forward.NUM_CHANNELS])
        y_ = tf.placeholder(tf.float32, [None, forward.OUTPUT_NODE])
        y = forward.forward(x, False, None)

        # instantiate a Saver that restores the moving-average (shadow) values
        ema = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
                # check whether a trained model exists; if so, restore it into the current session
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)

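                    # Recover the global step count from the checkpoint filename.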
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    reshaped_x = np.reshape(mnist.test.images, (
                        mnist.test.num_examples,
                        forward.IMAGE_SIZE,
                        forward.IMAGE_SIZE,
                        forward.NUM_CHANNELS))
                    accuracy_score = sess.run(accuracy, feed_dict={x: reshaped_x, y_: mnist.test.labels})
                    print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
                else:
                    print('No checkpoint file found')
                    return
            time.sleep(TEST_INTERVAL_SECS)


def main():
    mnist = input_data.read_data_sets("data", one_hot=True)
    evaluate(mnist)


if __name__ == '__main__':
    main()
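
A note on ema.variables_to_restore() used above: it returns a dict that maps each variable's shadow (moving-average) name to the variable itself, so saver.restore loads the smoothed values from the checkpoint into the ordinary weights. A tiny sketch of the idea (my own, the variable name is illustrative):

import tensorflow as tf

v = tf.Variable(0.0, name='v')
ema = tf.train.ExponentialMovingAverage(0.99)
print(ema.variables_to_restore())
# {'v/ExponentialMovingAverage': <tf.Variable 'v:0' shape=() dtype=float32_ref>}
# i.e. the checkpoint's EMA shadow value is restored into v itself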

Running this on my own machine, the time.sleep really is necessary; without it, CPU usage stays pinned at 99% and the only way out is a forced shutdown.

The body of the while True loop keeps checking for and loading the latest trained model. On my machine I cannot train and test at the same time, so I cannot watch the test accuracy evolve over the course of training; I only get to see the final result. (Training took a full day.)

The next step is to use this as a template and put together a complete AlexNet implementation tomorrow.