<TensorFlow实战> (TensorFlow in Action), Section 5.3: Implementing an Advanced Convolutional Network
Environment: the latest version of TensorFlow. An existing installation can be upgraded with pip install --upgrade tensorflow-gpu
Then download cuDNN 6.0 from https://developer.nvidia.com/rdp/cudnn-archive and copy its three files into the corresponding three folders under C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v8.0.
After that, open a cmd prompt, import tensorflow, and make sure it loads without errors before moving on to the steps below.
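To double-check that the GPU build is actually active before moving on, a minimal sketch like the following can help (assuming TensorFlow 1.x; log_device_placement prints which device each op runs on):

import tensorflow as tf

print(tf.__version__)  # should show the upgraded version

# Run a trivial op with device placement logging; GPU kernels are listed as /gpu:0
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
    a = tf.constant([1.0, 2.0], name='a')
    b = tf.constant([3.0, 4.0], name='b')
    print(sess.run(a + b))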
First, as described on page 85 of the book, download the TensorFlow models repository:
git clone https://github.com/tensorflow/models.git
cd models/tutorials/image/cifar10
This creates a models folder. Copy cifar10.py and cifar10_input.py from the models folder into the same directory as 5_3_CNN_CIFAR10.py.
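To confirm the copy worked, it helps to import both modules from that directory; if Python picks them up from somewhere else, the paths printed below will reveal it (a small sketch):

# Run this from the folder that contains 5_3_CNN_CIFAR10.py
import cifar10
import cifar10_input

print(cifar10.__file__)        # should point at the local copy
print(cifar10_input.__file__)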
In 5_3_CNN_CIFAR10.py, change the data directory to:
data_dir = './cifar10_data/cifar-10-batches-bin'
Run 5_3_CNN_CIFAR10.py, then copy the downloaded cifar-10-batches-bin files into the cifar-10-batches-bin folder (you may need to search the disk to find where they were downloaded to).
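Before training it is worth making sure data_dir really contains the binary batches: the CIFAR-10 binary version consists of data_batch_1.bin through data_batch_5.bin plus test_batch.bin, and cifar10_input fails with a "Failed to find file" error if any of them is missing. A small check (a sketch using the data_dir set above):

import os

data_dir = './cifar10_data/cifar-10-batches-bin'
expected = ['data_batch_%d.bin' % i for i in range(1, 6)] + ['test_batch.bin']

missing = [f for f in expected if not os.path.exists(os.path.join(data_dir, f))]
if missing:
    print('Missing files in %s: %s' % (data_dir, missing))
else:
    print('All CIFAR-10 binary batches found.')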
Then run the full script:
#%%
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

#import os
import tensorflow as tf
import cifar10
import cifar10_input
import numpy as np
import time

max_steps = 3000
batch_size = 128
data_dir = './cifar10_data/cifar-10-batches-bin'


def variable_with_weight_loss(shape, stddev, wl):
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if wl is not None:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), wl, name='weight_loss')
        tf.add_to_collection('losses', weight_loss)  # collect the variable's L2 penalty in the 'losses' collection (a list of loss tensors)
    return var


def loss(logits, labels):
    # """Add L2Loss to all the trainable variables.
    # Add summary for "Loss" and "Loss/avg".
    # Args:
    #   logits: Logits from inference().
    #   labels: Labels from distorted_inputs or inputs(). 1-D tensor
    #           of shape [batch_size]
    # Returns:
    #   Loss tensor of type float.
    # """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)  # cast the labels to int64
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='cross_entropy_per_example')  # sparse (integer) class labels
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')  # tf.get_collection fetches every tensor in the collection; tf.add_n sums the whole list


###
cifar10.maybe_download_and_extract()

images_train, labels_train = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=batch_size)
images_test, labels_test = cifar10_input.inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size)
#images_train, labels_train = cifar10.distorted_inputs()
#images_test, labels_test = cifar10.inputs(eval_data=True)

image_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3])
label_holder = tf.placeholder(tf.int32, [batch_size])

#logits = inference(image_holder)

weight1 = variable_with_weight_loss(shape=[5, 5, 3, 64], stddev=5e-2, wl=0.0)  # wl=0.0: no L2 regularization on the conv weights
kernel1 = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], padding='SAME')  # convolution
bias1 = tf.Variable(tf.constant(0.0, shape=[64]))  # conv-layer bias initialized to 0
conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)  # apply LRN (local response normalization)

weight2 = variable_with_weight_loss(shape=[5, 5, 64, 64], stddev=5e-2, wl=0.0)  # second convolutional layer
kernel2 = tf.nn.conv2d(norm1, weight2, [1, 1, 1, 1], padding='SAME')
bias2 = tf.Variable(tf.constant(0.1, shape=[64]))
conv2 = tf.nn.relu(tf.nn.bias_add(kernel2, bias2))
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

reshape = tf.reshape(pool2, [batch_size, -1])  # flatten for the fully connected layers
dim = reshape.get_shape()[1].value
weight3 = variable_with_weight_loss(shape=[dim, 384], stddev=0.04, wl=0.004)  # 384 hidden units; the fully connected weights are L2-regularized
bias3 = tf.Variable(tf.constant(0.1, shape=[384]))
local3 = tf.nn.relu(tf.matmul(reshape, weight3) + bias3)

weight4 = variable_with_weight_loss(shape=[384, 192], stddev=0.04, wl=0.004)  # 192 hidden units
bias4 = tf.Variable(tf.constant(0.1, shape=[192]))
local4 = tf.nn.relu(tf.matmul(local3, weight4) + bias4)

weight5 = variable_with_weight_loss(shape=[192, 10], stddev=1/192.0, wl=0.0)
bias5 = tf.Variable(tf.constant(0.0, shape=[10]))
logits = tf.add(tf.matmul(local4, weight5), bias5)  # predicted logits

loss = loss(logits, label_holder)

train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)  # 0.72

top_k_op = tf.nn.in_top_k(logits, label_holder, 1)  # top-k accuracy of the outputs; k=1 here, i.e. whether the highest-scoring class is correct

sess = tf.InteractiveSession()  # create the default session
tf.global_variables_initializer().run()  # initialize all model parameters

tf.train.start_queue_runners()  # start the thread queue for the image-augmentation pipeline mentioned earlier

###
for step in range(max_steps):
    start_time = time.time()
    image_batch, label_batch = sess.run([images_train, labels_train])  # fetch one batch of training data
    loss_value = sess.run([train_op, loss],
                          feed_dict={image_holder: image_batch, label_holder: label_batch})
    duration = time.time() - start_time  # time spent on this step
    if step % 10 == 0:
        examples_per_sec = batch_size / duration
        sec_per_batch = float(duration)
        format_str = ('step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')
        print(format_str % (step, loss_value[1], examples_per_sec, sec_per_batch))

### evaluation on the test set
num_examples = 10000
import math
num_iter = int(math.ceil(num_examples / batch_size))
true_count = 0
total_sample_count = num_iter * batch_size
step = 0
while step < num_iter:
    image_batch, label_batch = sess.run([images_test, labels_test])
    predictions = sess.run([top_k_op],
                           feed_dict={image_holder: image_batch, label_holder: label_batch})
    true_count += np.sum(predictions)
    step += 1

precision = true_count / total_sample_count
print('precision @ 1 = %.3f' % precision)
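The part of this script that is easiest to misread is how the total loss is assembled: every call to variable_with_weight_loss pushes an L2 penalty into the 'losses' collection, loss() adds the cross-entropy term to the same collection, and tf.add_n sums everything that has accumulated there. The same mechanism in isolation (a standalone toy sketch, not part of the CIFAR-10 graph):

import tensorflow as tf

w = tf.Variable(tf.truncated_normal([3, 2], stddev=0.1))
# weight decay term, scaled by wl=0.004, stored in the 'losses' collection
tf.add_to_collection('losses', tf.multiply(tf.nn.l2_loss(w), 0.004, name='weight_loss'))
# stand-in for the cross-entropy term that loss() would add
tf.add_to_collection('losses', tf.constant(2.5, name='data_loss'))
# total loss = the data loss plus every weight decay term collected so far
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(tf.get_collection('losses')))  # [l2 term, 2.5]
    print(sess.run(total_loss))                   # their sum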
When I run it, though, it still throws an error, which is frustrating...