Understanding the TensorFlow Programming Model

1. After downloading the MNIST dataset, create a folder named MNIST_data under tensorflow/examples/tutorials/mnist/ and the program below can be run as-is.
2. The key is to understand Operation, Tensor, and Graph: operations are only actually executed when session.run() is called (see the small sketch right after this list).
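To make the deferred-execution point concrete before the MNIST program, here is a minimal, self-contained sketch (the names a, b, c are just illustrative and not part of the program below):

import tensorflow as tf

# Building the graph: these calls only add nodes, nothing is computed yet
a = tf.constant(3.0)    # a Tensor produced by a Const Operation
b = tf.constant(4.0)
c = a + b               # adds an Add Operation to the graph; c is still just a Tensor

print(c)                # prints the Tensor object, not 7.0

# Only session.run() actually executes the graph and produces the value
with tf.Session() as sess:
    print(sess.run(c))  # the Add node runs here and 7.0 is printed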

import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data

mnist = input_data.read_data_sets('MNIST_data/',one_hot = True)
# Define the computation graph
# Operations are the nodes of the graph (e.g. tf.placeholder(tf.float32,[None,784]) below)
# Tensors are the edges of the graph (e.g. x, y below)
# Adding an Operation does not run it immediately; it only runs when session.run(Operation or Tensor) is executed
x = tf.placeholder(tf.float32,[None,784])
y = tf.placeholder(tf.float32,[None,10])
# Note: dtype must be passed as a keyword; the second positional argument of tf.Variable is trainable
W = tf.Variable(tf.zeros([784,10]), dtype=tf.float32)
b = tf.Variable(tf.zeros([10]), dtype=tf.float32)
py = tf.nn.softmax(tf.matmul(x,W) + b)
loss = -tf.reduce_mean(y*tf.log(py))

# Add the graph node global_variables_initializer(), which returns the Operation that initializes the variables
# Official docs: Returns an Op that initializes global variables.
init = tf.global_variables_initializer()
# Get a Session object
sess = tf.Session()
# Actually execute the initialization node init
sess.run(init)

# Train on the MNIST dataset
# Add the train_step node, which performs one gradient-descent update; train_step is an Operation
# (plain GradientDescentOptimizer creates no extra variables, so running init above is still fine)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# Build the evaluation nodes once, outside the loop, so the graph does not keep growing
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(py,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
for i in range(10000):
    batch_xs,batch_ys = mnist.train.next_batch(1000)
    # Run the gradient-descent node; TensorFlow traces the graph dependencies backwards
    # and evaluates the nodes train_step depends on first
    # Since the shapes of x and y contain None, feed_dict = {x:batch_xs,y:batch_ys} supplies the data
    sess.run(train_step,feed_dict = {x:batch_xs,y:batch_ys})
    # Observe gradient descent on the training set
    if i%100 == 0:
        # Tracing the graph dependencies again yields the accuracy on this batch
        print('training set accuracy: ',sess.run(accuracy,feed_dict={x:batch_xs,y:batch_ys}))
# Observe performance on the test set
print('test set accuracy: ',sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels}))
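As a side note, sess.run() can also take a list of fetches, so the training Operation and the loss Tensor can be evaluated in one call while TensorFlow back-traces the dependencies only once. A minimal sketch of how the loop above could be written this way (reusing the same x, y, train_step and loss nodes; not part of the original program):

for i in range(10000):
    batch_xs, batch_ys = mnist.train.next_batch(1000)
    # One run() call: executes train_step and returns the current value of loss
    _, loss_value = sess.run([train_step, loss], feed_dict={x: batch_xs, y: batch_ys})
    if i % 100 == 0:
        print('step', i, 'loss:', loss_value)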
