TensorFlow Tutorial: Classifying the MNIST Dataset with a Plain Neural Network

First, a neural network with no hidden layer: the input layer has 784 neurons and the output layer has 10 neurons.

The code is as follows:

#coding:utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


# Load the dataset
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# Size of each mini-batch
batch_size = 100
# Compute how many batches there are in total
n_batch = mnist.train.num_examples // batch_size

# Define two placeholders
x = tf.placeholder(tf.float32, [None, 784])  # input images
y = tf.placeholder(tf.float32, [None, 10])   # input labels

# Build a simple network: the 784 pixels correspond to 784 input neurons,
# the output layer has 10 neurons, and there is no hidden layer.
# The final accuracy is around 92%.
W = tf.Variable(tf.zeros([784, 10]))  # 784 x 10 matrix of zeros
b = tf.Variable(tf.zeros([1, 10]))    # one bias per output neuron
prediction = tf.nn.softmax(tf.matmul(x, W) + b)

# Quadratic (mean squared error) cost function
loss = tf.reduce_mean(tf.square(y - prediction))
# Train with gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Variable initializer
init = tf.global_variables_initializer()

# The comparison results are stored in a list of booleans
# argmax returns the index of the largest value of a tensor along the given axis
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):  # 21 epochs: every training image is seen 21 times
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))

The results are as follows:

Iter 0,Testing Accuracy 0.8304
Iter 1,Testing Accuracy 0.8704
Iter 2,Testing Accuracy 0.8821
Iter 3,Testing Accuracy 0.8876
Iter 4,Testing Accuracy 0.8932
Iter 5,Testing Accuracy 0.8968
Iter 6,Testing Accuracy 0.8995
Iter 7,Testing Accuracy 0.9019
Iter 8,Testing Accuracy 0.9033
Iter 9,Testing Accuracy 0.9048
Iter 10,Testing Accuracy 0.9065
Iter 11,Testing Accuracy 0.9074
Iter 12,Testing Accuracy 0.9084
Iter 13,Testing Accuracy 0.909
Iter 14,Testing Accuracy 0.9094
Iter 15,Testing Accuracy 0.9112
Iter 16,Testing Accuracy 0.9117
Iter 17,Testing Accuracy 0.9128
Iter 18,Testing Accuracy 0.9127
Iter 19,Testing Accuracy 0.9132
Iter 20,Testing Accuracy 0.9144
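
As an aside, the quadratic cost used above is not the only option: for classification, a softmax cross-entropy loss usually converges faster. Below is a minimal sketch of that change, assuming the rest of the script stays exactly as written (x, W, b and y are the tensors already defined above):

# Sketch: replace the quadratic cost with softmax cross-entropy.
# Assumption: x, W, b, y are defined as in the script above.
logits = tf.matmul(x, W) + b               # unnormalized scores
prediction = tf.nn.softmax(logits)         # still used for the accuracy calculation
# The loss takes the raw logits, not the softmax output
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

The training loop and the accuracy computation do not need to change.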

Next is a neural network with two hidden layers: the input layer has 784 neurons, both hidden layers have 100 neurons, and the output layer has 10 neurons. After 500 epochs the final accuracy is only around 88%. Embarrassingly, the accuracy actually went down; the hyperparameters will need some gradual tuning.

#coding:utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


# Load the dataset
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# Size of each mini-batch
batch_size = 50
# Compute how many batches there are in total
n_batch = mnist.train.num_examples // batch_size

# Define two placeholders
x = tf.placeholder(tf.float32, [None, 784])  # input images
y = tf.placeholder(tf.float32, [None, 10])   # input labels


# Define the hidden layers of the network
Weights_L1 = tf.Variable(tf.random_normal([784, 100]))
biase_L1 = tf.Variable(tf.zeros([1, 100]))
Wx_plus_b_L1 = tf.matmul(x, Weights_L1) + biase_L1
L1 = tf.nn.tanh(Wx_plus_b_L1)  # tanh as the activation function

Weights_L2 = tf.Variable(tf.random_normal([100, 100]))
biase_L2 = tf.Variable(tf.zeros([1, 100]))
Wx_plus_b_L2 = tf.matmul(L1, Weights_L2) + biase_L2
L2 = tf.nn.tanh(Wx_plus_b_L2)  # tanh as the activation function

# Define the output layer of the network
Weights_L3 = tf.Variable(tf.random_normal([100, 10]))
biase_L3 = tf.Variable(tf.zeros([1, 10]))
Wx_plus_b_L3 = tf.matmul(L2, Weights_L3) + biase_L3
prediction = tf.nn.tanh(Wx_plus_b_L3)  # note: tanh (not softmax) is also used on the output layer

# Quadratic (mean squared error) cost function
loss = tf.reduce_mean(tf.square(y - prediction))
# Train with gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Variable initializer
init = tf.global_variables_initializer()

# The comparison results are stored in a list of booleans
# argmax returns the index of the largest value of a tensor along the given axis
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(500):  # 500 epochs
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ",Testing Accuracy " + str(acc))
The results of the last few epochs are as follows:

Iter 487,Testing Accuracy 0.8847
Iter 488,Testing Accuracy 0.8853
Iter 489,Testing Accuracy 0.878
Iter 490,Testing Accuracy 0.8861
Iter 491,Testing Accuracy 0.8863
Iter 492,Testing Accuracy 0.8784
Iter 493,Testing Accuracy 0.8855
Iter 494,Testing Accuracy 0.8787
Iter 495,Testing Accuracy 0.881
Iter 496,Testing Accuracy 0.8837
Iter 497,Testing Accuracy 0.8817
Iter 498,Testing Accuracy 0.8837
Iter 499,Testing Accuracy 0.8866
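
One likely culprit for the drop, besides the tanh on the output layer, is the weight initialization: tf.random_normal defaults to a standard deviation of 1.0, which can saturate tanh units when each neuron sums 784 inputs. Below is a hedged sketch of the usual adjustments (smaller initial weights, softmax output with a cross-entropy loss); the stddev value of 0.1 is just an illustrative assumption, not a tuned setting, and the rest of the script stays unchanged:

# Sketch of possible adjustments; only the lines shown here would change.
Weights_L1 = tf.Variable(tf.random_normal([784, 100], stddev=0.1))
Weights_L2 = tf.Variable(tf.random_normal([100, 100], stddev=0.1))
Weights_L3 = tf.Variable(tf.random_normal([100, 10], stddev=0.1))

logits = tf.matmul(L2, Weights_L3) + biase_L3   # raw scores from the last layer
prediction = tf.nn.softmax(logits)              # softmax instead of tanh on the output
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)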

 
