MNIST Data Classification

The difference between classification and regression lies in the type of the output variable. Put simply, a quantitative output is regression, i.e. prediction of a continuous variable, while a qualitative output is classification, i.e. prediction of a discrete variable. For example, predicting house prices is a regression task; sorting things into several categories, such as cat, dog, pig, or cow, is a classification task.

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

# Prepare the data: download MNIST and load it with one-hot labels
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# add_layer builds the simplest possible network structure:
# just an input layer and an output layer.
def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

# Evaluate accuracy: compare the predicted class (argmax of the softmax
# output) against the true class (argmax of the one-hot label).
def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys})
    return result

# The input has 784 features (28*28 pixels), the output has 10 classes,
# and the activation is softmax.
xs = tf.placeholder(tf.float32, [None, 784])   # 28*28
ys = tf.placeholder(tf.float32, [None, 10])
prediction = add_layer(xs, 784, 10, activation_function=tf.nn.softmax)

# loss: cross-entropy between the one-hot labels and the softmax output
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))

train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Train for 1000 steps on mini-batches of 100 images,
# printing test accuracy every 50 steps.
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
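To make the loss and accuracy lines above concrete: cross_entropy sums -y*log(p) over the 10 classes for each sample (only the true class contributes, since ys is one-hot) and then averages over the batch, while compute_accuracy counts how often the argmax of the prediction matches the argmax of the one-hot label. Below is a minimal NumPy sketch of the same arithmetic on a made-up toy batch (the numbers are illustrative, not MNIST values):

import numpy as np

# Toy softmax outputs for a batch of 2 samples over 3 classes (made-up numbers)
prediction = np.array([[0.7, 0.2, 0.1],
                       [0.1, 0.3, 0.6]])
# One-hot labels: sample 0 is class 0, sample 1 is class 2
labels = np.array([[1., 0., 0.],
                   [0., 0., 1.]])

# Cross-entropy: -sum(y * log(p)) per sample, then mean over the batch
# (mirrors tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction), reduction_indices=[1])))
per_sample = -np.sum(labels * np.log(prediction), axis=1)
cross_entropy = np.mean(per_sample)
print(cross_entropy)   # mean of -log(0.7) and -log(0.6), about 0.434

# Accuracy: predicted class (argmax of the softmax output) vs.
# true class (argmax of the one-hot label), as in compute_accuracy
correct = np.equal(np.argmax(prediction, axis=1), np.argmax(labels, axis=1))
accuracy = np.mean(correct.astype(np.float32))
print(accuracy)        # 1.0 -- both toy samples are classified correctly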

