tensorflow learning journey ----- a simple convolutional neural network

 

import tensorflow as tf
#Load the data; the goal is to recognize handwritten digits
from tensorflow.examples.tutorials.mnist import input_data
'''
To add the dataset manually, first put the four MNIST archive files into a folder under the current directory.
'''
MNIST_data_folder="C:\\Users\\悟悔\\MNIST_data"#local path to the dataset (the call below actually reads from "MNIST_data/")
mnist=input_data.read_data_sets("MNIST_data/",one_hot=True)
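'''
Optional sanity check: with one_hot=True the standard split gives 55000 training, 5000 validation
and 10000 test images, each flattened to 784 pixels with a 10-way one-hot label.
'''
# print(mnist.train.images.shape)   # (55000, 784)
# print(mnist.test.labels.shape)    # (10000, 10)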
def weight_variable(shape):
  # Weights are initialized from a truncated normal distribution (stddev 0.1).
  return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bais_variable(shape):
  # Biases must be trainable Variables; a bare tf.constant would never be updated by training.
  return tf.Variable(tf.constant(0.1, shape=shape))
#Define the convolution and pooling helpers
def conv2d(x, W):
  # strides = [1, 1, 1, 1]: one step along each of [batch, height, width, channels];
  # SAME padding keeps the spatial size unchanged.
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")

def max_pool_2x2(x):
  # A 2x2 window (ksize) moved 2 pixels at a time (strides) halves the height and width.
  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

x_data = tf.placeholder(tf.float32,[None,784])#each image is 28x28 = 784 pixels, flattened
y_data = tf.placeholder(tf.float32,[None,10])#one-hot label over the 10 digit classes
keep_prol = tf.placeholder(tf.float32)#dropout keep probability
x_image = tf.reshape(x_data,[-1,28,28,1])
'''
-1: the batch dimension, inferred automatically so any number of images is accepted
28, 28: the image height and width in pixels
1: the number of channels (the image "depth"): 1 for grayscale, 3 for an RGB image
'''
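'''
Optional sanity check: the static shape recorded on the tensor already reflects the reshape,
so printing it is a cheap way to confirm the layout before building the layers.
'''
# print(x_image.shape)   # expected: (?, 28, 28, 1)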
'''
First convolutional layer
'''
conv1_weight = weight_variable([5,5,1,32])
'''
5, 5: the height and width of the convolution kernel
1: the number of input channels (1 for grayscale, 3 for RGB)
32: the chosen number of output channels [feature maps]
'''
conv1_bais = bais_variable([32])
'''
The number of biases must match the output depth of the convolution (32 here).
'''
d_conv1 = tf.nn.relu(conv2d(x_image,conv1_weight)+conv1_bais)#apply the ReLU activation to introduce non-linearity; output is 28x28x32
'''
Shape arithmetic: the 28x28x1 input convolved with 5x5 kernels producing 32 channels becomes
28x28x32 (SAME padding keeps the spatial size; 32 is the output depth we chose above).
'''
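'''
With SAME padding the spatial output size is ceil(input_size / stride) = ceil(28 / 1) = 28,
so only the channel count changes; the commented check below should confirm this.
'''
# print(d_conv1.shape)   # expected: (?, 28, 28, 32)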
d_pool1 = max_pool_2x2(d_conv1)#14x14x32
'''
Pooling arithmetic: the 2x2 window with stride 2 shrinks the feature map, halving the height
and width, so 28x28x32 becomes 14x14x32.
'''
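'''
Same formula applies to the pooling step: ceil(28 / 2) = 14 in each spatial dimension.
'''
# print(d_pool1.shape)   # expected: (?, 14, 14, 32)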

'''
Second convolutional layer
'''
conv2_weight = weight_variable([5,5,32,64])
conv2_bais = bais_variable([64])
d_conv2 = tf.nn.relu(conv2d(d_pool1,conv2_weight)+conv2_bais)#ReLU activation, as in the first layer; 14x14x64
d_pool2 = max_pool_2x2(d_conv2)#7x7x64
'''
The arithmetic is the same as for the first layer: 14x14x32 goes through 5x5 kernels
(32 input channels, 64 output channels) to give 14x14x64, and the 2x2 pooling halves it to 7x7x64.
'''
'''
First fully connected layer
'''
Weights1 = weight_variable([7*7*64,1024])
baises1 = bais_variable([1024])
inputs1 = tf.reshape(d_pool2,[-1,7*7*64])
'''
Flatten d_pool2 so that each image becomes a single row vector.
'''
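'''
Why 7*7*64: the two pooling layers halve the 28x28 input twice (28 -> 14 -> 7) and the second
convolution produces 64 channels, so each image flattens to 7*7*64 = 3136 values.
'''
# print(inputs1.shape)   # expected: (?, 3136)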
W_plus_bs1= tf.nn.relu(tf.matmul(inputs1,Weights1)+baises1)
W_plus_b1 = tf.nn.dropout(W_plus_bs1,keep_prol)#dropout to reduce overfitting; keep_prol is the keep probability

'''
Second fully connected layer
'''
Weights2 = weight_variable([1024,10])
baises2 = bais_variable([10])
prediction = tf.nn.softmax(tf.matmul(W_plus_b1,Weights2)+baises2)

'''
Loss function and optimization
'''
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_data * tf.log(prediction),
                                              reduction_indices=[1]))
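'''
A note on numerical stability (a sketch of a common alternative, not what this script uses):
tf.log(prediction) can hit log(0) when the softmax saturates. Keeping the raw logits and using
the fused softmax-cross-entropy op available in recent TF 1.x releases avoids that:
'''
# logits = tf.matmul(W_plus_b1, Weights2) + baises2          # pre-softmax scores
# prediction = tf.nn.softmax(logits)
# cross_entropy = tf.reduce_mean(
#     tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_data, logits=logits))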
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
init = tf.global_variables_initializer()

def compute_accuracy(v_xs, v_ys):
  global prediction
  y_pre = sess.run(prediction, feed_dict={x_data: v_xs, keep_prol: 1})
  correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  result = sess.run(accuracy, feed_dict={x_data: v_xs, y_data: v_ys, keep_prol: 1})
  return result
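'''
A small caveat (sketch of an alternative): compute_accuracy adds new tf.equal / tf.reduce_mean
nodes to the graph on every call. Defining the accuracy op once, outside the function, keeps
the graph from growing during evaluation:
'''
# correct_op = tf.equal(tf.argmax(prediction, 1), tf.argmax(y_data, 1))
# accuracy_op = tf.reduce_mean(tf.cast(correct_op, tf.float32))
# ... later: sess.run(accuracy_op, feed_dict={x_data: v_xs, y_data: v_ys, keep_prol: 1})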

with tf.Session() as sess:
  sess.run(init)
  for i in range(1000):
    batch_x, batch_y = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x_data: batch_x, y_data: batch_y, keep_prol: 0.5})
    if i % 50 == 0:
      print(compute_accuracy(mnist.test.images, mnist.test.labels))

posted @ 2018-08-08 09:54  Myuniverse