import numpy as np
import pickle
import cv2
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
train_data = {b'data': [], b'labels': []}
with open("D:/TensorFlow_gpu/animal.pickle", mode='rb') as file:
    data = pickle.load(file, encoding='bytes')
    train_data[b'data'] += list(data['train_images'])
    train_data[b'labels'] += list(data['train_label'])
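# Assumed pickle layout (not verifiable from this snippet): a dict whose
# 'train_images' entry holds arrays shaped (224, 224, 3) and whose
# 'train_label' entry holds integer class ids; the b'data'/b'labels' keys
# above follow the CIFAR-10 pickle convention.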
train_epochs = 802        # number of training steps
batch_size = 40           # mini-batch size drawn per step
display_step = 10         # interval (in steps) between logging training results
learning_rate = 0.000001  # learning rate
drop_prob = 0.2           # dropout: fraction of activations to drop
fch_nodes = 256           # number of neurons in the fully connected hidden layer
def weight_init(shape):
    # Truncated normal distribution with mean 0
    weights = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)
    # weights = tf.truncated_normal(shape, mean=0.01, stddev=0.1, dtype=tf.float32)
    return tf.Variable(weights)
# Bias initialization
def biases_init(shape):
    biases = tf.random_normal(shape, dtype=tf.float32)
    # biases = tf.random_normal(shape, mean=-0.01, stddev=0.1, dtype=tf.float32)
    return tf.Variable(biases)
# Pick a mini-batch: returns the index range of a contiguous window
# starting at a random offset (not a true random sample of the data)
def get_random_batchdata(n_samples, batchsize):
    start_index = np.random.randint(0, n_samples - batchsize)
    return (start_index, start_index + batchsize)
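# The window above is contiguous, so consecutive batches overlap heavily.
# A sketch of an alternative that draws an independent random sample each
# step (the helper name get_shuffled_batch is ours, not from the original):
def get_shuffled_batch(x_data, y_data, batchsize):
    # draw `batchsize` distinct row indices uniformly at random
    idx = np.random.choice(len(x_data), size=batchsize, replace=False)
    return x_data[idx], y_data[idx]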
# Xavier (Glorot) uniform initialization for a fully connected layer:
# samples from U(-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out)))
def xavier_init(layer1, layer2, constant=1):
    Min = -constant * np.sqrt(6.0 / (layer1 + layer2))
    Max = constant * np.sqrt(6.0 / (layer1 + layer2))
    return tf.Variable(tf.random_uniform((layer1, layer2), minval=Min, maxval=Max, dtype=tf.float32))
# 3x3 convolution, stride 1, SAME padding (spatial size preserved)
def conv2d(x, w):
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

# 2x2 max pooling, stride 2 (halves the spatial size)
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
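# With stride-1 SAME convolutions the spatial size is unchanged, and each
# 2x2/stride-2 pool halves it, so the feature maps below shrink as
# 224 -> 112 -> 56 -> 28 -> 14 -> 7 across the five pooling stages.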
x = tf.placeholder(tf.float32, [None, 224, 224, 3])
y = tf.placeholder(tf.float32, [None, 2])
# The input already arrives as a 224x224x3 image tensor, so no reshape is needed
x_image = x
w_conv1 = weight_init([3, 3, 3, 96])  # 3x3 kernel, input depth 3, 96 filters
b_conv1 = biases_init([96])
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)  # output: 224x224x96
h_pool1 = max_pool_2x2(h_conv1)  # pooled: 112x112x96
W_conv2 = weight_init([3, 3, 96, 96])
b_conv2 = biases_init([96])
h_conv2 = tf.nn.tanh(conv2d(h_pool1, W_conv2) + b_conv2)  # output: 112x112x96
h_pool2 = max_pool_2x2(h_conv2)  # pooled: 56x56x96
# Conv 2-1
W_conv3 = weight_init([3, 3, 96, 128])
b_conv3 = biases_init([128])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)  # output: 56x56x128
h_pool3 = max_pool_2x2(h_conv3)  # pooled: 28x28x128
# Conv 2-2
W_conv4 = weight_init([3, 3, 128, 128])
b_conv4 = biases_init([128])
h_conv4 = tf.nn.tanh(conv2d(h_pool3, W_conv4) + b_conv4)  # output: 28x28x128
h_pool4 = max_pool_2x2(h_conv4)  # pooled: 14x14x128
# Conv 3-1
W_conv5 = weight_init([3, 3, 128, 256])
b_conv5 = biases_init([256])
h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)  # output: 14x14x256
h_pool5 = max_pool_2x2(h_conv5)  # pooled: 7x7x256
h_pool5_flat = tf.reshape(h_pool5, [-1, 7 * 7 * 256])  # flatten for the FC layer
w_fc1 = xavier_init(7 * 7 * 256, fch_nodes)
b_fc1 = biases_init([fch_nodes])
h_fc1 = tf.nn.relu(tf.matmul(h_pool5_flat, w_fc1) + b_fc1)
# tf.nn.dropout takes the KEEP probability, so pass 1 - drop_prob here;
# passing drop_prob directly would keep only 20% of the activations
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=1 - drop_prob)
# Weights between the fully connected hidden layer and the output layer
w_fc2 = xavier_init(fch_nodes, 2)
b_fc2 = biases_init([2])
# Logits (pre-activation output), computed from the dropout-regularized
# activations; with a constant keep probability the dropout also runs at
# evaluation time (a placeholder would allow switching it off there)
y_ = tf.add(tf.matmul(h_fc1_drop, w_fc2), b_fc2)
# Activated output
y_out = tf.nn.softmax(y_)
# y_out = tf.nn.sigmoid(y_)
# Cross-entropy loss; clip the softmax output so log(0) cannot produce NaNs
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y * tf.log(tf.clip_by_value(y_out, 1e-10, 1.0)), axis=1))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
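# A numerically stabler equivalent is to fuse the softmax with the loss on
# the raw logits y_; shown here for reference only, not wired into the graph:
# cross_entropy = tf.reduce_mean(
#     tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=y_))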
# Accuracy: each prediction is a length-2 vector of class scores, so compare
# the argmax of the labels with the argmax of the prediction
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_out, 1))
# tf.cast converts the booleans to floats before averaging
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()
n_samples = 1800  # number of training samples
total_batches = n_samples // batch_size
# x_train = np.array(train_data[b'data']) / 255  # optional pixel scaling to [0, 1]
x_train = np.array(train_data[b'data'])
y_train = np.array(pd.get_dummies(train_data[b'labels']))  # one-hot labels
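# The graph above is only built, never run. A minimal sketch of the TF1
# training loop these hyper-parameters (train_epochs, display_step, the
# optimizer) appear to be intended for (an assumption, not original code):
def run_training():
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(train_epochs):
            start, end = get_random_batchdata(n_samples, batch_size)
            batch_x, batch_y = x_train[start:end], y_train[start:end]
            _, loss = sess.run([optimizer, cross_entropy],
                               feed_dict={x: batch_x, y: batch_y})
            if epoch % display_step == 0:
                acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
                print('epoch %d, loss %.4f, batch acc %.4f' % (epoch, loss, acc))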