[A-Di's Deep Learning Journey] TF Girls Practice Guide 1: Analyzing the Training-Process Data
''' Following the TF Girls tutorial series (many thanks to its author); this article analyzes the training process. Code: https://github.com/CreatCodeBuild/TensorFlow-and-DeepLearning-Tutorial.git '''
train_batch_size=64
One batch contains 64 images.
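For reference, the tutorial feeds these batches from a generator; a minimal sketch of such an iterator is shown below. The name train_data_iterator matches the log further down, but the exact signature and the wrap-around behaviour are assumptions, not the repo's exact code.

def train_data_iterator(samples, labels, iteration_steps, chunkSize):
    """Yield (step, sample_batch, label_batch) with chunkSize samples per batch."""
    assert len(samples) == len(labels)
    i, step = 0, 0
    while step < iteration_steps:
        if i + chunkSize > len(samples):
            i = 0  # wrap around once the training set is exhausted
        yield step, samples[i:i + chunkSize], labels[i:i + chunkSize]
        i += chunkSize
        step += 1

# With chunkSize=64, every yielded sample_batch has shape (64, 32, 32, 1)
# and every label_batch has shape (64, 10), exactly as printed below.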
input samples.shape= (64, 32, 32, 1)
The input is 64 images, each 32x32 with 1 channel (the images are converted to grayscale at load time: three color channels -> one channel).
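The grayscale step itself is a one-liner in numpy. The sketch below assumes a plain average over the three colour channels plus a simple normalization (which would also explain why the raw sample values printed later lie roughly in [-1, 1]); the repo's loader may differ in details.

import numpy as np

def to_grayscale(images_rgb):
    """(N, 32, 32, 3) RGB images -> (N, 32, 32, 1) normalized grayscale images."""
    gray = images_rgb.astype(np.float32).mean(axis=3, keepdims=True)  # average the 3 channels
    return (gray - 128.0) / 128.0  # assumed normalization to roughly [-1, 1]

# to_grayscale(np.zeros((64, 32, 32, 3))).shape -> (64, 32, 32, 1)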
input labels.shape= (64, 10)
The input also contains 64 labels, each encoded as a one-hot vector, shown in full below:
input labels= [ [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]]
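Converting integer class labels into one-hot rows like the ones above takes only a few lines of numpy; this is a sketch rather than the repo's exact loader code.

import numpy as np

def one_hot(labels, num_classes=10):
    """Integer labels of shape (N,) -> one-hot matrix of shape (N, num_classes)."""
    encoded = np.zeros((len(labels), num_classes), dtype=np.float32)
    encoded[np.arange(len(labels)), labels] = 1.0
    return encoded

# one_hot(np.array([1, 9, 2]))[0] -> [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]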
To make the training process easier to follow, I added print statements to the code; the annotated run is analyzed below:
(base) E:\90.work\AI\DeepLearning\TensorFlow-and-DeepLearning-Tutorial\Season1\20>python main.py
E:\90.work\AI\DeepLearning\TensorFlow-and-DeepLearning-Tutorial\Season1\20
2019-06-14 23:45:14.709413: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
2019-06-14 23:45:14.888604: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties: name: GeForce RTX 2070 major: 7 minor: 5 memoryClockRate(GHz): 1.62 pciBusID: 0000:04:00.0 totalMemory: 8.00GiB freeMemory: 6.59GiB
2019-06-14 23:45:14.893460: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0
2019-06-14 23:45:15.323266: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-06-14 23:45:15.326346: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0
2019-06-14 23:45:15.327523: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N
2019-06-14 23:45:15.328806: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 6553 MB memory) -> physical GPU (device: 0, name: GeForce RTX 2070, pci bus id: 0000:04:00.0, compute capability: 7.5)
#Data set: (number of images, image width, image height, channels per image) (number of labels, length of each label)
Training set (73257, 32, 32, 1) (73257, 10)
Test set (26032, 32, 32, 1) (26032, 10)
image_size= 32 num_labels= 10 num_channels= 1
#[FUNCFLAG] marks entry into a function
[FUNCFLAG]entry __init__ func
[FUNCFLAG]entry define_inputs func
[FUNCFLAG]entry add_conv func
WARNING:tensorflow:From C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version. Instructions for updating: Colocations handled automatically by placer.
[FUNCFLAG]entry add_conv func
[FUNCFLAG]entry add_conv func
[FUNCFLAG]entry add_conv func
[FUNCFLAG]entry add_fc func
[FUNCFLAG]entry add_fc func
[FUNCFLAG]entry define_model func
[FUNCFLAG]entry model func
##############Convolutional Layers Begin##################
#Conv layer 1: input (64, 32, 32, 1), output (64, 32, 32, 32)
#weights: each filter (feature map) is 3x3 with 1 input channel, and there are 32 of them. Not sure why output_depth is set to 32; it is simply the number of filters, a hyperparameter choice.
[ConvLayer]weights= <tf.Variable 'conv1/conv1_weights:0' shape=(3, 3, 1, 32) dtype=float32_ref>
#biases: one-dimensional, with one entry per filter
[ConvLayer]biases= <tf.Variable 'conv1/conv1_biases:0' shape=(32,) dtype=float32_ref>
#The input shape is (64, 32, 32, 1); note that after the convolution the last dimension of the output becomes 32, because output_depth is 32
[ConvLayer]data_flow after conv2d= Tensor("conv1_model/convolution/Conv2D:0", shape=(64, 32, 32, 32), dtype=float32)
#Adding the biases does not change the shape
[ConvLayer]data_flow after add biases= Tensor("conv1_model/convolution/add:0", shape=(64, 32, 32, 32), dtype=float32)
#The ReLU activation does not change the shape either
[ConvLayer]data_flow after relu= Tensor("conv1_model/Relu:0", shape=(64, 32, 32, 32), dtype=float32)
#Conv layer 2: input (64, 32, 32, 32), output (64, 16, 16, 32)
#The third dimension of the weights is 32 because conv layer 1's output_depth is 32
[ConvLayer]weights= <tf.Variable 'conv2/conv2_weights:0' shape=(3, 3, 32, 32) dtype=float32_ref>
[ConvLayer]biases= <tf.Variable 'conv2/conv2_biases:0' shape=(32,) dtype=float32_ref>
[ConvLayer]data_flow after conv2d= Tensor("conv2_model/convolution/Conv2D:0", shape=(64, 32, 32, 32), dtype=float32)
[ConvLayer]data_flow after add biases= Tensor("conv2_model/convolution/add:0", shape=(64, 32, 32, 32), dtype=float32)
[ConvLayer]data_flow after relu= Tensor("conv2_model/Relu:0", shape=(64, 32, 32, 32), dtype=float32)
#Max pooling with scale 2, so the feature maps shrink to 16x16
[ConvLayer]data_flow after max_pool= Tensor("conv2_model/MaxPool:0", shape=(64, 16, 16, 32), dtype=float32)
#Conv layer 3: input (64, 16, 16, 32), output (64, 16, 16, 32)
[ConvLayer]weights= <tf.Variable 'conv3/conv3_weights:0' shape=(3, 3, 32, 32) dtype=float32_ref>
[ConvLayer]biases= <tf.Variable 'conv3/conv3_biases:0' shape=(32,) dtype=float32_ref>
[ConvLayer]data_flow after conv2d= Tensor("conv3_model/convolution/Conv2D:0", shape=(64, 16, 16, 32), dtype=float32)
[ConvLayer]data_flow after add biases= Tensor("conv3_model/convolution/add:0", shape=(64, 16, 16, 32), dtype=float32)
[ConvLayer]data_flow after relu= Tensor("conv3_model/Relu:0", shape=(64, 16, 16, 32), dtype=float32)
#Conv layer 4: input (64, 16, 16, 32), output (64, 8, 8, 32)
[ConvLayer]weights= <tf.Variable 'conv4/conv4_weights:0' shape=(3, 3, 32, 32) dtype=float32_ref>
[ConvLayer]biases= <tf.Variable 'conv4/conv4_biases:0' shape=(32,) dtype=float32_ref>
[ConvLayer]data_flow after conv2d= Tensor("conv4_model/convolution/Conv2D:0", shape=(64, 16, 16, 32), dtype=float32)
[ConvLayer]data_flow after add biases= Tensor("conv4_model/convolution/add:0", shape=(64, 16, 16, 32), dtype=float32)
[ConvLayer]data_flow after relu= Tensor("conv4_model/Relu:0", shape=(64, 16, 16, 32), dtype=float32)
#Pooling again, the feature maps shrink to 8x8
[ConvLayer]data_flow after max_pool= Tensor("conv4_model/MaxPool:0", shape=(64, 8, 8, 32), dtype=float32)
############Convolutional Layers End###############
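The shape changes logged above can be reproduced with plain tf.nn ops from the TensorFlow 1.x API used in this run. The sketch below follows the pattern visible in the log (3x3 filters, stride-1 convolutions with SAME padding, 2x2 max pooling after every second conv layer); it is a simplified stand-in for the tutorial's add_conv/define_model code, not a copy of it.

import tensorflow as tf

data = tf.placeholder(tf.float32, shape=(64, 32, 32, 1))

def conv_relu(x, in_depth, out_depth, name):
    """3x3 convolution with SAME padding, bias add and ReLU; the spatial size is unchanged."""
    w = tf.Variable(tf.truncated_normal([3, 3, in_depth, out_depth], stddev=0.1), name=name + '_weights')
    b = tf.Variable(tf.constant(0.1, shape=[out_depth]), name=name + '_biases')
    return tf.nn.relu(tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME') + b)

flow = conv_relu(data, 1, 32, 'conv1')   # (64, 32, 32, 32)
flow = conv_relu(flow, 32, 32, 'conv2')  # (64, 32, 32, 32)
flow = tf.nn.max_pool(flow, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # (64, 16, 16, 32)
flow = conv_relu(flow, 32, 32, 'conv3')  # (64, 16, 16, 32)
flow = conv_relu(flow, 32, 32, 'conv4')  # (64, 16, 16, 32)
flow = tf.nn.max_pool(flow, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # (64, 8, 8, 32)
# flow now matches the last [ConvLayer] line above: shape (64, 8, 8, 32)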
############Fully Connected Layers Begin############
#FC layer 1: input (64, 8, 8, 32), output (64, 128). Not sure why output_depth is set to 128; it is simply the chosen width of the hidden layer, another hyperparameter.
i= 0 [FcLayer]len(self.fc_weights)= 2
[FcLayer]weights= <tf.Variable 'fc1/Variable:0' shape=(2048, 128) dtype=float32_ref>
[FcLayer]biases= <tf.Variable 'fc1/Variable_1:0' shape=(128,) dtype=float32_ref>
[FcLayer]shape= [64, 8, 8, 32]
[FcLayer]data_flow after reshape= Tensor("Reshape:0", shape=(64, 2048), dtype=float32)
#data_flow (64, 2048) * weights (2048, 128) + biases (128,) = (64, 128)
[FcLayer]data_flow after matmul= Tensor("fc1model/add:0", shape=(64, 128), dtype=float32)
[FcLayer]data_flow after relu= Tensor("fc1model/Relu:0", shape=(64, 128), dtype=float32)
#FC layer 2: input (64, 128), output (64, 10). Note: FC layer 2 is the last layer of the model, so its output size matches the label size.
i= 1 [FcLayer]len(self.fc_weights)= 2
[FcLayer]weights= <tf.Variable 'fc2/Variable:0' shape=(128, 10) dtype=float32_ref>
[FcLayer]biases= <tf.Variable 'fc2/Variable_1:0' shape=(10,) dtype=float32_ref>
WARNING:tensorflow:From E:\90.work\AI\DeepLearning\TensorFlow-and-DeepLearning-Tutorial\Season1\20\dp.py:191: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version. Instructions for updating: Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.
#Dropout does not change the shape
[FcLayer]data_flow after dropout= Tensor("fc2model/dropout/mul:0", shape=(64, 128), dtype=float32)
#data_flow (64, 128) * weights (128, 10) + biases (10,) = (64, 10)
[FcLayer]data_flow after matmul= Tensor("fc2model/add:0", shape=(64, 10), dtype=float32)
############Fully Connected Layers End############
#logits is the output of the last layer of the model, with shape (64, 10)
logits= Tensor("fc2model/add:0", shape=(64, 10), dtype=float32)
#Compute the softmax cross entropy on the logits; make sure you understand this step clearly
WARNING:tensorflow:From E:\90.work\AI\DeepLearning\TensorFlow-and-DeepLearning-Tutorial\Season1\20\dp.py:211: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version. Instructions for updating: Future major versions of TensorFlow will allow gradients to flow into the labels input on backprop by default. See `tf.nn.softmax_cross_entropy_with_logits_v2`.
[FUNCFLAG]entry apply_regularization func
WARNING:tensorflow:From E:\90.work\AI\DeepLearning\TensorFlow-and-DeepLearning-Tutorial\Season1\20\dp.py:259: all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02. Instructions for updating: Please use tf.global_variables instead.
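Picking up from the (64, 8, 8, 32) tensor left by the convolutional stack (re-declared here as a placeholder so the snippet stands on its own), the fully connected head and the loss can be sketched as follows. The dropout keep probability, the L2 regularization weight and the optimizer are assumptions inferred from the warnings above, not the exact contents of dp.py.

import tensorflow as tf

pooled = tf.placeholder(tf.float32, shape=(64, 8, 8, 32))   # output of the conv/pool stack
labels = tf.placeholder(tf.float32, shape=(64, 10))         # one-hot labels

# Flatten (64, 8, 8, 32) -> (64, 2048), then two fully connected layers: 2048 -> 128 -> 10
flat = tf.reshape(pooled, [64, 8 * 8 * 32])
w1 = tf.Variable(tf.truncated_normal([2048, 128], stddev=0.1))
b1 = tf.Variable(tf.constant(0.1, shape=[128]))
hidden = tf.nn.relu(tf.matmul(flat, w1) + b1)                # (64, 128)
hidden = tf.nn.dropout(hidden, keep_prob=0.9)                # shape unchanged; keep_prob value is assumed
w2 = tf.Variable(tf.truncated_normal([128, 10], stddev=0.1))
b2 = tf.Variable(tf.constant(0.1, shape=[10]))
logits = tf.matmul(hidden, w2) + b2                          # (64, 10), one score per class

# Softmax cross entropy averaged over the batch, plus L2 regularization of the FC weights
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
loss += 5e-4 * (tf.nn.l2_loss(w1) + tf.nn.l2_loss(w2))       # regularization weight is an assumption
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)  # optimizer/learning rate are assumptions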
[FUNCFLAG]entry train func
2019-06-14 23:45:15.835680: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0
2019-06-14 23:45:15.838482: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:
2019-06-14 23:45:15.842053: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0
2019-06-14 23:45:15.843707: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N
2019-06-14 23:45:15.847071: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 6553 MB memory) -> physical GPU (device: 0, name: GeForce RTX 2070, pci bus id: 0000:04:00.0, compute capability: 7.5)
Start Training
[FUNCFLAG]entry train_data_iterator func
input samples= [[[[-0.7369791 ] [-0.8515625 ] [-0.8671875 ] ... [-0.4973958 ] [-0.4375 ] [-0.4088542 ]] [[-0.734375 ] [-0.8411458 ] [-0.8671875 ] ... [-0.6640625 ] [-0.5390625 ] [-0.4244792 ]] [[-0.6901041 ] [-0.8307292 ] [-0.8541667 ] ... [-0.7473959 ] [-0.6848959 ] [-0.4947917 ]] ... [[-0.7864583 ] [-0.7786458 ] [-0.7682292 ] ...
[-0.3958333 ] [-0.4583333 ] [-0.5234375 ]] [[-0.7864583 ] [-0.7786458 ] [-0.7682292 ] ... [-0.453125 ] [-0.5104166 ] [-0.5677084 ]] [[-0.7786458 ] [-0.7760417 ] [-0.7708333 ] ... [-0.484375 ] [-0.5416666 ] [-0.5989584 ]]]] input samples.shape= (64, 32, 32, 1) input labels= [ [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 
0.]] input labels.shape= (64, 10)
2019-06-14 23:45:16.035727: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library cublas64_100.dll locally
[FUNCFLAG]entry accuracy func
#predictions is the probability distribution produced by applying softmax to the final fully connected output; this is the important quantity to watch
predictions= [ [0.01204864 0.8913779 0.01227294 0.00805012 0.00496502 0.01926311 0.00640195 0.00729137 0.03261255 0.00571635] [0.00975266 0.87809265 0.00885093 0.00978949 0.00750906 0.02246663 0.0093563 0.00707605 0.04215763 0.00494864] [0.00995087 0.9055381 0.00103758 0.00472897 0.00856805 0.03065675 0.01146686 0.00850798 0.01794503 0.00159983] [0.01046041 0.8873753 0.0031814 0.0036646 0.00988663 0.02377869 0.01400038 0.00846286 0.03646423 0.00272551] [0.04224218 0.7272563 0.00926376 0.00426366 0.03603369 0.05469245 0.02986171 0.03421704 0.05632145 0.00584778] [0.01578838 0.77256846 0.00973972 0.00725134 0.01356059 0.08701497 0.00794157 0.02059117 0.05647686 0.00906697] [0.01330926 0.9183194 0.00366373 0.00636685 0.00476729 0.0109355 0.00789363 0.00419887 0.02559856 0.00494684] [0.01267263 0.8148464 0.00856461 0.0150369 0.00899209 0.07114679 0.00509116 0.01657836 0.03933861 0.00773242] [0.01232944 0.82476586 0.00394617 0.02165573 0.00598027 0.05804176 0.02486391 0.01154539 0.02713767 0.00973381] [0.01474693 0.80654246 0.03152365 0.02015838 0.01126841 0.03880023 0.01219469 0.0146895 0.04250919 0.00756662] [0.01560207 0.81422406 0.03276257 0.00848547 0.01413399 0.04463826 0.00539044 0.01947433 0.03145365 0.01383514] [0.02409113 0.7088396 0.04169954 0.014316 0.00589203 0.09627787 0.02065787 0.01812749 0.05759656 0.01250186] [0.01504009 0.8523164 0.00505404 0.00694582 0.0141663 0.02990408 0.02521938 0.01084515 0.03638896 0.00411991] [0.01014019 0.59873223 0.00421534 0.02186191 0.00474417 0.18755214 0.0354718 0.00809004 0.12643625 0.00275596] [0.0150901 0.8905766 0.00305974 0.00586342 0.01106505 0.01838221 0.02041364 0.00758798 0.0233263 0.00463501] [0.01341309 0.84217376 0.0027219 0.01024784 0.01198278 0.0557069 0.01070907 0.00884527 0.0398845 0.00431497] [0.00541799 0.89799553 0.00201627 0.00709545 0.00697466 0.04084396 0.01492796 0.01041177 0.01094766 0.00336886] [0.01697449 0.8276487 0.00297997 0.02250313 0.01162772 0.03606217 0.02167042 0.01647696 0.02748463 0.01657181] [0.00981858 0.8943962 0.00239577 0.00613504 0.00765723 0.03185484 0.011197 0.01359929 0.02036428 0.00258184] [0.02743118 0.8791956 0.00409576 0.01477658 0.00749873 0.02116836 0.02290482 0.00831182 0.00908756 0.00552961] [0.00707855 0.78444284 0.02644767 0.00323374 0.00723403 0.06662466 0.0044095 0.01547857 0.07271864 0.01233181] [0.00437942 0.74885696 0.00826894 0.00602496 0.00500346 0.12277911 0.01369395 0.02507554 0.04552668 0.02039093] [0.0232425 0.7879908 0.02986267 0.00630538 0.02898107 0.05074363 0.01330533 0.02508982 0.02666586 0.00781292] [0.0114577 0.8296496 0.0115037 0.00715359 0.01316599 0.04382329 0.00892284 0.01770865 0.03752023 0.01909443] [0.00473753 0.9275074 0.00624552 0.00851312 0.00523802 0.01203246 0.00739711 0.00570892 0.01543135 0.00718851] [0.00625721 0.8888454 0.00938181 0.00179355 0.00834055 0.03933024 0.00403528 0.01137816 0.02690042 0.00373736] [0.00737982 0.80843985 0.00464619 0.03271857 0.0128434 0.04125416 0.01424174 0.01715372 0.05341195 0.00791057] [0.00773893 0.89554924 0.00915433 0.00998758 0.00468735 0.02875604 0.00740842 0.0106727 0.02294079 0.00310463] [0.01784775 0.877705 0.00478763 0.00786739 0.0065652 0.03932378 0.00757872 0.01397655 0.01821878 0.00612924] [0.01001035 0.8467986 0.00621763 0.00842646 0.0081393 0.03869523 0.00694741 0.02296186
0.04560967 0.00619353] [0.04802814 0.7796271 0.00525954 0.00484046 0.02662858 0.04064631 0.03955888 0.03858631 0.01167758 0.00514702] [0.02222724 0.8309259 0.00894508 0.00542033 0.01345079 0.02875036 0.00932251 0.02291359 0.05453 0.00351425] [0.02141096 0.80789375 0.00618408 0.00482646 0.01716177 0.02120544 0.04051807 0.01542161 0.06094347 0.0044344 ] [0.01382512 0.8334681 0.00300523 0.00316321 0.01798702 0.0409389 0.01737303 0.01670598 0.04358437 0.00994892] [0.01354759 0.8604741 0.00873299 0.00837667 0.01126389 0.04236749 0.01278504 0.01338077 0.02361612 0.00545537] [0.0041901 0.87009513 0.00695205 0.00624051 0.00668875 0.04290214 0.0053453 0.00923959 0.04325743 0.00508897] [0.00934603 0.8843361 0.00344616 0.00381516 0.02066059 0.02095013 0.00942601 0.00855739 0.03551789 0.00394455] [0.08747251 0.67293197 0.01282093 0.00494115 0.02439522 0.08618508 0.02913429 0.02706694 0.05208683 0.00296503] [0.00440555 0.92080075 0.00243548 0.00174514 0.00617614 0.03367404 0.00382944 0.00653758 0.01702501 0.00337087] [0.01106757 0.866973 0.00194298 0.01016787 0.00521821 0.05259069 0.01866922 0.00689655 0.02336792 0.00310601] [0.00557674 0.9253837 0.00155873 0.0066523 0.0064696 0.02434033 0.00483405 0.01062468 0.01112121 0.00343864] [0.01282204 0.874947 0.01167456 0.00831146 0.00924858 0.03171557 0.00915419 0.00860131 0.02765847 0.00586679] [0.02961571 0.8162192 0.02162132 0.00867174 0.01672682 0.02607536 0.01922019 0.01547697 0.03609448 0.01027828] [0.00451186 0.93074876 0.00336316 0.00455846 0.0086705 0.00674731 0.00626797 0.00410306 0.02813517 0.0028938 ] [0.06075916 0.6780492 0.04077061 0.01855227 0.02731367 0.06019364 0.01548951 0.0198524 0.02795696 0.05106254] [0.0083364 0.8979468 0.00374386 0.00352399 0.006744 0.02509773 0.01228982 0.01013939 0.02657083 0.00560723] [0.00733499 0.90076214 0.00250654 0.00257085 0.00521041 0.04835719 0.00739509 0.00674464 0.01669896 0.00241919] [0.00890897 0.8939574 0.01218981 0.00909214 0.00623972 0.03264389 0.00456916 0.00736493 0.02000676 0.00502719] [0.01039846 0.9089709 0.00823812 0.00877768 0.00476929 0.02166645 0.00597249 0.00704145 0.02040092 0.00376428] [0.01932878 0.77770346 0.01337793 0.00776167 0.0140844 0.08638586 0.00460555 0.01673668 0.05440767 0.00560803] [0.02175553 0.8681404 0.0084817 0.01406448 0.00878193 0.04099344 0.00712316 0.00801177 0.02043082 0.00221667] [0.02990238 0.80228543 0.01213762 0.01109637 0.01175891 0.05604009 0.0172079 0.01761002 0.03264941 0.0093119 ] [0.01087898 0.87659943 0.01253777 0.00768241 0.00997824 0.02112672 0.00885477 0.01562314 0.01416378 0.02255468] [0.02155345 0.74111104 0.00677823 0.01346175 0.01926211 0.08460197 0.01898312 0.01338648 0.07592625 0.00493549] [0.00756843 0.873581 0.0061121 0.00180567 0.00493898 0.05726617 0.00660983 0.01024099 0.02187506 0.01000183] [0.0118576 0.7897734 0.00206783 0.00863726 0.01378661 0.03807129 0.0310462 0.02260716 0.07655162 0.00560096] [0.01605191 0.7325048 0.03108088 0.02483236 0.00517426 0.08126966 0.01224676 0.01690279 0.04532863 0.03460802] [0.02112413 0.8219198 0.0124046 0.00865637 0.02027998 0.03792737 0.00681739 0.01519332 0.04450077 0.01117623] [0.0141132 0.82093143 0.00602908 0.00739397 0.01250312 0.02306313 0.01645185 0.01060368 0.08482568 0.00408492] [0.01393295 0.8956165 0.00277618 0.00535475 0.00747472 0.02575232 0.01807712 0.01325624 0.01242268 0.00533652] [0.00608889 0.91892177 0.00350103 0.00536871 0.00484933 0.02240441 0.00617728 0.00673957 0.02295134 0.00299771] [0.01410782 0.8951771 0.00165921 0.0104132 0.01052946 0.01375361 0.01051845 0.01087605 0.03025487 
0.00271014] [0.0175628 0.8847983 0.00551149 0.00876634 0.00646503 0.02252354 0.02173802 0.01690369 0.01210967 0.0036212 ] [0.0149859 0.776598 0.01435408 0.00983367 0.00880897 0.06807031 0.01416275 0.00862241 0.08186438 0.00269955]]
#_predictions holds, for each sample, the index of the largest value in predictions, computed with np.argmax along the class axis
_predictions= [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
labels= [ [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.] [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.] [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]]
#_labels holds, for each sample, the index of the largest value in labels, computed with np.argmax along the class axis
_labels= [1 9 2 3 2 5 9 3 3 1 3 3 2 8 7 4 4 1 2 8 1 6 2 3 6 3 4 2 5 8 1 6 2 3 7 9 5 3 2 2 2 6 2 2 5 1 5 4 7 8 9 6 0 1 2 4 5 6 5 2 1 3 2 1]
Minibatch loss at step 0: 4.538851
Minibatch accuracy: 14.1%
Minibatch loss at step 0: 4.538851
Minibatch accuracy: 14.1%
Model saved in file: model/default.ckpt
(base) E:\90.work\AI\DeepLearning\TensorFlow-and-DeepLearning-Tutorial\Season1\20>
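The 14.1% figure follows directly from the two argmax vectors above: at step 0 the untrained network predicts class 1 for every sample, and 9 of the 64 true labels happen to be 1, so 9/64 is about 14.1%. A minimal numpy version of such an accuracy function (the real dp.py may differ in details) is:

import numpy as np

def accuracy(predictions, labels):
    """predictions and labels both have shape (64, 10); return the percentage of matching rows."""
    _predictions = np.argmax(predictions, axis=1)  # predicted class per sample
    _labels = np.argmax(labels, axis=1)            # true class per sample
    return 100.0 * np.mean(_predictions == _labels)

# Here: _predictions is all 1s and _labels contains nine 1s -> 100 * 9 / 64 = 14.0625, printed as 14.1%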