tensorflow 学习笔记 多层感知机

 1 # -*- coding: utf-8 -*-
 2 """
 3 Created on Thu Mar  9 19:20:51 2017
 4 
 5 @author: Jarvis
 6 """
 7 '''
 8 tensorflow 做机器学习的几个步骤
 9 1.定义公式
10 2.定义loss function,选择优化器,并制定优化器优化loss
11 3.迭代地对数据进行训练
12 4. 在测试集或验证集对准确率进行评测
13 
14 
15 '''
16 
17 import tensorflow as tf
18 import pandas as pd
19 import random
20 #自己定义的一个选取batch进行训练的一个取batch函数
21 def next_batch(mnist, num,ilen = 55):
22     size = len(mnist)
23     selected_n = set([])
24 
25     while(len(selected_n) < num):
26         t = random.choice(range(size))
27         selected_n.add(t)
28     l  = list(selected_n)
29     
30     batch_xs = []
31     batch_ys = []
32     
33     batch_xs = mnist.iloc[l,range(2,54)]
34     
35     batch_ys = mnist.iloc[l,range(54,62)]
36     return batch_xs,batch_ys
37 
38 
39 #对数据进行读取 
40 org_mnist = pd.read_csv("NDVI_NDWI.csv",header = None,encoding = 'gbk')
41 mnist = pd.get_dummies(org_mnist)
42 #创建session
43 #input_data.read_data_sets("MNIST_data/",one_hot = True)
44 sess = tf.InteractiveSession()
45 
46 
47 #定义算法公式,在此处就是神经网络的结构方式
48 in_units = 52#每一条instance具有52个输入
49 h1_units = 30
50 h2_units = 20
51 h3_units = 10
52 h4_units = 5
53 
54 #tf.truncated_normal是正态分布的一个东东,主要用于初始化一些W矩阵
55 W1 = tf.Variable(tf.truncated_normal([in_units,h1_units],stddev = 0.1))
56 b1 = tf.Variable(tf.zeros([h1_units]))
57 W2 = tf.Variable(tf.zeros([h1_units,h2_units]))#[h1_units,8]
58 b2 = tf.Variable(tf.zeros([h2_units]))#10
59 W3 = tf.Variable(tf.zeros([h2_units,h3_units]))
60 b3 = tf.Variable(tf.zeros([h3_units]))
61 W4 = tf.Variable(tf.zeros([h3_units,8]))
62 b4 = tf.Variable(tf.zeros([8]))
63 
64 '''
65 W4 = tf.Variable(tf.zeros([h3_units,h4_units]))
66 b4 = tf.Variable(tf.zeros([h4_units]))
67 W5 = tf.Variable(tf.zeros([h4_units,8]))
68 b5 = tf.Variable(tf.zeros([8]))
69 '''
70 x = tf.placeholder(tf.float32,[None, in_units])
71 keep_prob = tf.placeholder(tf.float32)#dropout 的比例 keep_prob
72 
73 hidden1 = tf.nn.sigmoid(tf.matmul(x,W1)+b1)
74 hidden1_drop = tf.nn.dropout(hidden1,keep_prob)
75 hidden2 = tf.nn.sigmoid(tf.matmul(hidden1_drop,W2)+b2)
76 hidden2_drop = tf.nn.dropout(hidden2,keep_prob)
77 hidden3 = tf.nn.sigmoid(tf.matmul(hidden2_drop,W3)+b3)
78 hidden3_drop = tf.nn.dropout(hidden3,keep_prob)
79 #hidden4 = tf.nn.sigmoid(tf.matmul(hidden3_drop,W4)+b4)
80 #hidden4_drop = tf.nn.dropout(hidden4,keep_prob)
81 
82 y = tf.nn.softmax(tf.matmul(hidden3_drop,W4)+b4)
83 y_ = tf.placeholder(tf.float32,[None,8])#[None,10]
84 #设置优化函数
85 cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y),reduction_indices=[1]))
86 train_step = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)
87 
88 tf.global_variables_initializer().run()
89 
90 
91 for i in range(2010):#2000
92     batch_xs, batch_ys = next_batch(mnist,1000)#1000 3
93     train_step.run( {x : batch_xs, y_ : batch_ys,keep_prob: 1})
94 
95 correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
96 accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
97 batch_xs, batch_ys = next_batch(mnist,10000)
98 print(accuracy.eval({x:batch_xs,y_:batch_ys,keep_prob:1.0}))
View Code

 

posted @ 2017-03-15 19:09  不说话的汤姆猫  阅读(565)  评论(0编辑  收藏  举报