PYTHON DAY6 TENSORFLOW: Defining an MLP Model with One Hidden Layer

# -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 21:51:45 2020

@author: shilo
"""

# A one-hidden-layer MLP implemented with TensorFlow (1.x graph API)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Training data
inputX=np.array([[1.,3.],[1.,2.],[1.,1.5],[1.5,2.0],[2.,3.],[2.5,1.5],[2.,1.],[3.,1.],[3.,2.],[3.5,1.],[3.5,1.]])
inputY=[[1.,0.]]*6+[[0.,1.]]*5
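# Optional sanity check (a sketch, not in the original post): visualize the
# two classes before training. Rows 0-5 are class [1.,0.], rows 6-10 are [0.,1.].
#     plt.scatter(inputX[:6,0], inputX[:6,1], marker='o')
#     plt.scatter(inputX[6:,0], inputX[6:,1], marker='x')
#     plt.show()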
# Basic hyperparameters
learning_rate=0.001   # learning rate
training_epochs=2000  # number of training epochs
display_step=50       # log the cost every display_step epochs
n_samples=11          # number of training samples
batch_size=11         # mini-batch size
total_batch=int(n_samples/batch_size) # batches per epoch (11/11 = 1, i.e., full-batch training)
# MLP model definition
n_hidden_1 = 2          # number of neurons in hidden layer 1
n_input = 2             # input dimensionality
n_classes = 2           # number of output classes
X = tf.placeholder("float",[None,n_input])  # 2-D float placeholder: unknown number of rows, n_input columns
Y = tf.placeholder("float",[None,n_classes])# 2-D float placeholder: unknown number of rows, n_classes columns
weights = {                                                     # weight dictionary
    'h1':tf.Variable(tf.random_normal([n_input,n_hidden_1])),   # h1: variable of shape [n_input,n_hidden_1], initialized with standard-normal draws
    'out':tf.Variable(tf.random_normal([n_hidden_1,n_classes])) # out: likewise, shape [n_hidden_1,n_classes]
    }
biases = {
    'b1':tf.Variable(tf.random_normal([n_hidden_1])),
    'out':tf.Variable(tf.random_normal([n_classes]))
    }
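# tf.random_normal draws from a normal distribution with mean 0.0 and stddev
# 1.0 by default. For larger networks a smaller scale is a common choice;
# this is an aside, not something the original post does:
#     tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=0.1))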
def multilayer_perceptron(x):       # build the model
    layer_1=tf.add(tf.matmul(x,weights['h1']),biases['b1'])   # hidden layer (no activation applied)
    out_layer=tf.matmul(layer_1,weights['out'])+biases['out'] # output layer (raw logits)
    return out_layer
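# Note: no activation is applied to layer_1, so the two matmuls compose into
# a single linear map and the hidden layer adds no expressive power. A common
# variant (an assumption, not what the original post does) inserts a sigmoid:
#     layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['h1']), biases['b1']))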
evidence = multilayer_perceptron(X) # model output (logits)
y_=tf.nn.softmax(evidence)          # softmax-normalized class probabilities
cost=tf.reduce_sum(tf.pow(Y-y_,2))/(2*n_samples)  # squared-error cost averaged over the samples
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
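# A common alternative to squared error on softmax outputs (an assumption,
# not used by this post) is cross-entropy computed on the raw logits, which
# is numerically more stable:
#     cost = tf.reduce_mean(
#         tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=evidence))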
# Training phase
avg_set=[]  # average cost recorded every display_step epochs
epoch_set=[]# corresponding epoch numbers
init=tf.global_variables_initializer()  # op that initializes all variables
with tf.Session() as sess:
    sess.run(init)
    
    for epoch in range(training_epochs):
        avg_cost=0.
        for i in range(total_batch):
            # Mini-batch extraction was left "TO BE IMPLEMENTED" in the
            # original; since batch_size == n_samples, feeding the whole
            # dataset as a single batch is equivalent.
            batch_x=inputX
            batch_y=inputY
            _,c=sess.run([optimizer,cost],feed_dict={X:batch_x,Y:batch_y})
            avg_cost+=c/total_batch
        if epoch % display_step ==0:
            print("Epoch:",'%04d' % (epoch+1),
                  "cost={:,.9f}".format(avg_cost))
            avg_set.append(avg_cost)
            epoch_set.append(epoch+1)
            
    print("Training phase finished")
    last_result=sess.run(y_,feed_dict={X:inputX})
    training_cost=sess.run(cost,feed_dict={X:inputX,Y:inputY})
    print("Training cost =",training_cost)
    print("Last result =",last_result)
# Plot the training cost curve
plt.plot(epoch_set,avg_set,'o',label="MLP Training phase")
plt.ylabel('cost')
plt.xlabel('epochs')
plt.legend()
plt.show()
# Test set
testX=np.array([[1.,2.25],[1.25,3.],[2.,2.5],[2.25,2.75],[2.5,3.],[2.,0.9],[2.5,1.2],[3.,1.25],[3.,1.5],[3.5,2.],[3.5,2.5]])
testY=[[1.,0.]]*5+[[0.,1.]]*6
with tf.Session() as sess:  # fresh session: running init re-randomizes the variables, so the model is retrained from scratch before testing
    sess.run(init)
    
    for epoch in range(training_epochs):
        for i in range(total_batch):
            batch_x=inputX
            batch_y=inputY
            _,c=sess.run([optimizer,cost],
                         feed_dict={X:batch_x,Y:batch_y})
        
    # Test the retrained model
    pred=tf.nn.softmax(evidence)    # same graph node as y_ defined above
    result=sess.run(pred,
                    feed_dict={X:testX})
    correct_prediction=tf.equal(tf.argmax(pred,1),tf.argmax(Y,1))
        
    #accuracy
    accuracy=tf.reduce_mean(tf.cast(correct_prediction,"float"))
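    # Note: accuracy.eval(feed_dict) is shorthand for
    # sess.run(accuracy, feed_dict) inside a "with tf.Session()" block.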
    print("Accuracy:" , accuracy.eval({X:testX,Y:testY}))
    print(result)
# Plot the test-set classification
yc=result[:,1]  # predicted probability of class [0.,1.] for each test point
print(yc)
plt.scatter(testX[:,0],testX[:,1],c=yc,s=50,alpha=1)  # color encodes the predicted probability
plt.show()
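Note: the script uses the TensorFlow 1.x graph API (tf.placeholder, tf.Session). Under TensorFlow 2 it can still run through the compatibility layer; a minimal sketch, assuming TensorFlow 2 is installed:

import tensorflow.compat.v1 as tf   # expose the 1.x API names
tf.disable_eager_execution()        # restore graph-mode execution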

  
