TensorFlow - Solving a Nonlinear Regression Problem

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Use numpy to generate 200 points evenly spaced from -0.5 to 0.5, then add an axis to get a 200x1 (2-D) array
x_data = np.linspace(-0.5,0.5,200)[:,np.newaxis]
# Generate Gaussian noise (mean 0, std 0.02) with the same shape as x_data
noise = np.random.normal(0,0.02,x_data.shape)
y_data = np.square(x_data)+noise

# Define two placeholders: the number of rows is unspecified (None), one column
x = tf.placeholder(tf.float32,[None,1])
y = tf.placeholder(tf.float32,[None,1])

# Define the hidden layer of the network
# Random initial weights: 1 row (1 input neuron) by 10 columns (10 hidden neurons)
Weights_L1 = tf.Variable(tf.random_normal([1,10]))
# 10 bias values, initialized to zero
biases_L1 = tf.Variable(tf.zeros([1,10]))
Wx_plus_b_L1 = tf.matmul(x,Weights_L1)+biases_L1
L1 = tf.nn.tanh(Wx_plus_b_L1)
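# Shapes: (N, 1) x (1, 10) + (1, 10) -> L1 is (N, 10), one row of 10 hidden activations per sample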

# Define the output layer of the network
Weights_L2 = tf.Variable(tf.random_normal([10,1]))
# 1 bias value
biases_L2 = tf.Variable(tf.zeros([1,1]))
Wx_plus_b_L2 = tf.matmul(L1,Weights_L2)+biases_L2
prediction = tf.nn.tanh(Wx_plus_b_L2)
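# tanh keeps the output in (-1, 1), which is wide enough for the targets here (roughly 0 to 0.25 plus noise)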

# Quadratic cost function (mean squared error)
loss = tf.reduce_mean(tf.square(y-prediction))
# Gradient descent with learning rate 0.1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
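# GradientDescentOptimizer is plain SGD: each step updates every trainable variable by -0.1 times its gradient of loss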

with tf.Session() as sess:
  # Initialize all variables
  sess.run(tf.global_variables_initializer())
  # Train for 2000 steps, feeding x_data and y_data into the x and y placeholders
  for _ in range(2000):
    sess.run(train_step,feed_dict={x:x_data,y:y_data})
  # Get the network's predictions on the training inputs
  prediction_value = sess.run(prediction,feed_dict={x:x_data})
  # Plot the results
  plt.figure()
  # Scatter plot of the noisy training data
  plt.scatter(x_data,y_data)
  # Red solid line of width 5 showing the fitted curve
  plt.plot(x_data,prediction_value,'r-',lw=5)
  plt.show()
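
The listing above uses the TensorFlow 1.x API (tf.placeholder, tf.Session, tf.train). Under TensorFlow 2.x these calls are unavailable by default; a minimal sketch, assuming a standard TensorFlow 2.x install that ships the tf.compat.v1 module, is to replace the import at the top of the script:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores placeholders, sessions, and the tf.train optimizers
# the rest of the script can then run unchanged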
