# 机器学习之线性回归示例 (machine learning: linear regression example)

import tensorflow as tf
import numpy as np


# Model: y = W1*x1 + W2*x2 + B
# Trainable parameters, initialized to zero.
W = tf.Variable(np.zeros([2, 1]), dtype=np.float32)
B = tf.Variable(np.zeros([1, 1]), dtype=np.float32)

# Placeholders for a single training sample: x is (1, 2), y is (1, 1).
x = tf.placeholder(tf.float32, shape=[1, 2])
y = tf.placeholder(tf.float32, shape=[1, 1])

# Training should drive W toward [[5], [3]] and B toward [[0]].
# NOTE: matrix-multiplication order matters — (1,2) @ (2,1) -> (1,1);
# swapping x and W would not conform.
y_ = tf.matmul(x, W) + B

# Training data: 66 samples generated (mostly) from y = 5*x1 + 3*x2.
X = np.array([[34, 46], [78, 34], [66, 88], [56, 83], [85, 30], [35, 45],
              [73, 93], [54, 67], [47, 45], [76, 35], [22, 89], [12, 46],
              [56, 34], [89, 74], [45, 41], [23, 23], [45, 85], [63, 43],
              [64, 78], [23, 58], [99, 77], [44, 66], [88, 67], [45, 11],
              [46, 12], [47, 13], [48, 14], [49, 15], [50, 16], [51, 17],
              [52, 18], [53, 19], [54, 20], [55, 21], [56, 22], [57, 23],
              [58, 24], [59, 25], [60, 26], [61, 27], [62, 28], [63, 29],
              [64, 30], [65, 31], [66, 32], [67, 33], [68, 34], [69, 35],
              [70, 36], [71, 37], [72, 38], [73, 39], [74, 40], [75, 41],
              [76, 42], [77, 43], [78, 44], [79, 45], [80, 46], [81, 47],
              [82, 48], [83, 49], [84, 50], [85, 51], [86, 52], [87, 53]])
Y = np.array([[308], [492], [594], [529], [515], [310], [644], [471], [370],
              [485], [377], [198], [382], [667], [348], [184], [480], [444],
              [554], [289], [726], [418], [641], [258], [266], [274], [282],
              [290], [298], [306], [314], [322], [330], [338], [346], [354],
              [362], [370], [378], [386], [394], [402], [410], [418], [426],
              [434], [442], [450], [458], [466], [474], [482], [490], [498],
              [506], [514], [522], [530], [538], [546], [554], [562], [570],
              [578], [586], [594]])

# Squared-error loss for the single fed sample.
cost = tf.square(y - y_)
# The learning rate matters a lot here: too large and the per-sample
# SGD updates diverge, too small and convergence is very slow.
train = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # One SGD step per sample; print progress after each update.
    for i in range(Y.size):
        vx = X[i].reshape(1, 2)
        vy = Y[i].reshape(1, 1)
        feed = {x: vx, y: vy}
        sess.run(train, feed_dict=feed)
        print("After %d iteration:" % i)
        print(sess.run(W))
        print(sess.run(B))
        print("cost: %f" % sess.run(cost, feed_dict=feed))

 

# posted @ 2017-11-16 16:09  cbing  阅读(612)  评论(0)  编辑  收藏  举报