Define add_layer to add a layer

import numpy as np
import tensorflow as tf    # TensorFlow 1.x style API

def add_layer(inputs, in_size, out_size, activation_function=None):
    # random initial Weights work better than all zeros; the shape is in_size rows by out_size columns
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # biases are best not initialized to all zeros, so add 0.1
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases    # y = Wx + b

    if activation_function is None:
        outputs = Wx_plus_b    # with no activation function the layer is linear
    else:
        outputs = activation_function(Wx_plus_b)    # apply the activation function
    return outputs
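As a quick sanity check (not in the original post), you can build one layer on a placeholder and inspect the static shape of its output; the names below are illustrative:

check_in = tf.placeholder(tf.float32, [None, 1])
check_out = add_layer(check_in, 1, 10, activation_function=tf.nn.relu)
print(check_out.get_shape())    # expected: (?, 10), i.e. any batch size, 10 units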

 

Data

x_data = np.linspace(-1, 1, 300)[:, np.newaxis]    # 300 samples in (-1, 1); np.newaxis adds a dimension, giving shape (300, 1)
noise = np.random.normal(0, 0.05, x_data.shape)    # mean 0, standard deviation 0.05; noise has the same shape as x_data
y_data = np.square(x_data) - 0.5 + noise    # np.square squares element-wise, so y = x^2 - 0.5 plus noise
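Plotting the data first makes the target curve obvious; this optional sketch assumes matplotlib is installed (it is not part of the original post):

import matplotlib.pyplot as plt

plt.scatter(x_data, y_data, s=5)    # should show a noisy parabola around y = x^2 - 0.5
plt.xlabel('x_data')
plt.ylabel('y_data')
plt.show()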

 

Method

xs = tf.placeholder(tf.float32, [None, 1])    # one feature per example; None means any number of examples is fine
ys = tf.placeholder(tf.float32, [None, 1])
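To make the None dimension concrete, here is a small illustrative feed (the doubled op and the fed lists are made up for demonstration):

doubled = xs * 2    # a trivial op on the placeholder
with tf.Session() as demo_sess:
    print(demo_sess.run(doubled, feed_dict={xs: [[0.5]]}))                 # a batch of 1
    print(demo_sess.run(doubled, feed_dict={xs: [[0.1], [0.2], [0.3]]}))   # a batch of 3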

 

l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# hidden layer: the input is xs, in_size=1, out_size=10, and the activation function is relu
prediction = add_layer(l1, 10, 1, activation_function=None)    # output layer

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
# reduce_sum totals the squared error within each example, then reduce_mean averages over all examples

train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)    # the learning rate should be < 1; GradientDescentOptimizer is the optimizer
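To see what reduce_sum with reduction_indices=[1] followed by reduce_mean computes, here is a NumPy equivalent on made-up arrays (y_batch and pred_batch are hypothetical names, not from the post):

y_batch = np.array([[1.0], [2.0], [3.0]])       # hypothetical targets, shape (3, 1)
pred_batch = np.array([[1.5], [2.0], [2.0]])    # hypothetical predictions
per_example = np.square(y_batch - pred_batch).sum(axis=1)    # like reduce_sum(..., reduction_indices=[1]) -> [0.25, 0.0, 1.0]
mse = per_example.mean()    # like reduce_mean -> about 0.4167
print(mse)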

 

init = tf.global_variables_initializer()    # initialization op; tf.initialize_all_variables is the deprecated older name
sess = tf.Session()
sess.run(init)

 

Run

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:    # print the loss every 50 steps
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
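The printed loss should shrink steadily as training runs. An optional way to check the fit visually (assuming matplotlib, not in the original post):

import matplotlib.pyplot as plt

prediction_value = sess.run(prediction, feed_dict={xs: x_data})
plt.scatter(x_data, y_data, s=5)                  # the noisy training data
plt.plot(x_data, prediction_value, 'r-', lw=3)    # the curve the network learned
plt.show()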

 
