TensorFlow: Comparing and Implementing Stochastic Training and Batch Training

1. Stochastic Training and Batch Training

Stochastic training: each step randomly samples a single pair of training data and target data and trains on it.

Batch training: each step trains on a large batch at once and uses the average loss over the batch to compute the gradient; the batch size can be scaled up to the entire dataset.

The difference between batch training and stochastic training shows up in how the optimizer steps are computed and in how training converges.
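Written out for the simple linear model y ≈ A·x used in the code below (this restatement is mine, not from the original post; B denotes the batch size), the per-step objectives are:

$$L_{\text{stochastic}} = (A x_i - y_i)^2, \qquad L_{\text{batch}} = \frac{1}{B}\sum_{j=1}^{B} (A x_j - y_j)^2$$

A stochastic step follows the gradient of the loss for one randomly drawn pair, while a batch step follows the gradient of the averaged loss, which is why the two behave and converge differently.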

The hard part of batch training is choosing an appropriate batch_size (a small sketch for experimenting with it follows the comparison table below).

Comparison of the two:

Training type | Advantages | Disadvantages
Stochastic training | Can escape local minima | Generally needs more iterations to converge
Batch training | Reaches the minimum loss quickly | Consumes more computational resources
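Since a good batch size has to be found by experiment, here is a minimal sketch (plain NumPy, not part of the original TensorFlow code) that sweeps a few candidate batch sizes on the same toy problem assumed throughout this post (x ~ N(1, 0.1), target 10, model y = A·x, learning rate 0.02); batch_size = 1 corresponds to stochastic training and batch_size = 100 to a full-batch pass:

import numpy as np

np.random.seed(0)
x_vals = np.random.normal(1, 0.1, 100)
y_vals = np.repeat(10., 100)

def train(batch_size, steps=100, lr=0.02):
    A = np.random.normal()                   # scalar parameter, like the TF variable A
    for _ in range(steps):
        idx = np.random.choice(100, size=batch_size)
        x, y = x_vals[idx], y_vals[idx]
        grad = np.mean(2 * (A * x - y) * x)  # gradient of the mean squared loss w.r.t. A
        A -= lr * grad
    return A, np.mean((A * x_vals - y_vals) ** 2)

for bs in (1, 5, 20, 100):                   # 1 = stochastic, 100 = full batch
    A, final_loss = train(bs)
    print('batch_size=%3d  A=%.3f  loss over all data=%.3f' % (bs, A, final_loss))

On this toy problem every setting should end up near A ≈ 10; the point is only that batch_size is a knob you sweep, and that larger batches average away more of the per-sample noise.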

2. Implementing Stochastic Training

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.python.framework import ops
ops.reset_default_graph()

# Stochastic training:
# 1. Start a graph session
sess = tf.Session()

# 2. Create the data and placeholders
x_vals = np.random.normal(1, 0.1, 100)
y_vals = np.repeat(10., 100)
x_data = tf.placeholder(shape=[1], dtype=tf.float32)
y_target = tf.placeholder(shape=[1], dtype=tf.float32)

# 3. Create the variable
A = tf.Variable(tf.random_normal(shape=[1]))

# 4. Add the model operation to the graph
my_output = tf.multiply(x_data, A)

# 5. Declare the L2 loss
loss = tf.square(my_output - y_target)

# 6. Declare the optimizer with a learning rate of 0.02
my_opt = tf.train.GradientDescentOptimizer(0.02)
train_step = my_opt.minimize(loss)

# 7. Initialize the variables
init = tf.global_variables_initializer()
sess.run(init)

# 8. Keep the loss values for plotting later
loss_stochastic = []

# 9. Run the training loop: one randomly chosen (x, y) pair per step
for i in range(100):
    rand_index = np.random.choice(100)
    rand_x = [x_vals[rand_index]]
    rand_y = [y_vals[rand_index]]
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if (i + 1) % 5 == 0:
        print('Step #' + str(i + 1) + ' A = ' + str(sess.run(A)))
        temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
        print('Loss = ' + str(temp_loss))
        loss_stochastic.append(temp_loss)

# Output

  Step #5 A = [2.0631378]

  Loss = [60.90259]

  Step #10 A = [3.560384]

  Loss = [35.39518]

  Step #15 A = [4.7225595]

  Loss = [37.812637]

  Step #20 A = [5.681144]

  Loss = [13.796157]

  Step #25 A = [6.4919457]

  Loss = [13.752169]

  Step #30 A = [7.1609416]

  Loss = [9.70855]

  Step #35 A = [7.710085]

  Loss = [5.826261]

  Step #40 A = [8.253489]

  Loss = [7.3934216]

  Step #45 A = [8.671478]

  Loss = [2.5475926]

  Step #50 A = [8.993064]

  Loss = [1.32571]

  Step #55 A = [9.101872]

  Loss = [0.67589337]

  Step #60 A = [9.256593]

  Loss = [5.34419]

  Step #65 A = [9.329251]

  Loss = [0.58555096]

  Step #70 A = [9.421848]

  Loss = [3.088755]

  Step #75 A = [9.563117]

  Loss = [6.0601945]

  Step #80 A = [9.661991]

  Loss = [0.05205128]

  Step #85 A = [9.8208685]

  Loss = [2.3963788]

  Step #90 A = [9.8652935]

  Loss = [0.19284673]

  Step #95 A = [9.842097]

  Loss = [4.9211507]

  Step #100 A = [10.044914]

  Loss = [4.2354054]

3. Implementing Batch Training

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.python.framework import ops
ops.reset_default_graph()

sess = tf.Session()

# 1. Declare the batch size (how many training examples are fed in per step)
batch_size = 20

# 2. Declare the model data, placeholders, and variable.
# What changes here is the shape of the placeholders: they now have two dimensions.
# The first dimension is the batch size (left as None so any batch size can be fed),
# and the second is the size of each data point (1 here).
# We could set the first dimension explicitly to 20 instead of None.
# Knowing the dimensions in the model is what rules out invalid matrix operations.
x_vals = np.random.normal(1, 0.1, 100)
y_vals = np.repeat(10., 100)
x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
A = tf.Variable(tf.random_normal(shape=[1, 1]))

# 3. Add the matrix multiplication to the graph.
# Matrix multiplication is not commutative, so the operand order in matmul() matters:
my_output = tf.matmul(x_data, A)

# 4. Change the loss function:
# for batch training the loss is the mean of the L2 losses over every data point in the batch
loss = tf.reduce_mean(tf.square(my_output - y_target))

# 5. Declare the optimizer
my_opt = tf.train.GradientDescentOptimizer(0.02)
train_step = my_opt.minimize(loss)

# 6. Optimize the model in a training loop.
# To compare against stochastic training in a plot,
# keep a list and record the loss every 5 iterations.
# Initialize the variables
init = tf.global_variables_initializer()
sess.run(init)
loss_batch = []
for i in range(100):
    # Draw 20 random indices from 0..99 as the batch
    rand_index = np.random.choice(100, size=batch_size)
    # Transpose into column vectors of shape [batch_size, 1]
    rand_x = np.transpose([x_vals[rand_index]])
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    if (i + 1) % 5 == 0:
        print("Step # " + str(i + 1) + ' A = ' + str(sess.run(A)))
        temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
        print('Loss = ' + str(temp_loss))
        loss_batch.append(temp_loss)


# Output

  Step # 5 A = [[2.626382]]

  Loss = 55.444374

  Step # 10 A = [[3.980196]]

  Loss = 36.855064

  Step # 15 A = [[5.0858808]]

  Loss = 22.765038

  Step # 20 A = [[5.9751787]]

  Loss = 15.496961

  Step # 25 A = [[6.713659]]

  Loss = 12.349718

  Step # 30 A = [[7.2950797]]

  Loss = 7.5467796

  Step # 35 A = [[7.782353]]

  Loss = 5.17468

  Step # 40 A = [[8.20625]]

  Loss = 4.1199327

  Step # 45 A = [[8.509094]]

  Loss = 2.6329637

  Step # 50 A = [[8.760488]]

  Loss = 1.9998455

  Step # 55 A = [[8.967735]]

  Loss = 1.6577679

  Step # 60 A = [[9.1537]]

  Loss = 1.4356906

  Step # 65 A = [[9.317189]]

  Loss = 1.9666836

  Step # 70 A = [[9.387019]]

  Loss = 1.9287064

  Step # 75 A = [[9.499526]]

  Loss = 1.7477573

  Step # 80 A = [[9.594302]]

  Loss = 1.719229

  Step # 85 A = [[9.666611]]

  Loss = 1.4769726

  Step # 90 A = [[9.711805]]

  Loss = 1.1235845

  Step # 95 A = [[9.784608]]

  Loss = 1.9176414

  Step # 100 A = [[9.849552]]

  Loss = 1.1561565

4. Plotting the Results

  plt.plot(range(0, 100, 5), loss_stochastic, 'b-', label='Stochastic Loss')

  plt.plot(range(0, 100, 5), loss_batch, 'r--', label='Batch Loss, size=20')

  plt.legend(loc='upper right', prop={'size': 11})

  plt.show()

The plot shows that the batch training loss curve is smoother, while the stochastic training loss is more erratic.
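One way to back up that observation numerically (this check is my addition, not from the original post) is to reuse the loss_stochastic and loss_batch lists recorded above and compare how much consecutive recorded losses jump:

import numpy as np

# Quantify the smoothness of the two loss traces recorded every 5 steps above
stochastic_jumps = np.abs(np.diff(np.ravel(loss_stochastic)))
batch_jumps = np.abs(np.diff(np.ravel(loss_batch)))
print('mean jump between recorded losses (stochastic): %.3f' % stochastic_jumps.mean())
print('mean jump between recorded losses (batch):      %.3f' % batch_jumps.mean())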
