BP for RNNs: Back Propagation Through Time (BPTT).

Reference: 零基础入门深度学习(5) - 循环神经网络 (Zhihu)
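
For orientation, these are the two recursions the code below implements, in the tutorial's notation (net_t = U x_t + W s_{t-1}, s_t = f(net_t), and delta_t = ∂E/∂net_t):

    \delta_k^{\top} = \delta_{k+1}^{\top}\, W\, \mathrm{diag}\!\left( f'(\mathbf{net}_k) \right)

    \nabla_W E = \sum_{t=1}^{T} \delta_t\, \mathbf{s}_{t-1}^{\top}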

 

import numpy as np
from functools import reduce  # needed on Python 3

# Methods of the RNN layer class from the referenced tutorial; they assume
# self.state_list (the cached states s_t, with s_0 the all-zero initial
# state), self.W, self.state_width, self.times, and the element_wise_op
# helper sketched after this block.

def backward(self, sensitivity_array, activator):
    '''
    Run the BPTT algorithm.
    '''
    self.calc_delta(sensitivity_array, activator)
    self.calc_gradient()

def calc_delta(self, sensitivity_array, activator):
    self.delta_list = []  # holds the error term of each time step
    for i in range(self.times):
        self.delta_list.append(np.zeros(
            (self.state_width, 1)))
    self.delta_list.append(sensitivity_array)
    # walk backwards through time, computing each error term
    for k in range(self.times - 1, 0, -1):
        self.calc_delta_k(k, activator)

def calc_delta_k(self, k, activator):
    '''
    Compute the delta of time k from the delta of time k+1:
    delta_k^T = delta_{k+1}^T W diag(f'(net_k)).
    '''
    # Apply the activation derivative to a copy, so state_list itself
    # stays intact; activator.backward takes the output s_k and returns
    # f'(net_k) expressed through it.
    state = self.state_list[k].copy()
    element_wise_op(state, activator.backward)
    self.delta_list[k] = np.dot(
        np.dot(self.delta_list[k+1].T, self.W),
        np.diag(state[:, 0])).T

def calc_gradient(self):
    self.gradient_list = []  # weight gradient of each time step
    for t in range(self.times + 1):
        self.gradient_list.append(np.zeros(
            (self.state_width, self.state_width)))
    for t in range(self.times, 0, -1):
        self.calc_gradient_t(t)
    # the actual gradient is the sum of the gradients over all time steps
    self.gradient = reduce(
        lambda a, b: a + b, self.gradient_list,
        self.gradient_list[0])  # [0] is initialized to zeros and never modified

def calc_gradient_t(self, t):
    '''
    Compute the gradient of the weights W at time t.
    '''
    gradient = np.dot(self.delta_list[t],
                      self.state_list[t-1].T)
    self.gradient_list[t] = gradient
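
The methods above rely on an element_wise_op helper and an activator object that are not shown in this excerpt. A minimal sketch of what they could look like, assuming the conventions of the referenced tutorial (backward takes the layer's output, not its input):

    import numpy as np

    def element_wise_op(array, op):
        # apply op to every element of array, in place
        for i in np.nditer(array, op_flags=['readwrite']):
            i[...] = op(i)

    class SigmoidActivator(object):
        def forward(self, weighted_input):
            return 1.0 / (1.0 + np.exp(-weighted_input))

        def backward(self, output):
            # sigmoid derivative expressed via the output: f'(net) = s * (1 - s)
            return output * (1 - output)

The second snippet below performs the same computation in a more compact, vectorized style; RNN1 is assumed to provide the forward pass (run) and the cached self._states.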

 

class RNN2(RNN1):
    # sigmoid activation for the hidden state
    def activate(self, x):
        return 1 / (1 + np.exp(-x))

    # softmax transform for the output (shifted by the max for numerical stability)
    def transform(self, x):
        safe_exp = np.exp(x - np.max(x))
        return safe_exp / np.sum(safe_exp)

    def bptt(self, x, y):
        x, y, n = np.asarray(x), np.asarray(y), len(y)
        # run the forward pass to get the outputs; this also caches every state
        o = self.run(x)
        # from here on, just transcribe the formulas ( σ'ω')σ
        dis = o - y
        dv = dis.T.dot(self._states[:-1])
        du = np.zeros_like(self._u)
        dw = np.zeros_like(self._w)
        for t in range(n - 1, -1, -1):
            st = self._states[t]
            ds = self._v.T.dot(dis[t]) * st * (1 - st)
            # truncated BPTT: look back at most 10 steps
            for bptt_step in range(t, max(-1, t - 10), -1):
                du += np.outer(ds, x[bptt_step])
                dw += np.outer(ds, self._states[bptt_step - 1])
                st = self._states[bptt_step - 1]
                ds = self._w.T.dot(ds) * st * (1 - st)
        return du, dv, dw

    def loss(self, x, y):
        o = self.run(x)
        # cross-entropy, clipped away from log(0)
        return np.sum(
            -y * np.log(np.maximum(o, 1e-12)) -
            (1 - y) * np.log(np.maximum(1 - o, 1e-12))
        )
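
Because bptt truncates the backward pass at 10 steps, the returned gradients are approximate for long sequences. A minimal sketch of how they might feed plain gradient descent; rnn (an RNN2 instance), lr, x, and y are hypothetical names, not part of the original post:

    # one vanilla gradient-descent step on the three weight matrices
    lr = 0.01
    du, dv, dw = rnn.bptt(x, y)
    rnn._u -= lr * du
    rnn._v -= lr * dv
    rnn._w -= lr * dw
    # repeated steps should drive rnn.loss(x, y) down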

 

posted on 2017-12-23 23:22  MicN