Hand-written RNN forward pass vs PyTorch nn.RNN

import torch
import torch.nn as nn

batch_size, seq, num_layers = 2, 3, 2
input_size, hidden_size = 2, 3
input = torch.randn(batch_size, seq, input_size)
h_0 = torch.zeros(num_layers, batch_size, hidden_size)
# Call PyTorch's built-in RNN (batch_first=True: input is (batch, seq, feature))
rnn = nn.RNN(input_size,hidden_size,num_layers, batch_first=True)
output , h_n = rnn(input, h_0)
print('PyTorch RNN:')
print(output)
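With batch_first=True, output stacks the last layer's hidden state at every time step, while h_n holds every layer's final hidden state; their shapes can be checked directly:

# output: (batch_size, seq, hidden_size); h_n: (num_layers, batch_size, hidden_size)
print(output.shape)  # torch.Size([2, 3, 3])
print(h_n.shape)     # torch.Size([2, 2, 3])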
# Show the PyTorch RNN weights and biases
# for name, value in rnn.named_parameters():
# 	print(name)
# 	print(value)
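For this configuration the parameter shapes are weight_ih_l0: (hidden_size, input_size), weight_hh_l0: (hidden_size, hidden_size), and weight_ih_l1: (hidden_size, hidden_size), since layer 1's input is layer 0's hidden state; all biases are (hidden_size,). A quick check:

print(rnn.weight_ih_l0.shape)  # torch.Size([3, 2])
print(rnn.weight_hh_l0.shape)  # torch.Size([3, 3])
print(rnn.weight_ih_l1.shape)  # torch.Size([3, 3])
print(rnn.bias_ih_l0.shape)    # torch.Size([3])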

# Hand-written forward pass for a single RNN layer
def rnn_forward(input, weight_ih, weight_hh, bias_ih, bias_hh, h_0):
	batch_size, seq, input_size = input.shape
	hidden_size = weight_ih.shape[0]
	hout_layer = torch.zeros(batch_size, seq, hidden_size)
	'''
	Parameter shapes:
	weight_ih: (hidden_size, input_size)
	weight_hh: (hidden_size, hidden_size)
	bias_ih, bias_hh: (hidden_size,)
	h_0: (batch_size, hidden_size)
	'''
	for t in range(seq):
		x = input[:, t, :]                            # (batch_size, input_size)
		times_ih = torch.mm(weight_ih, torch.t(x))    # (hidden_size, batch_size)
		times_hh = torch.mm(weight_hh, torch.t(h_0))  # (hidden_size, batch_size)
		# h_t = tanh(x_t @ W_ih^T + b_ih + h_{t-1} @ W_hh^T + b_hh)
		h_0 = torch.tanh(torch.t(times_ih) + bias_ih + torch.t(times_hh) + bias_hh)
		hout_layer[:, t, :] = h_0                     # h_0 is now (batch_size, hidden_size)

	return hout_layer, h_0
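For reference, the same update can be written without any transposes by multiplying on the right; a minimal sketch of an equivalent loop body, reusing the names from rnn_forward:

# Equivalent, transpose-free update; x: (batch_size, input_size), h: (batch_size, hidden_size)
h = h_0
for t in range(seq):
	x = input[:, t, :]
	h = torch.tanh(x @ weight_ih.T + bias_ih + h @ weight_hh.T + bias_hh)
	hout_layer[:, t, :] = h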


# Layer 0 output: fed with the original input sequence
hout_layer0, hn_layer0 = rnn_forward(input, rnn.weight_ih_l0, rnn.weight_hh_l0, rnn.bias_ih_l0, rnn.bias_hh_l0, h_0[0, :, :])
# Layer 1 output: fed with layer 0's full output sequence
hout_layer1, hn_layer1 = rnn_forward(hout_layer0, rnn.weight_ih_l1, rnn.weight_hh_l1, rnn.bias_ih_l1, rnn.bias_hh_l1, h_0[1, :, :])

# Collect each layer's final hidden state (comparable to h_n from PyTorch)
h_0[0, :, :] = hn_layer0
h_0[1, :, :] = hn_layer1
print('RNN_forward:')
print(hout_layer1)
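As a sanity check, the hand-written results can be compared against PyTorch's numerically; both comparisons should print True:

print(torch.allclose(output, hout_layer1, atol=1e-6))  # top-layer outputs match
print(torch.allclose(h_n, h_0, atol=1e-6))             # final hidden states match (h_0 was overwritten above)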

  

posted @ 2022-04-27 11:30 华小电