手写RNN前向计算 VS Pytorch.RNN
"""Hand-written RNN forward pass compared against torch.nn.RNN.

Builds a 2-layer Elman RNN with PyTorch, then reproduces its forward
computation layer by layer with explicit matrix products, printing both
results so they can be compared by eye.
"""
import torch
from torch import nn

batch_size, seq, num_layers = 2, 3, 2
input_size, hidden_size = 2, 3
x = torch.randn(batch_size, seq, input_size)            # (batch, seq, input_size)
h_0 = torch.zeros(num_layers, batch_size, hidden_size)  # initial hidden state per layer

# Reference: PyTorch's built-in RNN.
rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
output, h_n = rnn(x, h_0)
print('Pytorch RNN:')
print(output)

# To inspect the learned weights and biases, uncomment:
# for name, value in rnn.named_parameters():
#     print(name)
#     print(value)


def rnn_forward(input, weight_ih, weight_hh, bias_ih, bias_hh, h_0):
    """Forward pass of a single Elman RNN layer.

    Computes h_t = tanh(x_t @ W_ih^T + b_ih + h_{t-1} @ W_hh^T + b_hh)
    for every time step, exactly as one layer of torch.nn.RNN does.

    Args:
        input: (batch, seq, input_size) input sequence.
        weight_ih: (hidden_size, input_size) input-to-hidden weights.
        weight_hh: (hidden_size, hidden_size) hidden-to-hidden weights.
        bias_ih: (hidden_size,) input-to-hidden bias.
        bias_hh: (hidden_size,) hidden-to-hidden bias.
        h_0: (batch, hidden_size) initial hidden state for this layer.

    Returns:
        (hout_layer, h_n): the (batch, seq, hidden_size) per-step outputs
        and the final (batch, hidden_size) hidden state.
    """
    batch_size, seq, input_size = input.shape
    hidden_size = weight_ih.shape[0]
    hout_layer = torch.zeros(batch_size, seq, hidden_size)
    # h_0 is already (batch, hidden_size) — do NOT squeeze it.  The
    # original squeeze(0) was a no-op for batch_size > 1 but collapsed
    # the batch dimension (and broke the mm shapes) when batch_size == 1.
    h_t = h_0
    for t in range(seq):
        x_t = input[:, t, :]                          # (batch, input_size)
        times_ih = torch.mm(weight_ih, torch.t(x_t))  # (hidden, batch)
        times_hh = torch.mm(weight_hh, torch.t(h_t))  # (hidden, batch)
        h_t = torch.tanh(torch.t(times_ih) + bias_ih + torch.t(times_hh) + bias_hh)
        hout_layer[:, t, :] = h_t                     # h_t is (batch, hidden_size)
    return hout_layer, h_t


# Layer 0 consumes the raw input; layer 1 consumes layer 0's outputs.
hout_layer0, h0_layer0 = rnn_forward(x, rnn.weight_ih_l0, rnn.weight_hh_l0,
                                     rnn.bias_ih_l0, rnn.bias_hh_l0, h_0[0, :, :])
hout_layer1, h0_layer1 = rnn_forward(hout_layer0, rnn.weight_ih_l1, rnn.weight_hh_l1,
                                     rnn.bias_ih_l1, rnn.bias_hh_l1, h_0[1, :, :])
h_0[0, :, :] = h0_layer0
h_0[1, :, :] = h0_layer1
print('RNN_forward:')
print(hout_layer1)
分类:
Python
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· 震惊!C++程序真的从main开始吗?99%的程序员都答错了
· winform 绘制太阳,地球,月球 运作规律
· 【硬核科普】Trae如何「偷看」你的代码?零基础破解AI编程运行原理
· 上周热点回顾(3.3-3.9)
· 超详细:普通电脑也行Windows部署deepseek R1训练数据并当服务器共享给他人