每日笔记-LSTM
今天,搞了一段代码,但没有达到应有的效果。
import torch
import torch.nn as nn
import numpy as np

# Pin the RNG so runs are repeatable.
torch.manual_seed(42)


class ComplexLSTMModel(nn.Module):
    """Two-layer LSTM followed by a linear read-out.

    forward() consumes the whole sequence but emits only the prediction
    for the final time step.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.hidden_size = hidden_size
        # Stacked LSTM (two layers) over a (seq_len, batch=1, feature) input.
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers=2)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, input):
        steps = len(input)
        features, _ = self.lstm(input.view(steps, 1, -1))
        projected = self.fc(features.view(steps, -1))
        # Only the last step's output is returned.
        return projected[-1]


# Hyper-parameters.
input_size = 1
hidden_size = 8
output_size = 1
lr = 0.01
num_epochs = 100

# Toy signal: a sampled sine wave.
data = np.sin(np.arange(0, 100, 0.1))

# Model, loss and optimizer.
model = ComplexLSTMModel(input_size, hidden_size, output_size)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)

# Training loop: input is the whole series, target the series shifted by one.
# NOTE(review): the model returns a single last-step value while `labels`
# holds the full shifted series, so MSELoss broadcasts one prediction
# against every label — this is the flaw the post goes on to fix.
for epoch in range(num_epochs):
    inputs = torch.Tensor(data[:-1]).view(-1, 1, 1)
    labels = torch.Tensor(data[1:])

    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

# Autoregressive roll-out: start from the last training sample and feed
# each prediction back in as the next input.
with torch.no_grad():
    future = 100
    pred_data = [data[-1]]
    for _ in range(future):
        inputs = torch.Tensor([pred_data[-1]]).view(-1, 1, 1)
        pred = model(inputs)
        pred_data.append(pred.item())

# Plot the original series next to the rolled-out predictions.
import matplotlib.pyplot as plt

plt.plot(data, label='Original data')
plt.plot(np.arange(len(data) - 1, len(data) + future), pred_data, label='Predictions')
plt.legend()
plt.show()
结果如图:
没有达到想象的效果
-----------------------------------
仔细阅读后,找到了问题症结所在。
修改后的代码:
import torch
import torch.nn as nn
import numpy as np

# Deterministic initialisation for reproducibility.
torch.manual_seed(42)


class LSTMModel(nn.Module):
    """Two-layer LSTM with a linear head; returns the last-step prediction."""

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers=2)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, input):
        steps = len(input)
        # NOTE(review): the hidden state is stored on self but never reused.
        lstm_out, self.hidden = self.lstm(input.view(steps, 1, -1))
        projected = self.fc(lstm_out.view(steps, -1))
        return projected[-1]


# Hyper-parameters.
input_size = 1
hidden_size = 16
output_size = 1
lr = 0.001
num_epochs = 100

# Sampled sine wave used as the toy series.
data = np.sin(np.arange(0, 100, 0.1))

# Sliding-window supervision: 20 past points predict the next point.
window = 20
inputs = [data[i - window:i] for i in range(window, len(data))]
labels = [data[i] for i in range(window, len(data))]

model = LSTMModel(input_size, hidden_size, output_size)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)

# Train one window at a time, holding out the final 100 windows.
holdout = 100
for epoch in range(num_epochs):
    for i in range(len(inputs) - holdout):
        optimizer.zero_grad()
        outputs = model(torch.Tensor(inputs[i]).view(-1, 1, 1))
        loss = criterion(outputs, torch.Tensor([labels[i]]))
        loss.backward()
        optimizer.step()
    # Report once per epoch (the original `% 1 == 0` guard fired every epoch).
    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.6f}')

# Predict on the held-out windows.
pred_data = []
with torch.no_grad():
    for i in range(len(inputs) - holdout, len(inputs)):
        outputs = model(torch.Tensor(inputs[i]).view(-1, 1, 1))
        pred_data.append(outputs.item())

import matplotlib.pyplot as plt

plt.plot(data[:-100], label='Original data')
plt.plot(np.arange(len(data) - 100, len(data)), pred_data, label='Predictions')
plt.legend()
plt.show()
显示图像正常!
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· 分享一个免费、快速、无限量使用的满血 DeepSeek R1 模型,支持深度思考和联网搜索!
· 基于 Docker 搭建 FRP 内网穿透开源项目(很简单哒)
· ollama系列1:轻松3步本地部署deepseek,普通电脑可用
· 按钮权限的设计及实现
· 【杂谈】分布式事务——高大上的无用知识?
2023-02-20 ctp认证权限