LSTM 预测股票

pytorch实现LSTM对股票的预测

参考:https://blog.csdn.net/mary19831/article/details/129570030


输入:前十天的收盘价 x: [batch_size, seq_len, input_size] = [bs, 10, 1]
输出:下一天的收盘价 y: [batch_size, 1]
原博主给的代码输入特征是10维的,也就是认为 seq_len = 1, input_size = 10。
我这里做了修改,以便输入多维的特征(比如前10天的 收盘价,成交量... )

结果

左边是所有数据的预测结果(中间有条绿色的分割线 左边是训练数据 右边是测试数据的输出),右图是仅在测试集上预测的结果,可以看出对于测试集的预测,该模型是滞后的。
image

代码

import matplotlib.pyplot as plt
import numpy as np
import tushare as ts
import pandas as pd
import torch
from torch import nn
import datetime
from torch.utils.data import DataLoader, Dataset
import time

# Select the compute device once at module level: CUDA if available, else CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class LSTM_Regression(nn.Module):
	"""LSTM regressor for sequence-to-one prediction.

	Feeds a batch of sequences shaped (batch_size, seq_len, input_size)
	through a stacked LSTM and projects the hidden state of the final
	time step to an output of shape (batch_size, output_size).
	"""

	def __init__(self, input_size, hidden_size, output_size=1, num_layers=2):
		super().__init__()
		# batch_first=True so tensors are (batch, seq, feature) throughout.
		self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
		self.fc = nn.Linear(hidden_size, output_size)

	def forward(self, _x):
		seq_out, _ = self.lstm(_x)     # (batch, seq_len, hidden_size)
		last_step = seq_out[:, -1, :]  # keep only the final time step
		return self.fc(last_step)      # (batch, output_size)


def create_dataset(data, days_for_train=5):
	"""Build a sliding-window supervised dataset from a sequence.

	Each input sample is a window of ``days_for_train`` consecutive
	entries of ``data``; the corresponding target is the single entry
	immediately following the window ("use the last N days to predict
	the next day").

	Args:
		data: array-like of length d (e.g. shape (d,) or (d, 1)).
		days_for_train: window length, i.e. number of past days per sample.

	Returns:
		Tuple ``(dataset_x, dataset_y)`` of np.ndarrays where
		``dataset_x`` has ``d - days_for_train`` windows of length
		``days_for_train`` and ``dataset_y`` has the matching
		``d - days_for_train`` targets.  (The original docstring claimed
		``d - days_for_train + 1`` pairs, which was off by one.)
	"""
	dataset_x, dataset_y = [], []
	for i in range(len(data) - days_for_train):
		dataset_x.append(data[i:(i + days_for_train)])
		dataset_y.append(data[i + days_for_train])
	return np.array(dataset_x), np.array(dataset_y)


if __name__ == '__main__':
	days_for_train = 10  # window length: use the previous 10 days as one input sample
	batch_size = 1

	# Load data: daily closing prices of the SSE Composite index (network I/O via tushare).
	t0 = time.time()
	data_close = ts.get_k_data('000001', start='2019-01-01', index=True)['close']  # closing-price series
	data_close.to_csv('000001.csv', index=False) # cache the downloaded data as a .csv file
	data_close = pd.read_csv('000001.csv')  # read it back (yields a single-column DataFrame)
	data_close = data_close.astype('float32').values  # convert to a float32 ndarray; shape is (d, 1)

	# Min-max normalize prices into the [0, 1] range.
	max_value = np.max(data_close)
	min_value = np.min(data_close)
	data_close = (data_close - min_value) / (max_value - min_value)

	# Build sliding windows and split chronologically: first 70% train, rest test.
	dataset_x, dataset_y = create_dataset(data_close, days_for_train)  
	train_size = int(len(dataset_x) * 0.7)
	train_x = dataset_x[:train_size]
	train_y = dataset_y[:train_size]
	test_x = dataset_x[train_size:]
	test_y = dataset_y[train_size:]

	# Convert to PyTorch tensors on the selected device.
	# NOTE(review): data_close is (d, 1), so dataset_x is already
	# (n, days_for_train, 1) and matches the LSTM's expected input — confirm.
	train_x = torch.from_numpy(train_x).to(device)
	train_y = torch.from_numpy(train_y).to(device)
	test_x = torch.from_numpy(test_x).to(device)
	test_y = torch.from_numpy(test_y).to(device)
	print("train_x.shape: ", train_x.shape)
	print("train_y.shape: ", train_y.shape)
	print("test_x.shape: ", test_x.shape)
	print("test_y.shape: ", test_y.shape)

	model = LSTM_Regression(input_size=1, hidden_size=8, output_size=1, num_layers=2)  # build the model: 1 feature in, 1 value out
	model = model.to(device)
	train_loss = []
	loss_function = nn.MSELoss()
	optimizer = torch.optim.Adam(model.parameters(), lr=1e-5, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)

	# Training loop — currently disabled (commented out); the script below
	# relies on previously saved parameters in 'model_params.pkl'.
	# for i in range(60):
	#     for b in range(0, len(train_x), batch_size):
	#         batch_x = train_x[b:b+batch_size]
	#         batch_y = train_y[b:b+batch_size]

	#         # forward pass
	#         outputs = model(batch_x)
	#         loss = loss_function(outputs, batch_y)
	#         loss.backward()
	#         optimizer.step()
	#         optimizer.zero_grad()
	#         train_loss.append(loss.item())

	#     # append the epoch's loss to a log file and print it to the terminal
	#     with open('log.txt', 'a+') as f:
	#         f.write('{} - {}\n'.format(i + 1, loss.item()))
	#     if (i + 1) % 1 == 0:
	#         print('Epoch: {}, Loss:{:.5f}'.format(i + 1, loss.item()))

	# # plot the training-loss curve
	# plt.figure()
	# plt.plot(train_loss, 'b', label='loss')
	# plt.title("Train_Loss_Curve")
	# plt.ylabel('train_loss')
	# plt.xlabel('epoch_num')
	# plt.savefig('loss.png', format='png', dpi=200)
	# plt.close()

	# torch.save(model.state_dict(), 'model_params.pkl')  # save model parameters for later reuse

	# t1 = time.time()
	# T = t1 - t0
	# print('The training time took %.2f' % (T / 60) + ' mins.')

	# Evaluation on the held-out test windows.
	model = model.eval()  # switch to evaluation mode
	model.load_state_dict(torch.load('model_params.pkl'))  # load previously trained parameters (training above is commented out)
	pred_test = model(test_x)
	pred_test = pred_test.view(-1).data.cpu().numpy()
	test_y = test_y.data.cpu().numpy()
	assert len(pred_test) == len(test_y)

	# Plot prediction vs. ground truth on the test set only.
	plt.plot(pred_test, 'r', label='prediction')
	plt.plot(test_y, 'b', label='real')
	plt.legend(loc='best')
	plt.savefig('test.png', format='png', dpi=200)
	plt.close()

	# Predict over the full dataset (train + test) for an overview plot.
	# NOTE(review): dataset_x should already be (n, days_for_train, 1), making
	# this reshape a no-op — confirm against create_dataset's output shape.
	dataset_x = dataset_x.reshape(-1, days_for_train, 1)
	dataset_x = torch.from_numpy(dataset_x).to(device)

	pred_test = model(dataset_x)
	pred_test = pred_test.view(-1).data.cpu().numpy()
	pred_test = np.concatenate((np.zeros(days_for_train), pred_test))  # left-pad with zeros so lengths line up with data_close
	assert len(pred_test) == len(data_close)

	plt.plot(pred_test, 'r', label='prediction')
	plt.plot(data_close, 'b', label='real')
	plt.plot((train_size, train_size), (0, 1), 'g--')  # green divider: training data on the left, test data on the right
	plt.legend(loc='best')
	plt.savefig('result.png', format='png', dpi=200)
	plt.close()
posted @   小··明  阅读(51)  评论(0编辑  收藏  举报
相关博文:
阅读排行:
· DeepSeek 开源周回顾「GitHub 热点速览」
· 物流快递公司核心技术能力-地址解析分单基础技术分享
· .NET 10首个预览版发布:重大改进与新特性概览!
· AI与.NET技术实操系列(二):开始使用ML.NET
· .NET10 - 预览版1新功能体验(一)
点击右上角即可分享
微信分享提示