Transformer 例子2

一个多维数据输入的例子:

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

# 构造简单的多维时间序列数据集
def generate_multivariate_time_series(num_samples, seq_length, input_dim):
    """Build a toy multivariate time-series dataset.

    Returns:
        data:   ndarray of shape (num_samples, seq_length, input_dim),
                i.i.d. standard-normal values.
        labels: ndarray of shape (num_samples,) — for each sample, the
                values of feature 0 summed across the time axis.
    """
    data = np.random.randn(num_samples, seq_length, input_dim)
    # Sum over the time axis (axis=1), then keep feature 0 as the target.
    labels = data.sum(axis=1)[:, 0]
    return data, labels

# 定义 Transformer 模型
class TransformerModel(nn.Module):
    """Transformer-encoder regressor for multivariate sequences.

    Consumes input of shape (batch, seq_len, input_dim) and predicts an
    ``output_dim``-sized vector per sequence, read off the encoder's
    final time step. Note: ``input_dim`` must be divisible by ``heads``.
    """

    def __init__(self, input_dim, output_dim, num_layers, heads, hidden_size):
        super(TransformerModel, self).__init__()
        layer = nn.TransformerEncoderLayer(
            d_model=input_dim, nhead=heads, dim_feedforward=hidden_size
        )
        self.encoder_layer = layer
        self.transformer_encoder = nn.TransformerEncoder(layer, num_layers=num_layers)
        # Project the encoder state down to the regression target size.
        self.decoder = nn.Linear(input_dim, output_dim)

    def forward(self, src):
        # nn.TransformerEncoder defaults to (seq, batch, feature) layout,
        # so move the time axis to the front before encoding.
        seq_first = src.permute(1, 0, 2)
        encoded = self.transformer_encoder(seq_first)
        # Regress from the last time step's representation only.
        return self.decoder(encoded[-1])

# ---- Dataset configuration ----
input_dim = 3    # features per time step
output_dim = 1   # scalar regression target
num_samples = 1000
seq_length = 10

data, labels = generate_multivariate_time_series(num_samples, seq_length, input_dim)

# Ordered 80/20 train/test split.
train_size = int(num_samples * 0.8)
train_data, train_labels = data[:train_size], labels[:train_size]
test_data, test_labels = data[train_size:], labels[train_size:]

# ---- Mini-batch loader over the training split ----
train_dataset = torch.utils.data.TensorDataset(
    torch.from_numpy(train_data).float(),
    torch.from_numpy(train_labels).float(),
)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)

# ---- Model, loss, optimizer ----
model = TransformerModel(
    input_dim=input_dim,
    output_dim=output_dim,
    num_layers=2,
    heads=3,          # 3 divides d_model=3, as nn.MultiheadAttention requires
    hidden_size=128,
)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# ---- Training loop: 20 epochs of MSE regression ----
model.train()
for epoch in range(20):
    running_loss = 0.0
    for batch_inputs, batch_targets in train_loader:
        # Standard step: clear grads, forward, loss, backward, update.
        optimizer.zero_grad()
        preds = model(batch_inputs)
        loss = criterion(preds.squeeze(), batch_targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    # Report the summed per-batch loss for the epoch.
    print(f"Epoch {epoch+1}/20, Loss: {running_loss}")

# ---- Evaluation on the held-out split ----
model.eval()
test_inputs = torch.tensor(test_data).float()
with torch.no_grad():
    # (num_test, 1) -> (num_test,) numpy array of predictions.
    predicted = model(test_inputs).squeeze().numpy()

# ---- Plot predictions against the ground-truth labels ----
plt.plot(test_labels, label='True Labels')
plt.plot(predicted, label='Predicted Labels')
plt.legend()
plt.show()

  

posted on 2024-02-28 11:33  金凯旋  阅读(15)  评论(0编辑  收藏  举报

导航