打赏

深度学习中对多个目标标签进行训练和预测代码实例

# TensorFlow
#假设我们有一个任务是从图像中预测物体的位置(x坐标和y坐标)和物体的类别。这个任务有三个目标标签:x坐标、y坐标和类别。
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense

# -- Synthetic data ------------------------------------------------------
num_samples = 1000
num_classes = 10

# 64-dimensional feature vectors, one row per sample.
X = np.random.rand(num_samples, 64)

# Three targets per sample: an x coordinate, a y coordinate and a class id.
y_x = np.random.rand(num_samples, 1)
y_y = np.random.rand(num_samples, 1)
y_class = np.random.randint(0, num_classes, size=(num_samples,))

# -- Train / test split: first 80% of rows train, remaining 20% test -----
split_ratio = 0.8
num_train_samples = int(num_samples * split_ratio)

def _split(arr):
    # Cut one array into its train / test portions at the split point.
    return arr[:num_train_samples], arr[num_train_samples:]

X_train, X_test = _split(X)
y_train_x, y_test_x = _split(y_x)
y_train_y, y_test_y = _split(y_y)
y_train_class, y_test_class = _split(y_class)

# -- Model: one shared input feeding three task-specific heads -----------
input_layer = Input(shape=(64,))

# Two linear regression heads (coordinates) and one softmax head (class).
output_x = Dense(1, name='output_x')(input_layer)
output_y = Dense(1, name='output_y')(input_layer)
output_class = Dense(num_classes, activation='softmax', name='output_class')(input_layer)

# Multi-output model: predictions come back in the order listed here.
model = keras.Model(inputs=input_layer, outputs=[output_x, output_y, output_class])

# Per-head losses and metrics, keyed by the output layer names: squared
# error for the regression heads, cross-entropy for the classifier.
losses = {
    'output_x': 'mean_squared_error',
    'output_y': 'mean_squared_error',
    'output_class': 'sparse_categorical_crossentropy',
}
metrics = {
    'output_x': 'mae',
    'output_y': 'mae',
    'output_class': 'accuracy',
}
model.compile(optimizer='adam', loss=losses, metrics=metrics)

# -- Training ------------------------------------------------------------
# Targets are keyed by the name of the output layer they supervise.
train_targets = {
    'output_x': y_train_x,
    'output_y': y_train_y,
    'output_class': y_train_class,
}
history = model.fit(
    X_train,
    train_targets,
    validation_split=0.2,
    epochs=10,
    batch_size=32,
)

# -- Prediction: one array per output head, in model output order --------
y_pred_x, y_pred_y, y_pred_class = model.predict(X_test)

# y_pred_x / y_pred_y hold the coordinate predictions;
# y_pred_class holds the per-class probabilities.

"""
在多输出模型中,每个输出层可以有不同的损失函数,以适应不同类型的目标标签。
"""
# PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision import datasets
from torch.utils.data import DataLoader

# 1. Data: CIFAR-10 images, normalised per channel to roughly [-1, 1].
normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
transform = transforms.Compose([transforms.ToTensor(), normalize])

train_dataset = datasets.CIFAR10(root='./data', train=True, transform=transform, download=True)
test_dataset = datasets.CIFAR10(root='./data', train=False, transform=transform, download=True)

# Shuffle only the training stream; evaluation order does not matter.
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64)

# 2. 定义多目标分类模型
class MultiTargetModel(nn.Module):
    """CNN backbone with two classification heads sharing the same features.

    Head 1 predicts one of 10 classes, head 2 one of 5 classes; both heads
    read the flattened output of the shared convolutional extractor.
    """

    def __init__(self):
        super().__init__()
        # Shared feature extractor: two conv/ReLU/pool stages.
        # For 3x32x32 input this yields 32 channels of 8x8 feature maps.
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 16, 3, 1, 1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 32, 3, 1, 1),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        flat_features = 32 * 8 * 8
        # Head for target 1: 10-way classifier.
        self.fc1 = nn.Sequential(
            nn.Linear(flat_features, 128),
            nn.ReLU(),
            nn.Linear(128, 10),
        )
        # Head for target 2: 5-way classifier.
        self.fc2 = nn.Sequential(
            nn.Linear(flat_features, 64),
            nn.ReLU(),
            nn.Linear(64, 5),
        )

    def forward(self, x):
        """Return (head-1 logits, head-2 logits) for a batch of images."""
        features = self.cnn(x)
        features = features.flatten(start_dim=1)  # (batch, 32*8*8)
        return self.fc1(features), self.fc2(features)

# 3. Instantiate the two-head model.
model = MultiTargetModel()

# 4. One cross-entropy loss per head, optimised jointly with plain SGD.
criterion1 = nn.CrossEntropyLoss()  # loss for head 1 (10 classes)
criterion2 = nn.CrossEntropyLoss()  # loss for head 2 (5 classes)
optimizer = optim.SGD(model.parameters(), lr=0.01)

# 5. Training loop.
# FIX: a CIFAR-10 DataLoader batch is (images, labels) with a SINGLE label
# tensor — the original `for batch_data, (labels1, labels2) in train_loader`
# would raise at runtime when it tried to unpack that tensor into two
# variables. For this demo the real CIFAR-10 class supervises head 1, and a
# derived 5-way label (class id modulo 5) supervises head 2.
num_epochs = 10
for epoch in range(num_epochs):
    model.train()
    total_loss1 = 0.0
    total_loss2 = 0.0
    for batch_data, labels in train_loader:
        labels1 = labels       # target 1: the true 10-way CIFAR-10 class
        labels2 = labels % 5   # target 2: synthetic 5-way label for the demo
        optimizer.zero_grad()
        output1, output2 = model(batch_data)
        loss1 = criterion1(output1, labels1)
        loss2 = criterion2(output2, labels2)
        # Joint objective: unweighted sum of the two task losses.
        loss = loss1 + loss2
        loss.backward()
        optimizer.step()
        total_loss1 += loss1.item()
        total_loss2 += loss2.item()
    print(f"Epoch {epoch + 1}, Loss (Target 1): {total_loss1 / len(train_loader)}, Loss (Target 2): {total_loss2 / len(train_loader)}")

# 6. Evaluation: per-head accuracy on the held-out test set.
# FIX: as in training, a CIFAR-10 batch carries one label tensor, so the
# original `(labels1, labels2)` unpacking would fail. The second target is
# derived the same way (class id modulo 5) so both heads can be scored.
model.eval()
correct1 = 0
correct2 = 0
total = 0
with torch.no_grad():  # no gradients needed for evaluation
    for batch_data, labels in test_loader:
        labels1 = labels       # target 1: true 10-way class
        labels2 = labels % 5   # target 2: synthetic 5-way label
        output1, output2 = model(batch_data)
        # argmax over the class dimension gives each head's prediction
        _, predicted1 = torch.max(output1, 1)
        _, predicted2 = torch.max(output2, 1)
        total += labels1.size(0)
        correct1 += (predicted1 == labels1).sum().item()
        correct2 += (predicted2 == labels2).sum().item()

print(f"Accuracy on Test Data (Target 1): {100 * correct1 / total}%")
print(f"Accuracy on Test Data (Target 2): {100 * correct2 / total}%")

 

posted @ 2023-10-04 14:47  不像话  阅读(84)  评论(0编辑  收藏  举报