paddlepaddle自定义网络模型及onnx模型转换与推理

  前面介绍过了使用Paddleseg套件训练后,使用export.py文件导出模型为推理部署模型。具体可以参考之前的:https://www.cnblogs.com/wancy/p/18028138

  本文介绍使用paddle自定义简单二分类CNN模型到训练数据集再到转换onnx模型推理。

1.  数据集划分

  我这里将数据划分为train.txt与test.txt。

  train.txt与test.txt中每一行的格式为:图片路径 + 空格 + 数字类别(二分类的话,类别就是0或1)。具体可以参考我以前的鸢尾花的分类https://www.cnblogs.com/wancy/p/17868003.html,只不过这里换成了二分类数据集。

2. paddlepaddle自定义简单二分类CNN网络

import threading
import time

import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
from paddle.io import Dataset, DataLoader
from PIL import Image
from paddle.vision.transforms import ToTensor
from paddle.vision.transforms.functional import resize
import paddle.nn.functional as F

#paddle.set_device('gpu:0')  # 选择第一个 GPU
from paddle.vision.transforms import Normalize
import matplotlib.pyplot as plt  # 添加 Matplotlib


# 自定义数据集类
class CustomDataset(Dataset):
    """Image/label dataset read from a plain-text list file.

    Each line of *file_path* is expected to look like
    ``<image_path> <label>`` where ``<label>`` is an integer class id
    (0 or 1 for the binary task described in this post).
    """

    def __init__(self, file_path, transform=None, target_size=(40, 55)):  # target_size is (w, h)
        self.data_list = self.load_data(file_path)
        self.transform = transform
        self.target_size = target_size

    def load_data(self, file_path):
        """Parse the list file into ``[[image_path, label_str], ...]``."""
        with open(file_path, 'r') as fh:
            return [line.strip().split() for line in fh]

    def __getitem__(self, idx):
        """Load, resize and normalize one sample.

        Returns a ``(image_tensor, int64 label tensor)`` pair.
        """
        image_path, label = self.data_list[idx]
        img = Image.open(image_path)
        img = resize(img, size=self.target_size)
        img = ToTensor()(img)
        # Normalize with ImageNet channel statistics — must match inference.
        img = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img)
        if self.transform:
            img = self.transform(img)
        return img, paddle.to_tensor(int(label), dtype='int64')

    def __len__(self):
        return len(self.data_list)
# ---------------------------------------------------------------------------
# Simple binary-classification CNN plus the training / evaluation script.
# ---------------------------------------------------------------------------
class SimpleCNN(nn.Layer):
    """Three conv stages (each halving H and W) followed by a single-logit FC.

    The 1x1 conv collapses the channels to 1 so the final Linear stays tiny.
    ``Linear(30, 1)`` assumes the flattened feature length is 30, i.e. an
    input resized with target_size=(40, 55) — TODO confirm if sizes change.
    The single output is a raw logit; apply sigmoid + threshold for the class.
    """

    def __init__(self, num_classes=2):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2D(3, 32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2D(32)
        self.pool1 = nn.MaxPool2D(kernel_size=2, stride=2)  # halves H and W
        self.conv2 = nn.Conv2D(32, 64, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2D(64)
        self.pool2 = nn.MaxPool2D(kernel_size=2, stride=2)
        # 1x1 conv reduces 64 channels to 1 to shrink the parameter count.
        self.conv3 = nn.Conv2D(64, 1, kernel_size=1)
        self.bn3 = nn.BatchNorm2D(1)
        self.pool3 = nn.MaxPool2D(kernel_size=2, stride=2)
        self.fc = nn.Linear(30, 1, weight_attr=nn.initializer.KaimingNormal())

    def forward(self, x):
        x = self.pool1(self.bn1(self.conv1(x)))
        x = self.pool2(self.bn2(self.conv2(x)))
        x = self.pool3(self.bn3(self.conv3(x)))
        x = paddle.flatten(x, start_axis=1)
        return self.fc(x)


# Datasets and loaders (target_size is (width, height)).
train_dataset = CustomDataset('train.txt', transform=None, target_size=(40, 55))
test_dataset = CustomDataset('test.txt', transform=None, target_size=(40, 55))
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=8, shuffle=False)

# Model, optimizer and checkpoint paths.
model = SimpleCNN()
optimizer = opt.Adam(parameters=model.parameters(), learning_rate=0.01)
model_save_path = r'C:\model\model.pdparams'
# FIX: was r'\model\adam.pdopt', which never matched the path the optimizer
# state is saved to below, so resuming silently never restored the optimizer.
optimizer_path = r'C:\model\adam.pdopt'

# Resume from an existing checkpoint when available (best-effort).
try:
    model_state_dict = paddle.load(model_save_path)
    model.set_state_dict(model_state_dict)
    print(f'Model loaded from {model_save_path}')
except Exception as e:
    print(f'No pre-existing model found: {e}')
try:
    opt_state_dict = paddle.load(optimizer_path)
    print("opt_state_dict", opt_state_dict)
    optimizer.set_state_dict(opt_state_dict)
except Exception as e:
    print(f'No pre-existing optimizer model found: {e}')


def plot_loss_dynamic(loss_values, flag):
    """Redraw the training-loss curve until *flag* (threading.Event) is set.

    NOTE(review): Matplotlib is generally not thread-safe; calling it from a
    worker thread only works with some backends — confirm on your setup, or
    plot after training instead.
    """
    while not flag.is_set():
        plt.clf()
        plt.plot(loss_values, label='Training Loss')
        plt.xlabel('Epochs')
        plt.ylabel('Loss')
        plt.title('Real-time Training Loss Monitoring')
        plt.legend()
        plt.pause(0.1)


def train(model, train_loader, optimizer, num_epochs=50, val_loader=None):
    """Train with BCE-with-logits loss, keeping the best-val-loss checkpoint.

    Args:
        model: SimpleCNN instance producing a single logit per sample.
        train_loader: DataLoader yielding (image, int64 label) batches.
        optimizer: paddle optimizer bound to ``model.parameters()``.
        num_epochs: number of passes over ``train_loader``.
        val_loader: optional validation DataLoader; when None, no validation
            or checkpointing is performed (FIX: original crashed on None).
    """
    print("num_epochs", num_epochs)
    best_val_loss = float('inf')  # best validation loss seen so far
    losses = []                   # one entry per epoch (last-batch loss)
    exit_flag = threading.Event() # signals the plotting thread to stop

    plot_thread_instance = threading.Thread(
        target=plot_loss_dynamic, args=(losses, exit_flag))
    plot_thread_instance.start()

    try:
        for epoch in range(num_epochs):
            for data, labels in train_loader:
                outputs = model(data)
                # BCE-with-logits wants float targets shaped like the logits.
                labels = labels.unsqueeze(1).astype('float32')
                loss = F.binary_cross_entropy_with_logits(outputs, labels)
                optimizer.clear_grad()
                loss.backward()
                optimizer.step()
            losses.append(loss.numpy())

            if val_loader is None:
                print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.numpy()}')
                continue

            # Validation loss (no gradients needed).
            val_losses = []
            with paddle.no_grad():
                for val_data, val_labels in val_loader:
                    val_outputs = model(val_data)
                    val_labels = val_labels.unsqueeze(1).astype('float32')
                    val_loss = F.binary_cross_entropy_with_logits(val_outputs, val_labels)
                    val_losses.append(val_loss.numpy())
            avg_val_loss = np.mean(val_losses)
            print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.numpy()}, Val Loss: {avg_val_loss}')

            # Checkpoint whenever the validation loss improves.
            if avg_val_loss < best_val_loss:
                best_val_loss = avg_val_loss
                paddle.save(model.state_dict(), r'C:\model\model.pdparams')
                paddle.save(optimizer.state_dict(), r"C:\model\adam.pdopt")
                print(f'Best model saved at epoch {epoch + 1}')
    finally:
        # FIX: the original never set the flag, so the plotting thread kept
        # looping and join() blocked forever (deadlock at the end of train()).
        exit_flag.set()
        plot_thread_instance.join()


def test(model, test_loader, threshold=0.5):
    """Print the accuracy of *model* over *test_loader*.

    ``threshold`` is applied to the sigmoid of the single logit.
    FIX: the original thresholded twice (bool >= 0.5) and compared a paddle
    tensor against a numpy array; here we threshold once and compare in numpy.
    """
    model.eval()
    correct = 0
    total = 0
    with paddle.no_grad():
        for data, labels in test_loader:
            outputs = model(data)
            predicted = (F.sigmoid(outputs) >= threshold).astype('int64').squeeze(1)
            total += labels.shape[0]
            correct += (predicted.numpy() == labels.numpy()).sum().item()
    print("total", total)
    accuracy = correct / total
    print(f'Test Accuracy: {accuracy}')


# Train (validating on the test split); swap the comments to evaluate instead.
train(model, train_loader, optimizer, num_epochs=20, val_loader=test_loader)
# test(model, test_loader)   # test-set accuracy  # 0.90
# test(model, train_loader)  # train-set accuracy # 1.0

  训练过程中,会动态绘制图形,可以看到损失函数曲线。训练完成后,会保存model.pdparams文件,以及一个adam.pdopt优化器状态文件。若要在测试集上评估,可以取消最后几行test调用的注释,并将train调用注释掉。

3. 读取单张图片测试

def predict_single_image(model, image_path, threshold=0.5):
    """Classify a single image file with the trained binary CNN.

    Preprocessing mirrors the training pipeline exactly: resize to
    (40, 55), ToTensor, ImageNet normalization, then a batch dimension
    is prepended.

    Returns:
        (predicted_label, probabilities): int 0/1 and the sigmoid
        probability as a numpy array.
    """
    model.eval()  # switch to evaluation mode
    img = Image.open(image_path)  # loaded as RGB
    tensor = ToTensor()(resize(img, size=(40, 55)))  # size is (width, height)
    tensor = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(tensor)
    batch = paddle.unsqueeze(tensor, axis=0)  # add batch dim -> [1, 3, H, W]

    with paddle.no_grad():
        logits = model(batch)
    # Sigmoid maps the single logit to a probability of class 1.
    probabilities = F.sigmoid(logits)
    predicted_label = 1 if probabilities >= threshold else 0

    return predicted_label, probabilities.numpy()

if __name__ == '__main__':
    # Reload the trained weights and run a single-image prediction test.
    model = SimpleCNN()
    model_save_path = r'C:\model.pdparams'  # path to the saved parameters
    model.set_state_dict(paddle.load(model_save_path))

    image_path = r'C:\0.png'
    threshold = 0.5  # decision threshold on the sigmoid probability
    predicted_label, probabilities = predict_single_image(model, image_path, threshold)
    print(f'Predicted Label: {predicted_label}')
    print(f'Probabilities: {probabilities}')
    if predicted_label == 1:  # ok
        print("1")
    elif predicted_label == 0:  # ng
        print("0")

  由于是二分类,输出结果是0或者1。

4. 转换为ONNX模型

  3.1 转可用于推理部署的静态模型

# 1. Convert the trained dynamic-graph model to a static inference model.
# FIX: this snippet used `os` and `InputSpec` without importing them, so it
# failed with NameError when run standalone.
import os

import paddle
from paddle.static import InputSpec

# Instantiate the model class and load the trained parameters.
model = SimpleCNN()
model_path = r'C:\model'
model_filename = 'model.pdparams'
model_state_dict = paddle.load(os.path.join(model_path, model_filename))
model.set_state_dict(model_state_dict)

# Input spec: dynamic batch size, fixed 3-channel 40x55 image.
# NOTE(review): the original comment labeled the trailing axes "C, W, H";
# confirm the H/W order matches what resize((40, 55)) produced in training.
input_spec = [InputSpec(shape=[None, 3, 40, 55], dtype='float32')]

# Trace the dynamic model into a static graph with the declared input spec.
static_model = paddle.jit.to_static(model, input_spec=input_spec)

# Save the static model (produces .pdmodel / .pdiparams / .info files).
static_model_dir = 'static_model'
os.makedirs(static_model_dir, exist_ok=True)
static_model_path = os.path.join(static_model_dir, 'static_model')
paddle.jit.save(static_model, static_model_path)

  运行后会生成静态模型,包括.pdiparams参数权重文件、.pdmodel模型结构文件,还有一个.info文件不用管。

  3.2 命令行转onnx模型

  需要paddle2onnx ,使用清华镜像源安装:  pip install paddle2onnx -i https://pypi.tuna.tsinghua.edu.cn/simple

paddle2onnx --model_dir C:/output --model_filename C:/static_model.pdmodel --params_filename C:/static_model.pdiparams --opset_version 11 --save_file C:/output/output.onnx

  此时,生成了一个output.onnx文件

5.  onnx模型的推理

  前面我们对数据进行了处理,再输入模型预测的。想要得到相同的结果,数据预处理必须相同。代码如下:

# ONNX inference: preprocessing must exactly match the training preprocessing
# to reproduce the same predictions.
# FIX: this snippet used `cv2` and `paddle` without importing them.
import cv2
import onnxruntime
import paddle
import paddle.nn.functional as F
from paddle.vision.transforms import Normalize, ToTensor
from paddle.vision.transforms.functional import resize

onnx_path = r'C:\output\output.onnx'
sess = onnxruntime.InferenceSession(onnx_path, providers=['CUDAExecutionProvider'])

image_path = r'C:\images\0.png'
# cv2.imread returns BGR; converting to RGB makes this equivalent to the
# Image.open(image_path) call used during training.
image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = resize(image, size=(40, 55))  # width, height — same as training
image = ToTensor()(image)
image = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(image)
image = paddle.unsqueeze(image, axis=0)  # add the batch dimension

# FIX: this assignment had been glued onto the end of a comment in the
# original post, so `input_name` was never defined and sess.run() raised
# NameError. It must be a statement of its own.
input_name = sess.get_inputs()[0].name
# print(input_name)

image = image.numpy()
# `result` is the model's single output; unpack according to how many
# outputs your model returns (one here).
result, = sess.run(None, {input_name: image})
print("result=", result)

threshold = 0.5
# Sigmoid maps the logit to a probability, then threshold it to a class id.
tensor_result = paddle.to_tensor(result)
probabilities = F.sigmoid(tensor_result)
predicted_label = 1 if probabilities >= threshold else 0
print(predicted_label)
print(probabilities.numpy())

 

小结:本文是做一个比较小的图片的良与不良的二分类,所以设置的网络输入尺寸比较小。而且网络比较简单,大家可以根据自己的任务需求自己更改。

  若存在不足或错误之处欢迎指正与评论!

posted @ 2024-04-29 20:13  wancy  阅读(416)  评论(0编辑  收藏  举报