
Deploying a Simple Training Project with Detectron2

The Getting Started page of the official Detectron2 documentation offers two ways to use detectron2. The first is the Colab Notebook most readers have probably already worked through (the horse-riding prince and balloon detection); the second is Python files run from the command line, including the demo file demo.py and the deployable training scripts train_net.py & plain_train_net.py. The Notebook already walks through the basic steps of mask detection with Mask R-CNN: registering the dataset, building the config, training, evaluation, and visualizing results. Because manual annotation is labor-intensive, self-annotated datasets usually contain only object boxes and no masks, so Faster R-CNN is the model to train; for reference code, see the Medium article [2] and its accompanying code.

Drawing on the CSDN article [3], this post outlines how to deploy the project as Python files driven from the command line, which suits running on a GPU server.

Directory Overview

The project layout is as follows:

Project
--configs
----COCO-Detection
      faster_rcnn_R_50_FPN_3x.yaml  # copied from the official repo
    Base-RCNN-FPN.yaml  # likewise copied from the official repo
    my_config.yaml  # your own training config
--tools
    train_net.py  # copied from the official repo and modified
    train.sh
    train_resume.sh
    eval.sh
--utils
    txt2coco.py  # converts your own dataset to standard COCO format

The dataset layout is as follows:

MyDataset
--train
    01_00001.jpg  # image file names do not matter
    ...
--val
    ...
  train.json  # place wherever you like
  val.json

Dataset Registration

Using a COCO-format dataset is the cleanest approach, though you can also define a custom dataset inside tools/train_net.py following the official documentation. For converting csv/voc/labelme and other formats to COCO, see the conversion code on Github [4]; this post's utils/txt2coco.py builds on that code to turn our own txt-format annotations into COCO format.
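For reference, here is a minimal sketch of what such a conversion can look like. It is an assumption-laden example, not the actual utils/txt2coco.py: the txt line format (filename x_min y_min x_max y_max class_id) is made up, so adapt the parsing to your own annotations. Note that COCO boxes are [x, y, width, height] and that category ids conventionally start at 1, with background not listed at all.

import json
import os

from PIL import Image

def txt2coco(txt_path, image_dir, class_names, out_json):
    """Convert one txt annotation file into a COCO-format json.

    class_names lists the foreground classes only (no __background__).
    """
    images, annotations, image_ids = [], [], {}
    ann_id = 1
    with open(txt_path) as f:
        for line in f:
            fname, x1, y1, x2, y2, cls = line.split()
            x1, y1, x2, y2 = map(float, (x1, y1, x2, y2))
            if fname not in image_ids:
                # read image size once per image
                w, h = Image.open(os.path.join(image_dir, fname)).size
                image_ids[fname] = len(image_ids) + 1
                images.append({"id": image_ids[fname], "file_name": fname,
                               "width": w, "height": h})
            annotations.append({
                "id": ann_id,
                "image_id": image_ids[fname],
                "category_id": int(cls),             # 1-based foreground id
                "bbox": [x1, y1, x2 - x1, y2 - y1],  # COCO uses [x, y, w, h]
                "area": (x2 - x1) * (y2 - y1),
                "iscrowd": 0,
            })
            ann_id += 1
    categories = [{"id": i, "name": n} for i, n in enumerate(class_names, 1)]
    with open(out_json, "w") as f:
        json.dump({"images": images, "annotations": annotations,
                   "categories": categories}, f)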

To make registration convenient, this post wraps the registration code into a class, borrowing from the CSDN article [3]:

import os

import cv2

from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.coco import load_coco_json
from detectron2.utils.visualizer import Visualizer

class Register:
    """Register our custom dataset with Detectron2."""
    CLASS_NAMES = ['__background__', 'Red', 'Blue', 'Yellow', 'White', 'Black', 'Other', 'NoHelmet']  # keep a background entry
    ROOT = "/home/your_dataset_dir"

    def __init__(self):
        self.CLASS_NAMES = Register.CLASS_NAMES or ['__background__', ]
        # dataset root
        self.DATASET_ROOT = Register.ROOT or '/home/yourdir'
        # ANN_ROOT = os.path.join(self.DATASET_ROOT, 'COCOformat')
        self.ANN_ROOT = self.DATASET_ROOT

        self.TRAIN_PATH = os.path.join(self.DATASET_ROOT, 'train')
        self.VAL_PATH = os.path.join(self.DATASET_ROOT, 'val')

        self.TRAIN_JSON = os.path.join(self.ANN_ROOT, 'train.json')
        self.VAL_JSON = os.path.join(self.ANN_ROOT, 'val.json')
        # VAL_JSON = os.path.join(self.ANN_ROOT, 'test.json')

        # declare the dataset splits
        self.PREDEFINED_SPLITS_DATASET = {
            "coco_my_train": (self.TRAIN_PATH, self.TRAIN_JSON),
            "coco_my_val": (self.VAL_PATH, self.VAL_JSON),
        }

    def register_dataset(self):
        """
        Register all splits listed in PREDEFINED_SPLITS_DATASET
        (this is the step that registers the custom dataset with Detectron2).
        """
        for key, (image_root, json_file) in self.PREDEFINED_SPLITS_DATASET.items():
            self.register_dataset_instances(name=key,
                                            json_file=json_file,
                                            image_root=image_root)

    @staticmethod
    def register_dataset_instances(name, json_file, image_root):
        """
        Register a dataset to DatasetCatalog and register its metadata
        to MetadataCatalog, setting the relevant attributes.
        """
        DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
        MetadataCatalog.get(name).set(json_file=json_file,
                                      image_root=image_root,
                                      evaluator_type="coco")

    def plain_register_dataset(self):
        """Explicitly register the datasets and their metadata."""
        # training set
        DatasetCatalog.register("coco_my_train", lambda: load_coco_json(self.TRAIN_JSON, self.TRAIN_PATH))
        MetadataCatalog.get("coco_my_train").set(thing_classes=self.CLASS_NAMES,  # optional; the Visualizer cannot render Chinese class names, so omit this if the names are Chinese
                                                 evaluator_type='coco',  # evaluation protocol
                                                 json_file=self.TRAIN_JSON,
                                                 image_root=self.TRAIN_PATH)

        # DatasetCatalog.register("coco_my_val", lambda: load_coco_json(VAL_JSON, VAL_PATH, "coco_2017_val"))
        # validation/test set
        DatasetCatalog.register("coco_my_val", lambda: load_coco_json(self.VAL_JSON, self.VAL_PATH))
        MetadataCatalog.get("coco_my_val").set(thing_classes=self.CLASS_NAMES,  # optional; see the note above
                                               evaluator_type='coco',  # evaluation protocol
                                               json_file=self.VAL_JSON,
                                               image_root=self.VAL_PATH)

    def checkout_dataset_annotation(self, name="coco_my_train"):
        """
        Visualize the dataset annotations to check that they are correct;
        you could also script this yourself, since it essentially amounts to
        checking whether boxes cross the image boundary. Optional helper.
        """
        # dataset_dicts = load_coco_json(TRAIN_JSON, TRAIN_PATH, name)
        dataset_dicts = load_coco_json(self.TRAIN_JSON, self.TRAIN_PATH)
        print(len(dataset_dicts))
        os.makedirs('out', exist_ok=True)  # output directory for the renderings
        for i, d in enumerate(dataset_dicts, 0):
            # print(d)
            img = cv2.imread(d["file_name"])
            # name must match the json loaded above so metadata lines up
            visualizer = Visualizer(img[:, :, ::-1], metadata=MetadataCatalog.get(name), scale=1.5)
            vis = visualizer.draw_dataset_dict(d)
            # cv2.imshow('show', vis.get_image()[:, :, ::-1])
            cv2.imwrite('out/' + str(i) + '.jpg', vis.get_image()[:, :, ::-1])
            # cv2.waitKey(0)
            if i == 200:  # only check the first 200 images
                break
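Before training, it is worth eyeballing a few annotated images. A hypothetical one-off usage (not part of train_net.py): register the metadata first so class names resolve, then dump the renderings into out/:

if __name__ == "__main__":
    r = Register()
    r.plain_register_dataset()  # sets thing_classes so the Visualizer can label boxes
    r.checkout_dataset_annotation(name="coco_my_train")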

Place the class above in tools/train_net.py and add a call on the second line of main():

def main(args):
    cfg = setup(args)
    Register().register_dataset()  # register my dataset
    ...

Editing the Config File

It is best to leave the Faster R-CNN config downloaded from the official repo, and the configs it depends on, unmodified; instead, write a separate file configs/my_config.yaml on top of them:

_BASE_: "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
DATASETS:
  TRAIN: ("coco_my_train",)
  TEST: ("coco_my_val",)
MODEL:
  ROI_HEADS:
    NUM_CLASSES: 8  # for Faster R-CNN this belongs under ROI_HEADS (the RETINANET key has no effect); 8 matches the class list above, which keeps a __background__ slot
  # WEIGHTS: "../tools/output/model_final.pth"
SOLVER:
  # IMS_PER_BATCH: 16
  # initial learning rate
  BASE_LR: 0.00025
  # decay the learning rate at these iteration counts
  # STEPS: (210000, 250000)
  # MAX_ITER: 270000
  CHECKPOINT_PERIOD: 1000
TEST:
  EVAL_PERIOD: 3000

Any further configuration changes can simply be made in this file.
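To confirm that the _BASE_ inheritance resolves as expected, a quick sanity check (run from the configs/ directory) can print the merged values; get_cfg and merge_from_file are the standard Detectron2 config API:

from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file("my_config.yaml")   # resolves _BASE_ files recursively
print(cfg.DATASETS.TRAIN)               # ('coco_my_train',)
print(cfg.MODEL.ROI_HEADS.NUM_CLASSES)  # 8
print(cfg.SOLVER.BASE_LR)               # 0.00025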

Train & Eval

Putting the training and evaluation commands into shell scripts keeps them tidy and easy to control.

################ train.sh ################
# on Linux, convert CRLF line endings to LF first
# lr = 0.00025 * num_gpus
python3 train_net.py \
  --config-file ../configs/my_config.yaml \
  --num-gpus 4 \
  SOLVER.IMS_PER_BATCH 16 \
  SOLVER.BASE_LR 0.001 \
  SOLVER.MAX_ITER 30000 \
  SOLVER.STEPS '(24000, 29000)'

############# train_resume.sh #############
# resume training from the last checkpoint
# --num-gpus cannot be omitted here (verified in practice)
python3 train_net.py \
  --config-file ../configs/my_config.yaml \
  --num-gpus 4 \
  --resume

################# eval.sh #################
python3 train_net.py \
  --config-file ../configs/my_config.yaml \
  --eval-only \
  MODEL.WEIGHTS output/model_final.pth

Run sh train.sh from the command line to start training.
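After training, a quick way to smoke-test the weights is DefaultPredictor. The sketch below is a hedged example rather than part of the project files: it assumes it runs from tools/ (so train_net.py is importable and output/model_final.pth exists), and the test image path and the 0.5 score threshold are placeholders.

import cv2

from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer

from train_net import Register  # safe to import: train_net.py guards __main__

Register().plain_register_dataset()  # populate thing_classes for labeling

cfg = get_cfg()
cfg.merge_from_file("../configs/my_config.yaml")
cfg.MODEL.WEIGHTS = "output/model_final.pth"
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # placeholder confidence threshold

predictor = DefaultPredictor(cfg)
img = cv2.imread("test.jpg")  # placeholder image path
outputs = predictor(img)

v = Visualizer(img[:, :, ::-1], metadata=MetadataCatalog.get("coco_my_val"))
vis = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2.imwrite("pred.jpg", vis.get_image()[:, :, ::-1])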

Appendix

The full train_net.py used by this post:

#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Detection Training Script.
This scripts reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that are specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend you to use detectron2 as an library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""

import logging
import os
from collections import OrderedDict

import cv2
import torch

import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.data.datasets import load_coco_json
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    PascalVOCDetectionEvaluator,
    SemSegEvaluator,
    verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
from detectron2.utils.visualizer import Visualizer

class Trainer(DefaultTrainer):
    """
    We use the "DefaultTrainer" which contains pre-defined default logic for
    standard training workflow. They may not work for you, especially if you
    are working on a new research project. In that case you can write your
    own training loop. You can use "tools/plain_train_net.py" as an example.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given datasets.
        This uses the special metadata "evaluator_type" associated with each builtin datasets.
        For your own datasets, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                    ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                    output_dir=output_folder,
                )
            )
        if evaluator_type in ["coco", "coco_panoptic_seg"]:
            evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
        if evaluator_type == "coco_panoptic_seg":
            evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
        if evaluator_type == "cityscapes_instance":
            assert (
                    torch.cuda.device_count() >= comm.get_rank()
            ), "CityscapesEvaluator currently does not work with multiple machines."
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == "cityscapes_sem_seg":
            assert (
                    torch.cuda.device_count() >= comm.get_rank()
            ), "CityscapesEvaluator currently does not work with multiple machines."
            return CityscapesSemSegEvaluator(dataset_name)
        elif evaluator_type == "pascal_voc":
            return PascalVOCDetectionEvaluator(dataset_name)
        elif evaluator_type == "lvis":
            return LVISEvaluator(dataset_name, cfg, True, output_folder)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    @classmethod
    def test_with_TTA(cls, cfg, model):
        logger = logging.getLogger("detectron2.trainer")
        # In the end of training, run an evaluation with TTA
        # Only support some R-CNN models.
        logger.info("Running inference with test-time augmentation ...")
        model = GeneralizedRCNNWithTTA(cfg, model)
        evaluators = [
            cls.build_evaluator(
                cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
            )
            for name in cfg.DATASETS.TEST
        ]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({k + "_TTA": v for k, v in res.items()})
        return res

class Register:
    """Register our custom dataset with Detectron2."""
    CLASS_NAMES = ['__background__', 'Red', 'Blue', 'Yellow', 'White', 'Black', 'Other', 'NoHelmet']  # keep a background entry
    ROOT = "/home/mydir"

    def __init__(self):
        self.CLASS_NAMES = Register.CLASS_NAMES or ['__background__', ]
        # dataset root
        self.DATASET_ROOT = Register.ROOT or '/home/yourdir'
        # ANN_ROOT = os.path.join(self.DATASET_ROOT, 'COCOformat')
        self.ANN_ROOT = self.DATASET_ROOT

        self.TRAIN_PATH = os.path.join(self.DATASET_ROOT, 'train')
        self.VAL_PATH = os.path.join(self.DATASET_ROOT, 'val')

        self.TRAIN_JSON = os.path.join(self.ANN_ROOT, 'train.json')
        self.VAL_JSON = os.path.join(self.ANN_ROOT, 'val.json')
        # VAL_JSON = os.path.join(self.ANN_ROOT, 'test.json')

        # declare the dataset splits
        self.PREDEFINED_SPLITS_DATASET = {
            "coco_my_train": (self.TRAIN_PATH, self.TRAIN_JSON),
            "coco_my_val": (self.VAL_PATH, self.VAL_JSON),
        }

    def register_dataset(self):
        """
        Register all splits listed in PREDEFINED_SPLITS_DATASET
        (this is the step that registers the custom dataset with Detectron2).
        """
        for key, (image_root, json_file) in self.PREDEFINED_SPLITS_DATASET.items():
            self.register_dataset_instances(name=key,
                                            json_file=json_file,
                                            image_root=image_root)

    @staticmethod
    def register_dataset_instances(name, json_file, image_root):
        """
        Register a dataset to DatasetCatalog and register its metadata
        to MetadataCatalog, setting the relevant attributes.
        """
        DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
        MetadataCatalog.get(name).set(json_file=json_file,
                                      image_root=image_root,
                                      evaluator_type="coco")

    def plain_register_dataset(self):
        """Explicitly register the datasets and their metadata."""
        # training set
        DatasetCatalog.register("coco_my_train", lambda: load_coco_json(self.TRAIN_JSON, self.TRAIN_PATH))
        MetadataCatalog.get("coco_my_train").set(thing_classes=self.CLASS_NAMES,  # optional; the Visualizer cannot render Chinese class names, so omit this if the names are Chinese
                                                 evaluator_type='coco',  # evaluation protocol
                                                 json_file=self.TRAIN_JSON,
                                                 image_root=self.TRAIN_PATH)

        # DatasetCatalog.register("coco_my_val", lambda: load_coco_json(VAL_JSON, VAL_PATH, "coco_2017_val"))
        # validation/test set
        DatasetCatalog.register("coco_my_val", lambda: load_coco_json(self.VAL_JSON, self.VAL_PATH))
        MetadataCatalog.get("coco_my_val").set(thing_classes=self.CLASS_NAMES,  # optional; see the note above
                                               evaluator_type='coco',  # evaluation protocol
                                               json_file=self.VAL_JSON,
                                               image_root=self.VAL_PATH)

    def checkout_dataset_annotation(self, name="coco_my_train"):
        """
        Visualize the dataset annotations to check that they are correct;
        you could also script this yourself, since it essentially amounts to
        checking whether boxes cross the image boundary. Optional helper.
        """
        # dataset_dicts = load_coco_json(TRAIN_JSON, TRAIN_PATH, name)
        dataset_dicts = load_coco_json(self.TRAIN_JSON, self.TRAIN_PATH)
        print(len(dataset_dicts))
        os.makedirs('out', exist_ok=True)  # output directory for the renderings
        for i, d in enumerate(dataset_dicts, 0):
            # print(d)
            img = cv2.imread(d["file_name"])
            # name must match the json loaded above so metadata lines up
            visualizer = Visualizer(img[:, :, ::-1], metadata=MetadataCatalog.get(name), scale=1.5)
            vis = visualizer.draw_dataset_dict(d)
            # cv2.imshow('show', vis.get_image()[:, :, ::-1])
            cv2.imwrite('out/' + str(i) + '.jpg', vis.get_image()[:, :, ::-1])
            # cv2.waitKey(0)
            if i == 200:  # only check the first 200 images
                break

def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg

def main(args):
    cfg = setup(args)
    Register().register_dataset()  # register my dataset

    if args.eval_only:
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    """
    If you'd like to do anything fancier than the standard training logic,
    consider writing your own training loop (see plain_train_net.py) or
    subclassing the trainer.
    """
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks(
            [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
        )
    return trainer.train()

if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )

References

[1] Getting Started with Detectron2 - Github

[2] Quick Faster R-CNN training with detectron2 - Medium

[3] Training your own dataset with Detectron2 (fairly detailed) - CSDN

[4] Dataset format conversion - Github

[5] Github project - detectron2 installation and basic usage - AIUAI

[6] Github project - detectron2 model training on a custom dataset - AIUAI
