Training Detectron2 on Your Own Dataset (Fairly Detailed)

The previous article covered setting up the Detectron2 environment on CentOS 7; this one covers how to train on your own dataset, mainly for object detection.
The official GETTING_STARTED.md mentions a tutorial on training custom datasets, but I could not reach that URL, so I had to search around instead; fortunately other bloggers have written it up.
So how do you train on your own dataset?
1 Convert your dataset to COCO format. For the conversion code you can refer to this; it is detailed enough to get your dataset converted. A minimal sketch of the JSON structure the loader expects follows this list.
2 Register the dataset with Detectron2. That sounds fancy, but it simply means loading your own dataset into the project through a defined entry point.
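For step 1, here is a minimal hand-written sketch, expressed as a Python dict, of the COCO-style structure that the COCO loader expects; the file name, sizes, and category names are placeholders of mine, not taken from the original post:

# Illustration only (placeholder values): the three top-level keys a COCO-format
# detection json needs are "images", "annotations" and "categories".
coco_example = {
    "images": [
        {"id": 0, "file_name": "000001.jpg", "width": 1280, "height": 720},
    ],
    "annotations": [
        {
            "id": 0,
            "image_id": 0,
            "category_id": 0,             # must match an entry in "categories"
            "bbox": [100, 150, 200, 80],  # [x, y, width, height]
            "area": 200 * 80,
            "iscrowd": 0,
        },
    ],
    "categories": [
        {"id": 0, "name": "name_1"},
        {"id": 1, "name": "name_2"},
    ],
}

Note that the category ids in this sketch start from 0, which matches the recommendation discussed in the registration section below.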
A few files matter here. In detectron2/tools/ there is a README.md that describes what each .py file in that folder does, so it is worth a careful read. For training we mainly care about train_net.py and plain_train_net.py; in practice train_net.py is enough. Copy that file to train.py (a straight copy, don't change anything yet), which gives the situation below.
The file contains a Trainer class that inherits from DefaultTrainer and overrides two methods, build_evaluator and test_with_TTA. As the English docstrings make clear, one builds the evaluator for a given dataset and the other runs test-time-augmentation evaluation for the R-CNN family. For now it is enough just to know this. Here is train.py right after copying:
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Detection Training Script.
This script reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that are specific to the built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend you to use detectron2 as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""

import logging
import os
from collections import OrderedDict
import torch

import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    PascalVOCDetectionEvaluator,
    SemSegEvaluator,
    verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA


class Trainer(DefaultTrainer):
    """
    We use the "DefaultTrainer" which contains pre-defined default logic for
    standard training workflow. They may not work for you, especially if you
    are working on a new research project. In that case you can write your
    own training loop. You can use "tools/plain_train_net.py" as an example.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
                    ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                    output_dir=output_folder,
                )
            )
        if evaluator_type in ["coco", "coco_panoptic_seg"]:
            evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
        if evaluator_type == "coco_panoptic_seg":
            evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
        if evaluator_type == "cityscapes_instance":
            assert (
                torch.cuda.device_count() >= comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == "cityscapes_sem_seg":
            assert (
                torch.cuda.device_count() >= comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesSemSegEvaluator(dataset_name)
        elif evaluator_type == "pascal_voc":
            return PascalVOCDetectionEvaluator(dataset_name)
        elif evaluator_type == "lvis":
            return LVISEvaluator(dataset_name, cfg, True, output_folder)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    @classmethod
    def test_with_TTA(cls, cfg, model):
        logger = logging.getLogger("detectron2.trainer")
        # In the end of training, run an evaluation with TTA
        # Only support some R-CNN models.
        logger.info("Running inference with test-time augmentation ...")
        model = GeneralizedRCNNWithTTA(cfg, model)
        evaluators = [
            cls.build_evaluator(
                cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
            )
            for name in cfg.DATASETS.TEST
        ]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({k + "_TTA": v for k, v in res.items()})
        return res


def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg


def main(args):
    cfg = setup(args)
    plain_register_dataset()  # register the custom dataset (the registration code is added below)

    if args.eval_only:
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    """
    If you'd like to do anything fancier than the standard training logic,
    consider writing your own training loop (see plain_train_net.py) or
    subclassing the trainer.
    """
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks(
            [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
        )
    return trainer.train()


if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
Modifying the train.py above
1 Register your own dataset
Pay attention to the CLASS_NAMES list: its order must match the category IDs in your COCO-format file, because the program maps CLASS_NAMES onto the range [0, len(CLASS_NAMES)). If the category_id values in your COCO dataset start from 1, it is best to add an entry with category_id: 0, name: background to your JSON file. You do not need any annotations for this background class (regions, widths, heights and so on), but the category_id: 0 entry itself must exist, otherwise you will be in for a surprise when you test after training. Including a background class has drawbacks, though: it adds an output to the classification and regression heads, and it can lower the computed mAP because an extra background class is counted. So the recommendation is to have category_id start directly from 0 in your JSON, in which case the category_id: 0 background entry is unnecessary. For non-COCO datasets, it is best to use indices 0 to n-1 (n being the number of classes), e.g. category_id: 0, name: name1; category_id: 1, name: name2; and so on. Use English class names where possible; Chinese names will be garbled.
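As an optional sanity check for the category_id convention above, here is a small sketch of my own (the helper and its name are not from the original post) that verifies the ids in a COCO json are contiguous and start at 0, and prints the id-to-name mapping so it can be compared against CLASS_NAMES by eye:

import json

# Hypothetical helper, not from the original post: check that category ids in a
# COCO-format json are contiguous and start at 0, then print the id -> name mapping.
def check_category_ids(json_path):
    with open(json_path) as f:
        cats = sorted(json.load(f)["categories"], key=lambda c: c["id"])
    ids = [c["id"] for c in cats]
    assert ids == list(range(len(ids))), "category ids should be 0..N-1, got {}".format(ids)
    for c in cats:
        print(c["id"], c["name"])

Calling check_category_ids on your train.json before training is a quick way to catch ids that start at 1.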
The following can be placed directly into the copied train.py.
The data is laid out on disk as follows:
----- yourdataDir
--------- JPEGImages
-------------***.jpg
-------------…jpg
--------- COCOformat
--------------train.json
--------------test.json
--------------val.json
# Imports needed for dataset registration and visualization
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.coco import load_coco_json
from detectron2.utils.visualizer import Visualizer
import pycocotools
import cv2

# Declare the classes; keep the order consistent with the category ids in your json
CLASS_NAMES = ["__background__", "name_1", "name_2"]  # ... extend with your own class names

# Dataset paths
DATASET_ROOT = '/home/Yourdatadir'
ANN_ROOT = os.path.join(DATASET_ROOT, 'COCOformat')
TRAIN_PATH = os.path.join(DATASET_ROOT, 'JPEGImages')
VAL_PATH = os.path.join(DATASET_ROOT, 'JPEGImages')
TRAIN_JSON = os.path.join(ANN_ROOT, 'train.json')
#VAL_JSON = os.path.join(ANN_ROOT, 'val.json')
VAL_JSON = os.path.join(ANN_ROOT, 'test.json')

# Declare the dataset splits
PREDEFINED_SPLITS_DATASET = {
    "coco_my_train": (TRAIN_PATH, TRAIN_JSON),
    "coco_my_val": (VAL_PATH, VAL_JSON),
}
#=========== Two ways of registering the dataset follow. I used the second one, plain_register_dataset; register_dataset works just as well. ===========
# Register the dataset (this step is what actually registers the custom dataset with Detectron2)
def register_dataset():
    """
    purpose: register all splits of the dataset listed in PREDEFINED_SPLITS_DATASET
    """
    for key, (image_root, json_file) in PREDEFINED_SPLITS_DATASET.items():
        register_dataset_instances(name=key,
                                   json_file=json_file,
                                   image_root=image_root)


# Register one dataset instance and load the object instances it contains
def register_dataset_instances(name, json_file, image_root):
    """
    purpose: register the dataset to DatasetCatalog,
             register its metadata to MetadataCatalog and set the attributes
    """
    DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
    MetadataCatalog.get(name).set(json_file=json_file,
                                  image_root=image_root,
                                  evaluator_type="coco")
#=============================
# Register the dataset and its metadata
def plain_register_dataset():
    # Training set
    DatasetCatalog.register("coco_my_train", lambda: load_coco_json(TRAIN_JSON, TRAIN_PATH))
    MetadataCatalog.get("coco_my_train").set(thing_classes=CLASS_NAMES,  # optional, but note that Chinese class names will not display correctly
                                             evaluator_type='coco',      # evaluation type
                                             json_file=TRAIN_JSON,
                                             image_root=TRAIN_PATH)

    # DatasetCatalog.register("coco_my_val", lambda: load_coco_json(VAL_JSON, VAL_PATH, "coco_2017_val"))
    # Validation / test set
    DatasetCatalog.register("coco_my_val", lambda: load_coco_json(VAL_JSON, VAL_PATH))
    MetadataCatalog.get("coco_my_val").set(thing_classes=CLASS_NAMES,    # optional, same caveat about Chinese names
                                           evaluator_type='coco',        # evaluation type
                                           json_file=VAL_JSON,
                                           image_root=VAL_PATH)
# Inspect the dataset annotations: visualize them to check that they are correct.
# You could also write your own script for this (essentially checking whether boxes exceed the image boundaries).
# Using this function is optional.
def checkout_dataset_annotation(name="coco_my_val"):
    # dataset_dicts = load_coco_json(TRAIN_JSON, TRAIN_PATH, name)
    dataset_dicts = load_coco_json(TRAIN_JSON, TRAIN_PATH)
    print(len(dataset_dicts))
    for i, d in enumerate(dataset_dicts, 0):
        # print(d)
        img = cv2.imread(d["file_name"])
        visualizer = Visualizer(img[:, :, ::-1], metadata=MetadataCatalog.get(name), scale=1.5)
        vis = visualizer.draw_dataset_dict(d)
        # cv2.imshow('show', vis.get_image()[:, :, ::-1])
        cv2.imwrite('out/' + str(i) + '.jpg', vis.get_image()[:, :, ::-1])
        # cv2.waitKey(0)
        if i == 200:
            break
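If you want to run this visualization check on its own, something along the following lines should work; the out/ directory creation and the call sequence are my own sketch, not part of the original train.py:

# Sketch of standalone usage (run after the registration code above has been defined):
# register the datasets, make sure the output directory exists, then dump annotated images.
plain_register_dataset()
os.makedirs("out", exist_ok=True)            # checkout_dataset_annotation writes out/<i>.jpg
checkout_dataset_annotation("coco_my_val")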
That is all there is to dataset registration. Next, for the specific task at hand, we can set some hyperparameters, mainly inside the setup() function.
Taking RetinaNet_R_50 as an example:
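The concrete values depend on your data and hardware; as a hedged sketch (the numbers, the weights path, and the choice of overrides are placeholders of mine, not settings from the original post), setup() might be extended like this for the registered dataset and a RetinaNet-R50 config:

def setup(args):
    """
    Create configs and perform basic setups (sketch with placeholder values).
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)   # e.g. configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml
    cfg.merge_from_list(args.opts)

    # Overrides for the custom dataset registered above; all numbers are placeholders.
    cfg.DATASETS.TRAIN = ("coco_my_train",)
    cfg.DATASETS.TEST = ("coco_my_val",)
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.RETINANET.NUM_CLASSES = 2      # number of foreground classes in your data
    cfg.MODEL.WEIGHTS = "/path/to/retinanet_R_50_FPN_3x.pkl"  # placeholder path to pre-trained weights
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.001
    cfg.SOLVER.MAX_ITER = 40000

    cfg.freeze()
    default_setup(cfg, args)
    return cfg

Training could then be launched with something like python train.py --config-file configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml --num-gpus 1, adjusting the config path to your detectron2 checkout.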
