当前位置:网站首页>[deep learning] [original] let yolov6-0.1.0 support the txt reading dataset mode of yolov5
[deep learning] [original] let yolov6-0.1.0 support the txt reading dataset mode of yolov5
2022-07-05 16:29:00 【FL1623863129】
Meituan has released the YOLOv6 framework, which looks very promising at the moment, but since it has not been out for long, many parts are still unpolished. Today I trained it on my own dataset and found that the framework only supports datasets arranged in the following layout:
custom_dataset
├── images
│ ├── train
│ │ ├── train0.jpg
│ │ └── train1.jpg
│ ├── val
│ │ ├── val0.jpg
│ │ └── val1.jpg
│ └── test
│ ├── test0.jpg
│ └── test1.jpg
└── labels
├── train
│ ├── train0.txt
│ └── train1.txt
├── val
│ ├── val0.txt
│ └── val1.txt
└── test
├── test0.txt
    └── test1.txt

However, I prefer the YOLOv5 layout (of course, YOLOv5 also supports the layout above):
images-
1.jpg
2.jpg
......
labels-
1.txt
2.txt
.......
Then list the image paths of each dataset split in txt files:
train.txt
/home/fut/data/images/1.jpg
/home/fut/data/images/2.jpg
....
val.txt
/home/fut/data/images/6.jpg
/home/fut/data/images/7.jpg
....
Configure in the configuration file :
train: myproj/config/train.txt
val: myproj/config/val.txt
nc: 2
# whether it is coco dataset, only coco dataset should be set to True.
is_coco: False
# class names
names: ['dog','cat']

This way you no longer have to split the data into four folders every time. Without further ado, let's modify the code: open YOLOv6-0.1.0/yolov6/data/datasets.py and edit
the get_imgs_labels(self, img_dir) function, which controls how the dataset is loaded. Below is the complete modified code of that function:
def get_imgs_labels(self, img_dir):
    """Collect image paths and their labels for training/validation.

    Supports two dataset layouts:
      * ``img_dir`` is a directory -> every image file directly inside it
        is used (original YOLOv6 behaviour).
      * ``img_dir`` is a txt file (YOLOv5 style) -> each non-empty line
        is the path of one image.

    Image shapes and parsed labels are cached to a hidden ``.json`` file
    next to the dataset so repeated runs can skip the expensive checks.

    Args:
        img_dir (str): directory of images, or path to a txt file listing
            one image path per line.

    Returns:
        tuple: (img_paths, labels) where ``img_paths`` is a tuple of valid
        image paths and ``labels`` the matching tuple of float32 arrays of
        shape (n, 5); images with no labels get an empty (0, 5) array.
    """
    NUM_THREADS = min(8, os.cpu_count())
    if os.path.isdir(img_dir):
        # Directory mode: the cache file lives next to the image folder.
        valid_img_record = osp.join(
            osp.dirname(img_dir), "." + osp.basename(img_dir) + ".json"
        )
        img_paths = glob.glob(osp.join(img_dir, "*"), recursive=True)
        img_paths = sorted(
            p for p in img_paths if p.split(".")[-1].lower() in IMG_FORMATS
        )
        assert img_paths, f"No images found in {img_dir}."
    else:
        # YOLOv5-style txt mode: one image path per line; skip blank lines
        # so a trailing newline does not produce an empty path.
        with open(img_dir, "r") as f:
            img_paths = [line.strip() for line in f if line.strip()]
        assert img_paths, f"No image paths listed in {img_dir}."
        # splitext instead of slicing off the last 4 chars, so list files
        # with extensions other than exactly ".txt" also work.
        valid_img_record = osp.join(
            osp.dirname(img_dir),
            "." + osp.splitext(osp.basename(img_dir))[0] + ".json",
        )

    img_hash = self.get_hash(img_paths)
    if osp.exists(valid_img_record):
        with open(valid_img_record, "r") as f:
            cache_info = json.load(f)
        if "image_hash" in cache_info and cache_info["image_hash"] == img_hash:
            img_info = cache_info["information"]
        else:
            self.check_images = True
    else:
        self.check_images = True

    # Check images (main process only).
    # NOTE(review): if check_images is True on a non-main process,
    # img_info is never assigned here -- same as upstream; confirm the
    # caller guarantees a valid cache file exists for non-main ranks.
    if self.check_images and self.main_process:
        img_info = {}
        nc, msgs = 0, []  # number of corrupt images, warning messages
        LOGGER.info(
            f"{self.task}: Checking formats of images with {NUM_THREADS} process(es): "
        )
        with Pool(NUM_THREADS) as pool:
            pbar = tqdm(
                pool.imap(TrainValDataset.check_image, img_paths),
                total=len(img_paths),
            )
            for img_path, shape_per_img, nc_per_img, msg in pbar:
                if nc_per_img == 0:  # not corrupted
                    img_info[img_path] = {"shape": shape_per_img}
                nc += nc_per_img
                if msg:
                    msgs.append(msg)
                pbar.desc = f"{nc} image(s) corrupted"
        pbar.close()
        if msgs:
            LOGGER.info("\n".join(msgs))
        cache_info = {"information": img_info, "image_hash": img_hash}
        # Persist valid image info so the next run can skip the check.
        with open(valid_img_record, "w") as f:
            json.dump(cache_info, f)

    # Build label paths in the SAME order as img_paths. The previous code
    # sorted label_paths here, which silently mis-pairs images and labels
    # in txt mode where img_paths is not sorted. Deriving the labels dir
    # per image (not just from img_paths[0]) also supports txt lists that
    # span multiple image directories.
    img_paths = list(img_info.keys())
    label_paths = [
        osp.join(
            osp.dirname(p).replace("images", "labels"),
            osp.splitext(osp.basename(p))[0] + ".txt",
        )
        for p in img_paths
    ]
    label_hash = self.get_hash(label_paths)
    if "label_hash" not in cache_info or cache_info["label_hash"] != label_hash:
        self.check_labels = True

    if self.check_labels:
        cache_info["label_hash"] = label_hash
        # missing, found, empty, corrupt label files; warning messages
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []
        LOGGER.info(
            f"{self.task}: Checking formats of labels with {NUM_THREADS} process(es): "
        )
        with Pool(NUM_THREADS) as pool:
            pbar = pool.imap(
                TrainValDataset.check_label_files, zip(img_paths, label_paths)
            )
            pbar = tqdm(pbar, total=len(label_paths)) if self.main_process else pbar
            for (
                img_path,
                labels_per_file,
                nc_per_file,
                nm_per_file,
                nf_per_file,
                ne_per_file,
                msg,
            ) in pbar:
                if nc_per_file == 0:
                    img_info[img_path]["labels"] = labels_per_file
                else:
                    # Drop images whose label file is invalid.
                    img_info.pop(img_path)
                nc += nc_per_file
                nm += nm_per_file
                nf += nf_per_file
                ne += ne_per_file
                if msg:
                    msgs.append(msg)
                if self.main_process:
                    pbar.desc = f"{nf} label(s) found, {nm} label(s) missing, {ne} label(s) empty, {nc} invalid label files"
        if self.main_process:
            pbar.close()
            with open(valid_img_record, "w") as f:
                json.dump(cache_info, f)
        if msgs:
            LOGGER.info("\n".join(msgs))
        if nf == 0:
            # Use the local img_paths: self.img_paths may not be set yet
            # at this point in the dataset's construction.
            LOGGER.warning(
                f"WARNING: No labels found in {osp.dirname(img_paths[0])}. "
            )

    if self.task.lower() == "val":
        if self.data_dict.get("is_coco", False):
            # Use the original json file when evaluating on the coco dataset.
            assert osp.exists(
                self.data_dict["anno_path"]
            ), "Eval on coco dataset must provide valid path of the annotation file in config file: data/coco.yaml"
        else:
            assert (
                self.class_names
            ), "Class names is required when converting labels to coco format for evaluating."
            save_dir = osp.join(osp.dirname(osp.dirname(img_dir)), "annotations")
            # makedirs + exist_ok: plain mkdir raised on an existing dir
            # and could not create missing parents.
            os.makedirs(save_dir, exist_ok=True)
            save_path = osp.join(
                save_dir, "instances_" + osp.basename(img_dir) + ".json"
            )
            TrainValDataset.generate_coco_format_labels(
                img_info, self.class_names, save_path
            )

    img_paths, labels = list(
        zip(
            *[
                (
                    img_path,
                    np.array(info["labels"], dtype=np.float32)
                    if info["labels"]
                    else np.zeros((0, 5), dtype=np.float32),
                )
                for img_path, info in img_info.items()
            ]
        )
    )
    self.img_info = img_info
    LOGGER.info(
        f"{self.task}: Final numbers of valid images: {len(img_paths)}/ labels: {len(labels)}. "
    )
    return img_paths, labels
边栏推荐
- Pits encountered in the use of boolean type in development
- Cartoon: what is blue-green deployment?
- Explain in detail the functions and underlying implementation logic of the groups sets statement in SQL
- sql中set标签的使用
- 用键盘输入一条命令
- Intelligent metal detector based on openharmony
- 视觉体验全面升级,豪威集团与英特尔Evo 3.0共同加速PC产业变革
- Apiccloud cloud debugging solution
- Domestic API management artifact used by the company
- 移动办公时如何使用frp内网穿透+teamviewer方式快速连入家中内网主机
猜你喜欢

数据访问 - EntityFramework集成

The new version of effect editor is online! 3D rendering, labeling, and animation, this time an editor is enough

【学术相关】多位博士毕业去了三四流高校,目前惨不忍睹……

Pspnet | semantic segmentation and scene analysis

ES6深入—ES6 Generator 函数

数据湖(十四):Spark与Iceberg整合查询操作

Use of RLOCK lock

Research and development efficiency measurement index composition and efficiency measurement methodology
![21. [STM32] I don't understand the I2C protocol. Dig deep into the sequence diagram to help you write the underlying driver](/img/f4/2c935dd9933f5cd4324c29c41ab221.png)
21. [STM32] I don't understand the I2C protocol. Dig deep into the sequence diagram to help you write the underlying driver

Reduce the cost by 40%! Container practice of redis multi tenant cluster
随机推荐
漫画:什么是分布式事务?
Cheer yourself up
异常com.alibaba.fastjson.JSONException: not match : - =
vant tabbar遮挡内容的解决方式
The database of the server is not connected to 200310060 "unknown error" [the service is up, the firewall is off, the port is on, and the netlent port is not connected]
[js] 技巧 简化if 判空
Replknet: it's not that large convolution is bad, but that convolution is not large enough. 31x31 convolution. Let's have a look at | CVPR 2022
践行自主可控3.0,真正开创中国人自己的开源事业
Flet教程之 09 NavigationRail 基础入门(教程含源码)
事务回滚异常
搜索 正排索引 和 倒排索引 区别
Coding devsecops helps financial enterprises run out of digital acceleration
详解SQL中Groupings Sets 语句的功能和底层实现逻辑
【深度学习】深度学习如何影响运筹学?
You should have your own persistence
obj解析为集合
程序员如何提升自己的格局?
Pspnet | semantic segmentation and scene analysis
[深度学习][原创]让yolov6-0.1.0支持yolov5的txt读取数据集模式
Query the latest record in SQL