A quick aside: annotating data has always been one of the most expensive parts of deep learning, and repetitive labeling is genuinely painful for people. After a few object detection projects I kept wanting to build a semi-automatic annotation tool, but a GUI application felt like a lot of work from design through implementation, and I had little experience with it. Then it occurred to me: why build one at all? Convert the detection results into labelme-format JSON files and use labelme itself to correct them. That idea led to the content below; it saves an enormous amount of effort and keeps things simple.
Abstract:
This article addresses semi-automatic data annotation for darknet YOLOv4 object detection tasks. The workflow is:
1. Manually annotate a small batch of data and train a model;
2. Use the model to predict on another small batch of data;
3. Convert the detection results into labelme-format JSON files and open them in labelme for adjustment;
4. Add the corrected data to the training set and retrain the model;
5. Stop once there is enough data, otherwise go back to step 2.
@[toc]
1. Reading an image and running prediction with darkNet
1.1 Packaging darknet as a DLL
For darknet environment setup, see link.
Here the darkNet framework is exported as a DLL and called from a separate project, which keeps the program lean and well organized. Simply build the yolo_cpp_dll project below with Visual Studio.
1.2 Configuring the new project
- Configure the OpenCV include directories and library directories. (screenshot)
- Linker ---> Additional Dependencies. (screenshot)
- Add three files to the project's source files: darknet.h and yolo_v2_class.hpp are files from the darknet project; yolo_cpp_dll.lib is generated by building the yolo_cpp_dll.sln project. (screenshot)
- Place two DLLs in the project's exe directory: yolo_cpp_dll.dll is generated by building yolo_cpp_dll.sln; pthreadVC2.dll is a darkNet dependency, found under darknet\build\darknet\x64 in the darkNet project. (screenshot)

With these settings in place, the setup can be verified with the minimal sketch below.
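This is only a smoke test, not part of the project: the .cfg, .weights and image paths are placeholders to be replaced with your own files; Detector and bbox_t come from yolo_v2_class.hpp and are loaded through the DLL.

#include <iostream>
#include <vector>
#include "yolo_v2_class.hpp"   // Detector and bbox_t, exported by yolo_cpp_dll
#include "opencv.hpp"          // OpenCV headers (include directory configured above)

int main()
{
    // placeholder paths -- replace with your own model and test image
    Detector detector("yolov4.cfg", "yolov4.weights");  // loads the network through the DLL
    cv::Mat img = cv::imread("test.jpg");
    if (img.empty()) { std::cout << "image not found\n"; return -1; }
    std::vector<bbox_t> boxes = detector.detect(img);   // cv::Mat overload of detect()
    std::cout << "detected " << boxes.size() << " objects\n";
    return 0;
}

If this prints a detection count, the DLL, the import library and the OpenCV configuration are all working.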
2. Converting detection results to labelme format
2.1 Notes
The results of darknet YOLO detection are stored in a std::vector<bbox_t>, where bbox_t is a struct defined in yolo_v2_class.hpp as follows:
struct bbox_t {
unsigned int x, y, w, h; // (x,y) - top-left corner, (w, h) - width & height of bounded box
float prob; // confidence - probability that the object was found correctly
unsigned int obj_id; // class of object - from range [0, classes-1]
unsigned int track_id; // tracking id for video (0 - untracked, 1 - inf - tracked object)
unsigned int frames_counter; // counter of frames on which the object was detected
float x_3d, y_3d, z_3d; // center of object (in Meters) if ZED 3D Camera is used
};
When the annotation type is rectangle, a labelme label file looks like the example below (note the mapping: bbox_t stores the top-left corner plus width and height, while a labelme rectangle is stored as two points, the top-left and the bottom-right corner).
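The example here is reproduced (keeping one of its two shapes) from the annotation that also appears as a comment in the full source in section 3:

{
  "version": "4.5.7",
  "flags": {},
  "shapes": [
    {
      "label": "0",
      "points": [
        [
          1587.25,
          1060.8333333333335
        ],
        [
          1726.8333333333335,
          1221.25
        ]
      ],
      "group_id": null,
      "shape_type": "rectangle",
      "flags": {}
    }
  ],
  "imagePath": "000000012.bmp",
  "imageData": null,
  "imageHeight": 2000,
  "imageWidth": 2400
}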
2.2 Conversion function
int resultWriteToJson(const std::string jsonPath, const std::string imagePath, const int imgH, const int imgW, const std::vector<bbox_t> &result)
{
    // jsonPath:  path of the json file to write
    // imagePath: image file name stored in the json (labelme uses it to locate the image)
    std::ofstream out(jsonPath, std::ios::out); // use std::ios::app instead to append to an existing file
    if (!out.is_open())
    {
        std::cout << "can't open " << jsonPath << "!\n";
        return -1;
    }
    // write the json header
    out << "{\n" << "\"version\":\"4.5.7\",\n";
    out << "\"flags\" : {},\n";
    out << "\"shapes\" : [\n";
    // one rectangle shape per detected box
    for (size_t i = 0; i < result.size(); i++)
    {
        bbox_t box = result[i];
        out << "{\n";
        out << "\"label\":" << "\"" << box.obj_id << "\",\n";
        out << "\"points\":[\n";
        out << "[\n" << box.x << ",\n" << box.y << "\n],\n";                // top-left corner
        out << "[\n" << box.x + box.w << ",\n" << box.y + box.h << "\n]\n"; // bottom-right corner
        out << "],\n";
        out << "\"group_id\":null,\n";
        out << "\"shape_type\":\"rectangle\",\n";
        out << "\"flags\":{}\n";
        out << "}";
        if (i != result.size() - 1) out << ",\n"; // no comma after the last "}"
    }
    out << "],\n";
    out << "\"imagePath\" :" << "\"" << imagePath << "\",\n";
    out << "\"imageData\" :" << "null,\n";
    out << "\"imageHeight\":" << imgH << ",\n";
    out << "\"imageWidth\":" << imgW << "\n";
    out << "}\n";
    out.close();
    return 0;
}
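The function above writes the JSON by hand with a std::ofstream, which is all this fixed structure needs. If you would rather get output that is indented like labelme's own files, a JSON library can handle formatting and escaping for you. Below is a rough sketch using the single-header nlohmann/json library; this is not part of the project (the library would have to be added to the include path, and resultWriteToJsonPretty is a hypothetical name):

#include <fstream>
#include <string>
#include <vector>
#include <nlohmann/json.hpp>     // assumption: the single-header nlohmann/json library is available
#include "yolo_v2_class.hpp"     // bbox_t

// Hypothetical variant of resultWriteToJson that builds the document with
// nlohmann::json and dumps it with 2-space indentation, like labelme's own files.
int resultWriteToJsonPretty(const std::string &jsonPath, const std::string &imagePath,
                            int imgH, int imgW, const std::vector<bbox_t> &result)
{
    nlohmann::json j;
    j["version"] = "4.5.7";
    j["flags"] = nlohmann::json::object();
    j["shapes"] = nlohmann::json::array();
    for (const auto &box : result)
    {
        nlohmann::json shape;
        shape["label"] = std::to_string(box.obj_id);
        // labelme rectangle: [top-left, bottom-right]
        shape["points"] = { { box.x, box.y }, { box.x + box.w, box.y + box.h } };
        shape["group_id"] = nullptr;
        shape["shape_type"] = "rectangle";
        shape["flags"] = nlohmann::json::object();
        j["shapes"].push_back(shape);
    }
    j["imagePath"] = imagePath;
    j["imageData"] = nullptr;
    j["imageHeight"] = imgH;
    j["imageWidth"] = imgW;

    std::ofstream out(jsonPath);
    if (!out.is_open()) return -1;
    out << j.dump(2); // indent with 2 spaces
    return 0;
}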
2.3 Conversion example
Read one image with the model, run prediction, and convert the result to labelme format as in the screenshot below. Comparing it with the manually annotated file from section 2.1, apart from the output not being indented the content is the same (a difference between the predicted positions and the manually annotated positions is normal), and labelme can open it.
(screenshot of the converted json file)
3. Full source code
Function overview:
- selectResults: removes detections that lie on the image border
- drawResults: visualizes the detection results
- demo1: predicts a single image, shows the result, and saves it in labelme format
- demo2: batch-predicts the images in a folder and shows the results
- demo3: batch-predicts the images in a folder and saves the results in labelme format
#include <iostream>
#include <fstream>               // std::ofstream, used by resultWriteToJson
#include "yolo_v2_class.hpp"     // Detector and bbox_t, imported from the yolo_cpp_dll DLL
#include "opencv.hpp"            // OpenCV headers (include directory configured in section 1.2)
int drawResults(cv::Mat img, std::vector<bbox_t> &results)
{
    if (img.empty())
    {
        std::cout << "drawResults: the image is empty\n";
        return -1;
    }
    if (results.empty())
    {
        std::cout << "drawResults: the results vector is empty\n";
        return -1;
    }
    int img_w = img.cols;
    int img_h = img.rows;
    int expd = 10; // margin in pixels: boxes this close to the image border are not drawn
    for (auto &r : results)
    {
        if (int(r.x) - expd <= 0 || int(r.x + r.w) + expd >= img_w || int(r.y) - expd <= 0 || int(r.y + r.h) + expd >= img_h) continue;
        cv::rectangle(img, cv::Rect(r.x, r.y, r.w, r.h), cv::Scalar(0, 255, 255), 2);
        std::string className = std::to_string(r.obj_id);
        putText(img, className, cv::Point2f(r.x, r.y - 5), cv::FONT_HERSHEY_COMPLEX_SMALL, 2, cv::Scalar(0, 0, 255), 5);
        std::cout << "x:" << r.x << ", y:" << r.y << ", w:" << r.w << ", h:" << r.h << std::endl;
        /*cv::namedWindow("results", 0);
        cv::imshow("results", img);
        cv::waitKey(0);*/
    }
    cv::namedWindow("results", 0);
    cv::imshow("results", img);
    cv::waitKey(0);
    return 0;
}
std::vector<bbox_t> selectResults(cv::Mat &mat_img, std::vector<bbox_t> &results)
{
    // drop detections that lie on (or too close to) the image border
    int img_w = mat_img.cols;
    int img_h = mat_img.rows;
    std::vector<bbox_t> selectedResults;
    int expd = 5; // margin in pixels
    for (auto &r : results)
    {
        if (int(r.x) - expd <= 0 || int(r.x + r.w) + expd >= img_w || int(r.y) - expd <= 0 || int(r.y + r.h) + expd >= img_h) continue;
        selectedResults.push_back(r);
    }
    return selectedResults;
}
int resultWriteToJson(const std::string jsonPath, const std::string imagePath, const int imgH, const int imgW, const std::vector<bbox_t> &result)
{
    // jsonPath:  path of the json file to write
    // imagePath: image file name stored in the json (labelme uses it to locate the image)
    // for reference, a labelme annotation in json format (rectangle shapes):
    /*
    {
      "version": "4.5.7",
      "flags": {},
      "shapes": [
        {
          "label": "0",
          "points": [
            [
              1587.25,
              1060.8333333333335
            ],
            [
              1726.8333333333335,
              1221.25
            ]
          ],
          "group_id": null,
          "shape_type": "rectangle",
          "flags": {}
        },
        {
          "label": "1",
          "points": [
            [
              1197.7500000000002,
              1675.5
            ],
            [
              1339.416666666667,
              1810.9166666666665
            ]
          ],
          "group_id": null,
          "shape_type": "rectangle",
          "flags": {}
        }
      ],
      "imagePath": "000000012.bmp",
      "imageData": null,
      "imageHeight": 2000,
      "imageWidth": 2400
    }
    */
    std::ofstream out(jsonPath, std::ios::out); // use std::ios::app instead to append to an existing file
    if (!out.is_open())
    {
        std::cout << "can't open " << jsonPath << "!\n";
        return -1;
    }
    // write the json header
    out << "{\n" << "\"version\":\"4.5.7\",\n";
    out << "\"flags\" : {},\n";
    out << "\"shapes\" : [\n";
    // one rectangle shape per detected box
    for (size_t i = 0; i < result.size(); i++)
    {
        bbox_t box = result[i];
        out << "{\n";
        out << "\"label\":" << "\"" << box.obj_id << "\",\n";
        out << "\"points\":[\n";
        out << "[\n" << box.x << ",\n" << box.y << "\n],\n";                // top-left corner
        out << "[\n" << box.x + box.w << ",\n" << box.y + box.h << "\n]\n"; // bottom-right corner
        out << "],\n";
        out << "\"group_id\":null,\n";
        out << "\"shape_type\":\"rectangle\",\n";
        out << "\"flags\":{}\n";
        out << "}";
        if (i != result.size() - 1) out << ",\n"; // no comma after the last "}"
    }
    out << "],\n";
    out << "\"imagePath\" :" << "\"" << imagePath << "\",\n";
    out << "\"imageData\" :" << "null,\n";
    out << "\"imageHeight\":" << imgH << ",\n";
    out << "\"imageWidth\":" << imgW << "\n";
    out << "}\n";
    out.close();
    return 0;
}
int demo1()
{
    // predict a single image, show the result, and save it in labelme format
    std::string rootPath = "D:/mydoc/VS-proj/SMTDetector/x64/Release/";
    // label name file path (not used by the Detector itself, kept for reference)
    std::string names_file = rootPath + "data/SMTDetector.names";
    // config file path
    std::string cfg_file = rootPath + "cfg/SMTDetector.cfg";
    // weights file path
    std::string weights_file = rootPath + "model/SMTDetector.weights";
    // image file path
    //std::string imagePath = rootPath + "data/del/0-5.bmp";
    std::string imagePath = "K:\\imageData\\SMTdataset\\image\\000000001.bmp";
    // init the detector
    Detector detector(cfg_file, weights_file);
    cv::Mat img = cv::imread(imagePath);
    if (img.empty())
    {
        std::cout << "the image is empty\n";
        return -1;
    }
    // detect and drop boxes on the border
    std::vector<bbox_t> results = detector.detect(img);
    results = selectResults(img, results);
    // visualize the results
    drawResults(img, results);
    // write the detections to a labelme json file (example output/image names)
    resultWriteToJson("aaaa.json", "0-1.bmp", img.rows, img.cols, results);
    return 0;
}
int demo2()
{
    // batch-predict the images in a folder and show the results
    std::string rootPath = "D:/mydoc/VS-proj/SMTDetector/x64/Release/";
    // label name file path (not used by the Detector itself, kept for reference)
    std::string names_file = rootPath + "data/SMTDetector.names";
    // config file path
    std::string cfg_file = rootPath + "cfg/SMTDetector.cfg";
    // weights file path
    std::string weights_file = rootPath + "model/SMTDetector.weights";
    // image file path list
    std::string imageFolder = rootPath + "data/del";
    std::vector<cv::String> imageList;
    cv::glob(imageFolder, imageList);
    // init the detector
    Detector detector(cfg_file, weights_file);
    int num = 0;
    for (auto &r : imageList)
    {
        cv::Mat img = cv::imread(r);
        std::cout << "imagepath:" << r << std::endl;
        if (img.empty())
        {
            std::cout << "the image is empty\n";
            continue;
        }
        // detect and drop boxes on the border
        std::vector<bbox_t> results = detector.detect(img);
        std::vector<bbox_t> ss = selectResults(img, results);
        num += results.size();
        std::cout << "number of kept boxes:" << ss.size() << std::endl;
        // visualize the results
        drawResults(img, ss);
    }
    std::cout << "the total num:" << num << std::endl;
    return 0;
}
int demo3()
{
    // batch-predict the images in a folder and save the results as labelme json files
    std::string rootPath = "K:/model/SMTDetector/";
    // label name file path (not used by the Detector itself, kept for reference)
    std::string names_file = rootPath + "names/SMTDetector.names";
    // config file path
    std::string cfg_file = rootPath + "cfg/SMTDetector.cfg";
    // weights file path
    std::string weights_file = rootPath + "model/SMTDetector.weights";
    // image file path list
    std::string imageFolder = "K:\\imageData\\SMTdataset\\smi";
    std::vector<cv::String> imageList;
    cv::glob(imageFolder, imageList);
    // init the detector
    Detector detector(cfg_file, weights_file);
    int num = 0;
    for (auto &r : imageList)
    {
        cv::Mat img = cv::imread(r);
        std::cout << "imagepath:" << r << std::endl;
        if (img.empty())
        {
            std::cout << "the image is empty\n";
            continue;
        }
        // detect and drop boxes on the border
        std::vector<bbox_t> results = detector.detect(img);
        results = selectResults(img, results);
        num += results.size();
        // build the json file name from the image file name ("xxx.bmp" -> "xxx.json")
        size_t index = r.find_last_of("\\/");
        std::string imageName = r.substr(index + 1);
        std::string jsonName = imageName.substr(0, imageName.find_last_of(".")) + ".json";
        //std::cout << "json:" << jsonName << "\t image:" << imageName << "\n";
        // the json is written next to the image so labelme can pick it up automatically
        resultWriteToJson(imageFolder + "\\" + jsonName, imageName, img.rows, img.cols, results);
    }
    std::cout << "the total num:" << num << std::endl;
    return 0;
}
int main()
{
    demo1(); // switch to demo2() or demo3() as needed
    return 0;
}
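One small extension: the demos load names_file but never use it, and resultWriteToJson writes the numeric obj_id as the label. If your manual annotations use class names instead of indices, the names can be read from the darknet .names file (one name per line, in class-id order) with a helper like the rough sketch below (loadClassNames is a hypothetical name, not part of the project):

#include <fstream>
#include <string>
#include <vector>

// Hypothetical helper: read the darknet .names file into a vector so that
// names[obj_id] gives the class name for a detection.
std::vector<std::string> loadClassNames(const std::string &namesPath)
{
    std::vector<std::string> names;
    std::ifstream in(namesPath);
    std::string line;
    while (std::getline(in, line))
    {
        // drop a trailing '\r' in case the file has Windows line endings
        if (!line.empty() && line.back() == '\r') line.pop_back();
        if (!line.empty()) names.push_back(line);
    }
    return names;
}

With that in place, the label line in resultWriteToJson could become out << "\"label\":\"" << names[box.obj_id] << "\",\n"; (guarding against obj_id being out of range). Also note that demo3 writes each json file next to its image, so opening the image folder in labelme should show the generated annotations ready for correction.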