Abstract:
labelme is a widely used annotation tool for deep learning; it supports labeling for tasks such as object detection and instance segmentation. However, some frameworks such as detectron2 and SOLO require annotations in COCO format. This post walks through an example of converting labelme annotations to COCO format.
GitHub project link: labelme2coco
@toc
1. Installing labelme2coco
- Download the repository from the GitHub project link above and extract it.
Note: the project's Usage section suggests installing with pip install labelme2coco and then calling labelme2coco.convert(), but for me that raised an error saying labelme2coco has no attribute 'convert' (as of 2021/06/24), so I install it the following way instead.
- After extracting, you should see the project source, including setup.py.
- Open a command prompt in the extracted labelme2coco directory and run the following command to install it:
python setup.py install
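A quick way to confirm the install worked is to import the helpers that the conversion script in the next section relies on. This only checks that the package is importable; the module and function names come from that script:

import labelme2coco  # should import without error after python setup.py install
from labelme2coco.utils import create_dir, list_jsons_recursively
from labelme2coco.image_utils import read_image_shape_as_dict
print("labelme2coco imported from:", labelme2coco.__file__)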
2. Usage
- Place the labelme JSON annotation files and their corresponding images in the same directory. A quick check that every annotation has a matching image is sketched right below.
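The conversion script below resolves each image path by replacing the .json suffix of the annotation path with the extension recorded in the JSON's imagePath field, so it is worth confirming that every labelme JSON has its image sitting next to it. A minimal sketch of such a check (the folder K:\del is just the example path used later in this post):

import os
import glob
import json

labelme_folder = r"K:\del"  # example folder from this post; change to your own
for json_path in glob.glob(os.path.join(labelme_folder, "*.json")):
    with open(json_path, "r") as fp:
        data = json.load(fp)
    # mirror the converter: image path = json path with the extension taken from 'imagePath'
    _, ext = os.path.splitext(data["imagePath"])
    image_path = json_path.replace(".json", ext)
    if not os.path.exists(image_path):
        print("missing image for", json_path)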
- Create a new file, for example named l2c.py, with the following content:
import os
import json
import PIL.Image
import PIL.ImageDraw
import numpy as np

from labelme2coco.utils import create_dir, list_jsons_recursively
from labelme2coco.image_utils import read_image_shape_as_dict


class labelme2coco(object):
    def __init__(self, labelme_folder='', save_json_path='./new.json'):
        """
        Args:
            labelme_folder: folder that contains labelme annotations and image files
            save_json_path: path for coco json to be saved
        """
        self.save_json_path = save_json_path
        self.images = []
        self.categories = []
        self.annotations = []
        self.label = []
        self.annID = 1
        self.height = 0
        self.width = 0

        # create save dir
        save_json_dir = os.path.dirname(save_json_path)
        create_dir(save_json_dir)

        # get json list
        _, labelme_json = list_jsons_recursively(labelme_folder)
        self.labelme_json = labelme_json

        self.save_json()

    def data_transfer(self):
        for num, json_path in enumerate(self.labelme_json):
            with open(json_path, 'r') as fp:
                # load json
                data = json.load(fp)
                self.images.append(self.image(data, num, json_path))
                for shapes in data['shapes']:
                    label = shapes['label']
                    if label not in self.label:
                        self.categories.append(self.category(label))
                        self.label.append(label)
                    points = shapes['points']
                    self.annotations.append(self.annotation(points, label, num))
                    self.annID += 1

    def image(self, data, num, json_path):
        image = {}
        # get image path: same name as the json, with the extension from 'imagePath'
        _, img_extension = os.path.splitext(data["imagePath"])
        image_path = json_path.replace(".json", img_extension)
        img_shape = read_image_shape_as_dict(image_path)
        height, width = img_shape['height'], img_shape['width']

        image['height'] = height
        image['width'] = width
        image['id'] = int(num + 1)
        image['file_name'] = image_path

        self.height = height
        self.width = width

        return image

    def category(self, label):
        category = {}
        category['supercategory'] = label
        category['id'] = int(len(self.label) + 1)
        category['name'] = label
        return category

    def annotation(self, points, label, num):
        annotation = {}
        annotation['iscrowd'] = 0
        annotation['image_id'] = int(num + 1)
        annotation['bbox'] = list(map(float, self.getbbox(points)))
        annotation['segmentation'] = [np.asarray(points).flatten().tolist()]
        annotation['category_id'] = self.getcatid(label)
        annotation['id'] = int(self.annID)
        # add area info
        annotation['area'] = self.height * self.width  # the area is not used for detection
        return annotation

    def getcatid(self, label):
        for categorie in self.categories:
            if label == categorie['name']:
                return categorie['id']
        return -1

    def getbbox(self, points):
        # rasterize the polygon to a mask, then take the mask's bounding box
        polygons = points
        mask = self.polygons_to_mask([self.height, self.width], polygons)
        return self.mask2box(mask)

    def mask2box(self, mask):
        # indices of all foreground pixels
        index = np.argwhere(mask == 1)
        rows = index[:, 0]
        clos = index[:, 1]
        left_top_r = np.min(rows)  # y
        left_top_c = np.min(clos)  # x
        right_bottom_r = np.max(rows)
        right_bottom_c = np.max(clos)
        # [x, y, w, h] for coco box format
        return [left_top_c, left_top_r, right_bottom_c - left_top_c, right_bottom_r - left_top_r]

    def polygons_to_mask(self, img_shape, polygons):
        mask = np.zeros(img_shape, dtype=np.uint8)
        mask = PIL.Image.fromarray(mask)
        xy = list(map(tuple, polygons))
        PIL.ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
        mask = np.array(mask, dtype=bool)
        return mask

    def data2coco(self):
        data_coco = {}
        data_coco['images'] = self.images
        data_coco['categories'] = self.categories
        data_coco['annotations'] = self.annotations
        return data_coco

    def save_json(self):
        self.data_transfer()
        self.data_coco = self.data2coco()
        json.dump(self.data_coco, open(self.save_json_path, 'w', encoding='utf-8'),
                  indent=4, separators=(',', ': '), cls=MyEncoder)


# type checking when saving json files
class MyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(MyEncoder, self).default(obj)


if __name__ == "__main__":
    # labelme_folder: the folder containing your annotated images and labelme JSON files
    labelme_folder = r"K:\del"
    # save_json_path: where the converted COCO-format annotation file will be saved
    save_json_path = r"K:\del\train_coco_format.json"
    labelme2coco(labelme_folder, save_json_path)
- The whole project now consists of the images, the labelme JSON files, and l2c.py.
- Run python l2c.py from the command line to perform the conversion; the COCO-format annotation file is written to the path set in save_json_path.
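After the conversion finishes, train_coco_format.json holds the three top-level lists built by data2coco(): images, categories, and annotations. A quick way to peek at the structure (using the save path from the example above):

import json

with open(r"K:\del\train_coco_format.json", "r", encoding="utf-8") as f:
    coco_dict = json.load(f)

print(list(coco_dict.keys()))              # ['images', 'categories', 'annotations']
print(coco_dict['images'][0])              # height, width, id, file_name
print(coco_dict['categories'])             # one entry per label found in the labelme JSONs
print(coco_dict['annotations'][0].keys())  # iscrowd, image_id, bbox, segmentation, category_id, id, area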
3. Verifying that the conversion is correct
Check that the newly converted COCO-format labels are correct. The sample here was annotated for a segmentation task (the example has been lightly processed to protect the original data). The converted COCO-format annotations can be visualized with the code below, and the result can be seen to be correct.
import os
from pycocotools.coco import COCO
from skimage import io
from matplotlib import pyplot as plt

json_file = r'K:\del\train_coco_format.json'
dataset_dir = r''  # empty here because 'file_name' already stores the full image path

coco = COCO(json_file)
catIds = coco.getCatIds(catNms=['0', '1'])  # my annotations use '0' and '1' as the class names
imgIds = coco.getImgIds(catIds=catIds)      # image ids (there may be many)

for i in range(len(imgIds)):
    img = coco.loadImgs(imgIds[i])[0]
    I = io.imread(dataset_dir + img['file_name'])
    plt.axis('off')
    plt.imshow(I)  # draw the image; actual display is handled by plt.show()
    annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds, iscrowd=None)
    anns = coco.loadAnns(annIds)
    coco.showAnns(anns)
    plt.show()  # display the image
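One caveat worth noting while verifying: the conversion script fills each annotation's area with the full image area (its own comment points out that area is not used for detection). If you later need per-instance areas, for example for COCO's small/medium/large evaluation breakdown, they can be recomputed from the stored polygons with pycocotools. A minimal sketch, assuming the same output path as above:

import json
from pycocotools import mask as maskUtils

json_path = r'K:\del\train_coco_format.json'  # output path from the example above
with open(json_path, 'r', encoding='utf-8') as f:
    coco_dict = json.load(f)

images = {im['id']: im for im in coco_dict['images']}
for ann in coco_dict['annotations']:
    im = images[ann['image_id']]
    # convert the polygon(s) to RLE at the image resolution, then take the mask area
    rles = maskUtils.frPyObjects(ann['segmentation'], im['height'], im['width'])
    ann['area'] = float(maskUtils.area(maskUtils.merge(rles)))

with open(json_path, 'w', encoding='utf-8') as f:
    json.dump(coco_dict, f, indent=4)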