object_detection API source code reading notes (10: VOC)

Author: yanghedada | Published 2018-10-13 13:55

Creating the TFRecord file

create_my_data_tf_record.py is adapted from the corresponding script in the Google object detection API. The code is as follows:

import hashlib
import io
import logging
import os

from lxml import etree
import PIL.Image
import tensorflow as tf

from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
#from tensorflow.models.research.object_detection.utils import label_map_util
#
#from tensorflow.models.research.object_detection.utils import visualization_utils as vis_util


flags = tf.app.flags
flags.DEFINE_string('data_dir', '', 'Root directory to raw PASCAL VOC dataset.')
flags.DEFINE_string('set', 'train', 'Convert training set, validation set or '
                    'merged set.')
flags.DEFINE_string('annotations_dir', 'Annotations',
                    '(Relative) path to annotations directory.')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
flags.DEFINE_string('label_map_path', 'VOCdevkit/data/label_map.pbtxt',
                    'Path to label map proto')
flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore '
                     'difficult instances')
FLAGS = flags.FLAGS

SETS = ['train', 'val', 'trainval', 'test']


def dict_to_tf_example(data,
                       dataset_directory,
                       label_map_dict,
                       ignore_difficult_instances=False,
                       image_subdirectory='JPEGImages'):
  """Convert XML derived dict to tf.Example proto.

  Notice that this function normalizes the bounding box coordinates provided
  by the raw data.

  Args:
    data: dict holding PASCAL XML fields for a single image (obtained by
      running dataset_util.recursive_parse_xml_to_dict)
    dataset_directory: Path to root directory holding PASCAL dataset
    label_map_dict: A map from string label names to integers ids.
    ignore_difficult_instances: Whether to skip difficult instances in the
      dataset  (default: False).
    image_subdirectory: String specifying subdirectory within the
      PASCAL dataset directory holding the actual image data.

  Returns:
    example: The converted tf.Example.

  Raises:
    ValueError: if the image pointed to by data['filename'] is not a valid JPEG
  """
  img_path = os.path.join(data['folder'], image_subdirectory, data['filename'])
  full_path = os.path.join(dataset_directory, img_path)
  with tf.gfile.GFile(full_path, 'rb') as fid:
    encoded_jpg = fid.read()
  encoded_jpg_io = io.BytesIO(encoded_jpg)
  image = PIL.Image.open(encoded_jpg_io)
  if image.format != 'JPEG':
    raise ValueError('Image format not JPEG')
  key = hashlib.sha256(encoded_jpg).hexdigest()

  width = int(data['size']['width'])
  height = int(data['size']['height'])

  xmin = []
  ymin = []
  xmax = []
  ymax = []
  classes = []
  classes_text = []
  truncated = []
  poses = []
  difficult_obj = []
  for obj in data['object']:
    difficult = bool(int(obj['difficult']))
    if ignore_difficult_instances and difficult:
      continue

    difficult_obj.append(int(difficult))

    xmin.append(float(obj['bndbox']['xmin']) / width)
    ymin.append(float(obj['bndbox']['ymin']) / height)
    xmax.append(float(obj['bndbox']['xmax']) / width)
    ymax.append(float(obj['bndbox']['ymax']) / height)
    classes_text.append(obj['name'].encode('utf8'))
    classes.append(label_map_dict[obj['name']])
    truncated.append(int(obj['truncated']))
    poses.append(obj['pose'].encode('utf8'))

  example = tf.train.Example(features=tf.train.Features(feature={
      'image/height': dataset_util.int64_feature(height),
      'image/width': dataset_util.int64_feature(width),
      'image/filename': dataset_util.bytes_feature(
          data['filename'].encode('utf8')),
      'image/source_id': dataset_util.bytes_feature(
          data['filename'].encode('utf8')),
      'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
      'image/encoded': dataset_util.bytes_feature(encoded_jpg),
      'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
      'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
      'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
      'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
      'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
      'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
      'image/object/class/label': dataset_util.int64_list_feature(classes),
      'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
      'image/object/truncated': dataset_util.int64_list_feature(truncated),
      'image/object/view': dataset_util.bytes_list_feature(poses),
  }))
  return example


def main(_):
  if FLAGS.set not in SETS:
    raise ValueError('set must be in : {}'.format(SETS))

  data_dir = FLAGS.data_dir
  datasets = ['data']

  writer = tf.python_io.TFRecordWriter(FLAGS.output_path)

  label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

  for dataset in datasets:
    logging.info('Reading from PASCAL %s dataset.', dataset)
    examples_path = os.path.join(data_dir, dataset, 'ImageSets', 'Main/' + FLAGS.set + '.txt')
    annotations_dir = os.path.join(data_dir, dataset, FLAGS.annotations_dir)
    examples_list = dataset_util.read_examples_list(examples_path)
    for idx, example in enumerate(examples_list):
      if idx % 100 == 0:
        logging.info('On image %d of %d', idx, len(examples_list))
      path = os.path.join(annotations_dir, example + '.xml')
      with tf.gfile.GFile(path, 'r') as fid:
        xml_str = fid.read()
      xml = etree.fromstring(xml_str)
      data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

      tf_example = dict_to_tf_example(data, FLAGS.data_dir, label_map_dict,
                                      FLAGS.ignore_difficult_instances)
      writer.write(tf_example.SerializeToString())

  writer.close()


if __name__ == '__main__':
  tf.app.run()
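Two things the script above relies on are worth spelling out. First, label_map_path points at a label map in the standard object detection API pbtxt format; label_map_util.get_label_map_dict turns it into a {name: id} dict. A minimal example (the class names here are placeholders, substitute your own):

item {
  id: 1
  name: 'dog'
}
item {
  id: 2
  name: 'cat'
}

Second, the data argument of dict_to_tf_example is the dict that dataset_util.recursive_parse_xml_to_dict produces from one VOC-style XML annotation. Roughly, it looks like the sketch below: every leaf value is a string and 'object' is always a list (the concrete values are made up for illustration):

data = {
    'folder': 'data',
    'filename': '000001.jpg',
    'size': {'width': '500', 'height': '375', 'depth': '3'},
    'object': [{
        'name': 'dog',
        'pose': 'Unspecified',
        'truncated': '0',
        'difficult': '0',
        'bndbox': {'xmin': '48', 'ymin': '240', 'xmax': '195', 'ymax': '371'},
    }],
}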

Now read back one of the TFRecord files:

#encoding=utf-8

import tensorflow as tf
import numpy as np
import cv2
import io
from PIL import Image
def parse_tf(example_proto):
    dics = {}

    dics['image/encoded'] = tf.FixedLenFeature(shape=[],dtype=tf.string)
    dics['image/width'] = tf.FixedLenFeature(shape=[], dtype=tf.int64)
    dics['image/height'] = tf.FixedLenFeature(shape=[], dtype=tf.int64)


    dics['image/object/class/text'] = tf.VarLenFeature(tf.string)
    dics['image/filename'] = tf.VarLenFeature(tf.string)
    dics['image/object/class/label'] = tf.VarLenFeature(tf.int64)
    dics['image/object/bbox/xmin'] = tf.VarLenFeature(tf.float32)
    dics['image/object/bbox/xmax'] = tf.VarLenFeature(tf.float32)
    dics['image/object/bbox/ymin'] = tf.VarLenFeature(tf.float32)
    dics['image/object/bbox/ymax'] = tf.VarLenFeature(tf.float32)
    parse_example = tf.parse_single_example(serialized=example_proto,features=dics)
    filename = parse_example['image/filename']
    xmin = parse_example['image/object/bbox/xmin']
    xmax = parse_example['image/object/bbox/xmax']
    ymin = parse_example['image/object/bbox/ymin']
    ymax = parse_example['image/object/bbox/ymax']
    # image/encoded stores the raw JPEG bytes, so decode with decode_jpeg
    # rather than tf.decode_raw
    image = tf.image.decode_jpeg(parse_example['image/encoded'])
    w = parse_example['image/width']
    h = parse_example['image/height']
    
    return filename,image,w,h,xmin,xmax,ymin,ymax

dataset = tf.data.TFRecordDataset("./TFrecodr/eval.record")
dataset = dataset.map(parse_tf).batch(1).repeat(1)

iterator = dataset.make_one_shot_iterator()

next_element = iterator.get_next()
with tf.Session() as session:

    for i in range(3):
        filename, image, w, h, xmin, xmax, ymin, ymax = session.run(fetches=next_element)
        # the bbox fields hold normalized top-left / bottom-right coordinates
        print(filename)
        print(np.squeeze(image).shape)
        print(image.dtype)
    # draw the first box of the last fetched example; cv2.rectangle needs integer
    # pixel corners, and w/h come back as length-1 arrays because of batch(1)
    # (note: decode_jpeg yields RGB while cv2.imshow assumes BGR, so colors look swapped)
    image = np.squeeze(image)
    image1 = cv2.rectangle(image,
                           (int(xmin.values[0] * w[0]), int(ymin.values[0] * h[0])),
                           (int(xmax.values[0] * w[0]), int(ymax.values[0] * h[0])),
                           color=(0, 255, 0))
    cv2.imshow("s", image1)
    cv2.waitKey(0)
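Because the bbox fields are declared with tf.VarLenFeature, session.run returns them as SparseTensorValue objects, which is why the code reads xmin.values[0] instead of xmin[0]. As a rough sketch of my own (not part of the original script), all boxes of one image can be gathered into an (N, 4) pixel-coordinate array like this:

import numpy as np

def sparse_boxes_to_pixels(xmin, ymin, xmax, ymax, width, height):
    # stack the normalized coordinates into (N, 4) rows of [xmin, ymin, xmax, ymax],
    # then scale back to pixel units
    boxes = np.stack([xmin.values, ymin.values, xmax.values, ymax.values], axis=1)
    boxes[:, [0, 2]] *= width
    boxes[:, [1, 3]] *= height
    return boxes.astype(np.int32)

# usage with the values fetched above (w and h are length-1 arrays):
# boxes = sparse_boxes_to_pixels(xmin, ymin, xmax, ymax, w[0], h[0])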

Testing with the visualization utilities shipped with the TensorFlow object detection API

Run a single image through, purely for visualization.

# encoding=utf-8
import matplotlib.pyplot as plt
import numpy as np
import os 
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from PIL import Image

def parse_tf(example_proto):
    dics = {}

    dics['image/encoded'] = tf.FixedLenFeature(shape=[],dtype=tf.string)
    dics['image/width'] = tf.FixedLenFeature(shape=[], dtype=tf.int64)
    dics['image/height'] = tf.FixedLenFeature(shape=[], dtype=tf.int64)


    dics['image/object/class/text'] = tf.VarLenFeature(tf.string)
    dics['image/filename'] = tf.VarLenFeature(tf.string)
    dics['image/object/class/label'] = tf.VarLenFeature(tf.int64)
    dics['image/object/bbox/xmin'] = tf.VarLenFeature(tf.float32)
    dics['image/object/bbox/xmax'] = tf.VarLenFeature(tf.float32)
    dics['image/object/bbox/ymin'] = tf.VarLenFeature(tf.float32)
    dics['image/object/bbox/ymax'] = tf.VarLenFeature(tf.float32)
    parse_example = tf.parse_single_example(serialized=example_proto,features=dics)
    filename = parse_example['image/filename']
    xmin = parse_example['image/object/bbox/xmin']
    xmax = parse_example['image/object/bbox/xmax']
    ymin = parse_example['image/object/bbox/ymin']
    ymax = parse_example['image/object/bbox/ymax']
    # image/encoded stores the raw JPEG bytes, so decode with decode_jpeg
    # rather than tf.decode_raw
    image = tf.image.decode_jpeg(parse_example['image/encoded'])
    w = parse_example['image/width']
    h = parse_example['image/height']
    label = parse_example['image/object/class/label']
    
    return filename,image,w,h,xmin,xmax,ymin,ymax,label

dataset = tf.data.TFRecordDataset("./record/pascal_train.record")
dataset = dataset.map(parse_tf).batch(1).repeat(1)

iterator = dataset.make_one_shot_iterator()

next_element = iterator.get_next()
PATH_TO_LABELS = "record/pascal_label_map.pbtxt"
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map,max_num_classes = 221,use_display_name = True)
category_index = label_map_util.create_category_index(categories)
with tf.Session() as session:

    filename,image, w, h,xmin, xmax, ymin, ymax ,label= session.run(fetches=next_element)                                               
    image_np = np.squeeze(image)
    print(filename)
    # visualize the ground-truth boxes
    boxes = list(np.stack((ymin.values, xmin.values, ymax.values, xmax.values), axis=1))
    print(xmin.values)
    classes = list(label.values)
    # dummy scores: vis_util only draws boxes whose score passes its threshold
    # (0.5 by default), so with these values only the first two boxes are drawn
    scores = [[1, 1] + [0] * 18]
    print(classes, boxes)
    vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            np.reshape(boxes,(-1,4)),
            np.array(classes),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8)
    plt.figure(figsize=(8,8))
    print(type(image_np))
    print(image_np.shape)
    image_np = np.array(image_np,dtype=np.uint8)            
    plt.imshow(image_np)
    plt.show()
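The scores list above is just a placeholder, so only the first two boxes survive the score threshold. If you want every ground-truth box drawn regardless of how many objects the example contains, one simple tweak (my own suggestion, not from the original post) is to give each box a score of 1.0 before calling vis_util:

    # one score per box, so every ground-truth box passes the score threshold
    scores = np.ones(len(boxes), dtype=np.float32)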

Reference:

【TensorFlow系列】【八】目标检测之pascal voc数据预处理
