【Tensorflow】Object Detection API

Author: 下里巴人也 | Published 2017-12-12 11:05

    OS: CentOS 7.4

    Reference: https://github.com/tensorflow/models/

    1 Environment Setup

    • Install Anaconda
    [root@localhost home]# mkdir /home/anaconda_install
    [root@localhost home]# cd /home/anaconda_install
    [root@localhost anaconda_install]# wget https://repo.continuum.io/archive/Anaconda2-5.0.1-Linux-x86_64.sh
    [root@localhost anaconda_install]# chmod +x Anaconda2-5.0.1-Linux-x86_64.sh
    [root@localhost anaconda_install]# ./Anaconda2-5.0.1-Linux-x86_64.sh
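    The installer offers to append its setup to ~/.bashrc; if you accept, open a new shell (or source ~/.bashrc) afterwards so the conda paths take effect.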
    
    • Install pip via conda:
    [root@localhost anaconda_install]# conda install pip
    
    • Install OpenCV 3 via conda
    [root@localhost anaconda_install]# conda install -c menpo opencv3
    On Windows, the equivalent command is: conda install -c https://conda.binstar.org/menpo opencv
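
    A quick sanity check of my own (not from the original post) that the conda OpenCV build is importable:

    # Sanity check: the menpo build should import and report a 3.x version.
    import cv2
    print(cv2.__version__)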
    
    • Install TensorFlow and dependencies
    [root@localhost anaconda_install]# mkdir -p /home/tensorflow/  # then extract Object-Detector-App.tar.gz into this directory
    [root@localhost anaconda_install]# cd /home/tensorflow/
    [root@localhost tensorflow]# pip install tensorflow
    [root@localhost tensorflow]# pip install pillow
    [root@localhost tensorflow]# export PYTHONPATH=$PYTHONPATH:/home/tensorflow/Object-Detector-App:/home/tensorflow/Object-Detector-App/slim
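
    Both the repository root and its slim/ subdirectory must be on PYTHONPATH: the former provides the object_detection package, the latter the slim networks it imports.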
    
    • If you hit the error ImportError: libpng12.so.0: cannot open shared object file: No such file or directory, install libpng12:
    yum install libpng12
    
    • If you hit a `CXXABI_1.3.9' not found error:
    ImportError: /lib64/libstdc++.so.6: version `CXXABI_1.3.9' not found (required by /root/anaconda2/lib/python2.7/site-packages/matplotlib/_path.so)
    
    Fix:
    [root@localhost Object-Detector-App]# find / -name "libstdc++.so.*"
    find: ‘/run/user/1000/gvfs’: Permission denied
    /root/anaconda2/pkgs/libstdcxx-ng-7.2.0-h7a57d05_2/lib/libstdc++.so.6.0.24
    /root/anaconda2/pkgs/libstdcxx-ng-7.2.0-h7a57d05_2/lib/libstdc++.so.6
    /root/anaconda2/pkgs/libstdcxx-ng-7.2.0-h7a57d05_2/x86_64-conda_cos6-linux-gnu/sysroot/lib/libstdc++.so.6
    /root/anaconda2/pkgs/libstdcxx-ng-7.2.0-h7a57d05_2/x86_64-conda_cos6-linux-gnu/sysroot/lib/libstdc++.so.6.0.24
    /root/anaconda2/lib/libstdc++.so.6
    /root/anaconda2/lib/libstdc++.so.6.0.24
    /root/anaconda2/x86_64-conda_cos6-linux-gnu/sysroot/lib/libstdc++.so.6
    /root/anaconda2/x86_64-conda_cos6-linux-gnu/sysroot/lib/libstdc++.so.6.0.24
    /usr/lib64/libstdc++.so.6
    /usr/lib64/libstdc++.so.6.0.19
    /usr/share/gdb/auto-load/usr/lib64/libstdc++.so.6.0.19-gdb.py
    /usr/share/gdb/auto-load/usr/lib64/libstdc++.so.6.0.19-gdb.pyc
    /usr/share/gdb/auto-load/usr/lib64/libstdc++.so.6.0.19-gdb.pyo
    [root@localhost Object-Detector-App]# cp /root/anaconda2/pkgs/libstdcxx-ng-7.2.0-h7a57d05_2/lib/libstdc++.so.6.0.24 /lib64/
    [root@localhost Object-Detector-App]# mv /lib64/libstdc++.so.6 /lib64/libstdc++.so.6.20171224
    [root@localhost Object-Detector-App]# ln -s /lib64/libstdc++.so.6.0.24 /lib64/libstdc++.so.6
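
    To confirm the fix, run strings /lib64/libstdc++.so.6 | grep CXXABI; the output should now include CXXABI_1.3.9.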
    

    2 Detection Code

    2.1 Core detection code

    import os
    import cv2
    import time
    import argparse
    import multiprocessing
    import numpy as np
    import tensorflow as tf
    
    from utils.app_utils import FPS, WebcamVideoStream
    from multiprocessing import Queue, Pool
    from object_detection.utils import label_map_util
    from object_detection.utils import visualization_utils as vis_util
    
    CWD_PATH = os.getcwd()
    
    # Path to frozen detection graph. This is the actual model that is used for the object detection.
    MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
    PATH_TO_CKPT = os.path.join(CWD_PATH, 'object_detection', MODEL_NAME, 'frozen_inference_graph.pb')
    
    # List of the strings that are used to add the correct label to each box.
    PATH_TO_LABELS = os.path.join(CWD_PATH, 'object_detection', 'data', 'mscoco_label_map.pbtxt')
    
    NUM_CLASSES = 90
    
    # Loading label map
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
                                                                use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    
    
    def detect_objects(image_np, sess, detection_graph):
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    
        # Each box represents a part of the image where a particular object was detected.
        boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    
        # Each score represents the confidence level for each detected object.
        # Scores are shown on the result image, together with the class label.
        scores = detection_graph.get_tensor_by_name('detection_scores:0')
        classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
    
        # Actual detection.
        (boxes, scores, classes, num_detections) = sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})
    
        # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8)
        return image_np
    
    def worker(input_q, output_q):
        # Load a (frozen) Tensorflow model into memory.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
    
            sess = tf.Session(graph=detection_graph)
    
        fps = FPS().start()
        while True:
            fps.update()
            frame = input_q.get()
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            output_q.put(detect_objects(frame_rgb, sess, detection_graph))
    
        fps.stop()
        sess.close()
    
    if __name__ == '__main__':
        parser = argparse.ArgumentParser()
        parser.add_argument('-src', '--source', dest='video_source', type=int,
                            default=0, help='Device index of the camera.')
        parser.add_argument('-wd', '--width', dest='width', type=int,
                            default=480, help='Width of the frames in the video stream.')
        parser.add_argument('-ht', '--height', dest='height', type=int,
                            default=360, help='Height of the frames in the video stream.')
        parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
                            default=2, help='Number of workers.')
        parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
                            default=5, help='Size of the queue.')
        args = parser.parse_args()
    
        logger = multiprocessing.log_to_stderr()
        logger.setLevel(multiprocessing.SUBDEBUG)
    
        input_q = Queue(maxsize=args.queue_size)
        output_q = Queue(maxsize=args.queue_size)
        pool = Pool(args.num_workers, worker, (input_q, output_q))
    
        video_capture = WebcamVideoStream(src=args.video_source,
                                          width=args.width,
                                          height=args.height).start()
        fps = FPS().start()
    
        while True:  # fps._numFrames < 120
            frame = video_capture.read()
            input_q.put(frame)
    
            t = time.time()
    
            output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
            cv2.imshow('Video', output_rgb)
            fps.update()
    
            print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
    
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    
        fps.stop()
        print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
        print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
    
        pool.terminate()
        video_capture.stop()
        cv2.destroyAllWindows()
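
    A few notes on the structure: each Pool worker loads its own copy of the frozen graph and owns a private tf.Session, so inference runs in parallel across workers. The bounded queues decouple frame capture from inference and apply back-pressure when inference falls behind; with more than one worker, processed frames are not guaranteed to come back in capture order.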
    

    2.2 Running the official model

    • Plug in the cameras and check their device indices. I used four cameras here (screenshot omitted); a quick programmatic check follows:
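
    This probe is a sketch of my own, not from the original post:

    # Probe the first few camera indices and report which ones OpenCV can open.
    import cv2

    for idx in range(4):  # the post uses cameras 0-3
        cap = cv2.VideoCapture(idx)
        ok = cap.isOpened() and cap.read()[0]  # try to grab one frame
        print('camera {}: {}'.format(idx, 'available' if ok else 'not available'))
        cap.release()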

    • From /home/tensorflow/Object-Detector-App/, run the following command to open one camera (--source is the camera index; mine are 0, 1, 2, and 3):

      python object_detection_app.py --source=0 --width=640 --height=480
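
      Run it from the Object-Detector-App root: the script builds its model and label paths from os.getcwd(), so launching it from elsewhere will fail to find frozen_inference_graph.pb.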

    • The result looks like this; I had three cameras open at the same time, one process per --source value (screenshots omitted).

    3 Using Your Own Model

    Directory layout (screenshot omitted; see the file list below):

    3.1 Preparation

    • Object Detection API
      Message me privately for the source code (QQ: 479066524).
      The source tree includes:
      sd_video_detect.py: video detection
      sd_pic_detect.py: image detection
      sd_train/: training scripts and resources
      sd_model/: exported models
      object_detection/: the core code, which can be pulled from GitHub
    • Train your own model
      See my earlier articles for how to train your own model; message me for the automatic image-labeling tool.
      I wrote a script for this, sd_train/train.sh (a usage note and a sample label map follow the script):
    #!/bin/bash
    #set -x
    
    #$1: model name
    export PYTHONPATH=:/home/tensorflow/Object-Detector-App:/home/tensorflow/Object-Detector-App/slim
    
    path=$(dirname `readlink -f $0`)
    
    echo "path: $path"
    
    echo "############step1: generate pascal_train.record############"
    python $path/create_pascal_tf_record.py --data_dir=$path/$1/ \
                                            --label_map_path=$path/$1/pascal_label_map.pbtxt \
                                            --year=VOC2017 \
                                            --set=train \
                                            --output_path=$path/$1/pascal_train.record
    if [ -f "$path/$1/pascal_train.record" ];then
            echo "generate pascal_train.record successfully: $path/$1/pascal_train.record"
    else
            echo "generate pascal_train.record failed!!!"
            exit 1
    fi
    
    sleep 1
    echo "############step2: generate pascal_val.record############"
    python $path/create_pascal_tf_record.py --data_dir=$path/$1/ \
                                            --label_map_path=$path/$1/pascal_label_map.pbtxt \
                                            --year=VOC2017 \
                                            --set=val \
                                            --output_path=$path/$1/pascal_val.record
    if [ -f "$path/$1/pascal_val.record" ];then
            echo "generate pascal_val.record successfully: $path/$1/pascal_val.record"
    else
            echo "generate pascal_val.record failed!!!"
            exit 1
    fi
    
    sleep 1
    echo "############step3: training############"
    python $path/../object_detection/train.py --logtostderr \
                                              --train_dir=$path/$1/output \
                                              --pipeline_config_path=$path/$1/ssd_mobilenet_v1_pascal.config
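
    The script takes the data directory name under sd_train/ as $1, so a run looks like ./train.sh my_model (my_model is a placeholder; use your own directory name). It also expects a pascal_label_map.pbtxt in that directory; for a two-class model like the one in this post, a minimal label map would look something like this (the class names are my placeholders):

    item {
      id: 1
      name: 'black_tea'
    }
    item {
      id: 2
      name: 'pear'
    }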
    
    • Export a usable model
      I also wrote a script for this, sd_train/model.sh, for reference (a note on checkpoints follows the script):
    #!/bin/bash
    set -x
    
    export PYTHONPATH=:/home/tensorflow/Object-Detector-App:/home/tensorflow/Object-Detector-App/slim
    #$1: model name
    path=$(dirname `readlink -f $0`)
    
    echo "path: $path"
    
    
    steps="`cat $path/$1/ssd_mobilenet_v1_pascal.config |grep num_steps |awk '{print $2}'`"
    echo "train steps: $steps"
    
    echo "############generate model############"
    python $path/../object_detection/export_inference_graph.py --input_type image_tensor \
                                            --pipeline_config_path $path/$1/ssd_mobilenet_v1_pascal.config \
                                            --trained_checkpoint_prefix $path/$1/output/model.ckpt-$steps \
                                            --output_directory $path/$1/savedModel
    
    rm -rf $path/../sd_model/$1
    cp -rf $path/$1/savedModel $path/../sd_model/$1
    cp -rf $path/$1/ssd_mobilenet_v1_pascal.config $path/../sd_model/$1/
    cp -rf $path/$1/pascal_label_map.pbtxt $path/../sd_model/$1/
    echo "############completed! path: $path/../sd_model/$1/"
    

    After running the script, the exported model lands under sd_model/ (screenshot omitted):

    • Use the model
      My detection program, sd_video_detect.py:
    import os
    import cv2
    import time
    import argparse
    import multiprocessing
    import numpy as np
    import tensorflow as tf
    
    from utils.app_utils import FPS, WebcamVideoStream
    from multiprocessing import Queue, Pool
    from object_detection.utils import label_map_util
    from object_detection.utils import visualization_utils as vis_util
    
    CWD_PATH = os.getcwd()
    
    parser = argparse.ArgumentParser()
    parser.add_argument('-cn', '--classnum', dest='classnum', type=int,
                            default=90, help='Classes num of the model.')
    parser.add_argument('-model', '--model', dest='model', type=str,
                            default='ssd_mobilenet_v1_coco_11_06_2017', help='the model name you want to run.')
    parser.add_argument('-src', '--source', dest='video_source', type=int,
                            default=0, help='Device index of the camera.')
    parser.add_argument('-wd', '--width', dest='width', type=int,
                            default=480, help='Width of the frames in the video stream.')
    parser.add_argument('-ht', '--height', dest='height', type=int,
                            default=360, help='Height of the frames in the video stream.')
    parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
                            default=2, help='Number of workers.')
    parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
                            default=5, help='Size of the queue.')
    args = parser.parse_args()
    
    # Path to frozen detection graph. This is the actual model that is used for the object detection.
    MODEL_NAME = args.model
    PATH_TO_CKPT = os.path.join(CWD_PATH, 'sd_model', MODEL_NAME, 'frozen_inference_graph.pb')
    
    # List of the strings that are used to add the correct label to each box.
    PATH_TO_LABELS = os.path.join(CWD_PATH, 'sd_model', MODEL_NAME, 'pascal_label_map.pbtxt')
    
    NUM_CLASSES = args.classnum
    
    # Loading label map
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
                                                                use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    
    
    def detect_objects(image_np, sess, detection_graph):
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    
        # Each box represents a part of the image where a particular object was detected.
        boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    
        # Each score represents the confidence level for each detected object.
        # Scores are shown on the result image, together with the class label.
        scores = detection_graph.get_tensor_by_name('detection_scores:0')
        classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
    
        # Actual detection.
        (boxes, scores, classes, num_detections) = sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})
    
        # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8)
        return image_np
    
    
    def worker(input_q, output_q):
        # Load a (frozen) Tensorflow model into memory.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
    
            sess = tf.Session(graph=detection_graph)
    
        fps = FPS().start()
        while True:
            fps.update()
            frame = input_q.get()
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            output_q.put(detect_objects(frame_rgb, sess, detection_graph))
    
        fps.stop()
        sess.close()
    
    
    if __name__ == '__main__':
        # Argument parsing happens at module level (above) rather than here,
        # because PATH_TO_CKPT, PATH_TO_LABELS, and NUM_CLASSES are derived
        # from args and must exist before the Pool workers are spawned.
        
        logger = multiprocessing.log_to_stderr()
        logger.setLevel(multiprocessing.SUBDEBUG)
    
        input_q = Queue(maxsize=args.queue_size)
        output_q = Queue(maxsize=args.queue_size)
        pool = Pool(args.num_workers, worker, (input_q, output_q))
    
        video_capture = WebcamVideoStream(src=args.video_source,
                                          width=args.width,
                                          height=args.height).start()
        fps = FPS().start()
    
        while True:  # fps._numFrames < 120
            frame = video_capture.read()
            input_q.put(frame)
    
            t = time.time()
    
            output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
            cv2.imshow('Video', output_rgb)
            fps.update()
    
            print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
    
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    
        fps.stop()
        print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
        print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
    
        pool.terminate()
        video_capture.stop()
        cv2.destroyAllWindows()
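
    This is essentially the section 2.1 script with the model name, label map path, and class count made configurable via command-line flags; the detection and worker logic is unchanged.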
    

    Invocation:

    python sd_video_detect.py --classnum=2 \
                              --model=ssd_model_2017_12_15 \
                              --source=0 \
                              --width=640 \
                              --height=480
    classnum: number of object classes in the model (two in my case);
    model: which model to use;
    source: which video source to use;
    width: frame width in pixels;
    height: frame height in pixels;
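
    Note that classnum is passed to convert_label_map_to_categories as max_num_classes, so it should match the number of item entries in your pascal_label_map.pbtxt.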
    

    The results are shown in the screenshots (omitted): black tea (1), pear (1), pear (2).

    If you need the source code, contact me on QQ: 479066524


      Comments

      • f7672f94cfa7: If you detect faces in video with a large number of faces, how is the speed? Does the video object detection in the post stutter?
        下里巴人也: @海岸线_0be1 No, it doesn't stutter.
