Preface
Face detection and tracking are widely used in surveillance/security and consumer products, for example face unlock on phones and beauty apps. One of the classic face detection algorithms, the Haar cascade classifier, has long been integrated into OpenCV; Haar features are simple and can be computed very quickly via the integral image, but their accuracy is limited. Deep-learning-based object detectors have since appeared in rapid succession, and face detection, as a special case, has developed a series of dedicated algorithms such as cascaded CNN detectors and MTCNN.
MTCNN is a deep-learning face detection algorithm that performs not only detection but also face alignment; its details are not covered here.
This article takes an open-source face detection and tracking project, wraps it into a Python interface with pybind11, and runs face detection and tracking from Python.
Development and Test Environment
- Windows 10, 64-bit
- Anaconda3, with Python 3.7
- Visual Studio 2017
- PyCharm
- OpenCV 3.4.0
- ncnn (Tencent's open-source deep learning inference framework)
NCNN Environment Setup
Steps:
1. Download the ncnn library:
https://github.com/Tencent/ncnn
2. Configure and generate the Visual Studio project with CMake.
3. Build the project in Visual Studio to produce ncnn.lib.
Python API Wrapping
Inherit from the faceTrack class, wrap it as a Python-facing class, and register it with pybind11:
PYBIND11_MODULE(face_tracking_demo, m) {
    NDArrayConverter::init_numpy();
    py::class_<FaceTracker>(m, "FaceTracker")
        .def(py::init<>())
        .def("trackerInit", &FaceTracker::trackerInit, py::arg("model_path"), py::arg("min_face"))
        .def("trackerUpdate", &FaceTracker::trackerUpdate, py::arg("img"));
}
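Once the module is compiled, the wrapped class can be used from Python like any other class; the NDArrayConverter registered above takes care of converting numpy arrays to cv::Mat. The short snippet below only illustrates the resulting interface: the module name follows the PYBIND11_MODULE declaration, while the image and model paths are placeholders.
import cv2
import face_tracking_demo as demo

tracker = demo.FaceTracker()
tracker.trackerInit(model_path='./models', min_face=40)  # directory containing the MTCNN model files
frame = cv2.imread('face.jpg')                           # any BGR image loaded as a numpy array
print(tracker.trackerUpdate(frame))                      # prints the face box as [x1, y1, x2, y2]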
demo.cpp
#include <opencv2/opencv.hpp>
#include "include/ncnn_mtcnn_tld_so.hpp"
#include <stdio.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include "ndarray_converter.h"   // cv::Mat <-> numpy.ndarray conversion

using namespace cv;
using namespace std;
namespace py = pybind11;

// Thin wrapper around the open-source faceTrack class, exposed to Python.
class FaceTracker : private faceTrack
{
public:
    FaceTracker() {}    // the faceTrack base is default-constructed automatically
    ~FaceTracker() {}

public:
    // Load the MTCNN models from model_path; min_face is the minimum face size to detect.
    void trackerInit(const std::string& model_path, const int min_face) {
        this->Init(model_path, min_face);
    }

    // Run detection/tracking on one frame and return the box as [x1, y1, x2, y2].
    std::vector<int> trackerUpdate(cv::Mat& image) {
        cv::Rect rect;
        this->DetectFace(rect, image);
        return vector<int>{rect.x, rect.y, rect.x + rect.width, rect.y + rect.height};
    }

public:
    std::string version = "v1.0.0";
};
// Standalone C++ test program; disabled when building the Python module.
#if 0
int main() {
    cv::VideoCapture capture;
    capture.open("./test.avi");
    cv::Mat frame;

    faceTrack tracker;
    std::string modelPath = "./models";
    int minFace = 40;
    tracker.Init(modelPath, minFace);

    while (capture.read(frame)) {
        int q = cv::waitKey(1);
        if (q == 27) break;   // ESC to quit

        cv::Rect result;
        double t1 = (double)getTickCount();
        tracker.DetectFace(result, frame);
        printf("total %gms\n", ((double)getTickCount() - t1) * 1000 / getTickFrequency());
        printf("------------------\n");

        rectangle(frame, result, Scalar(0, 0, 255), 2);
        imshow("frame", frame);
        // outputVideo << frame;
    }
    // outputVideo.release();
    capture.release();
    cv::destroyAllWindows();
    return 0;
}
#endif // 0
#if 1
// pybind11 module definition: compiled as the Python module face_tracking_demo.
PYBIND11_MODULE(face_tracking_demo, m) {
    NDArrayConverter::init_numpy();   // register the numpy <-> cv::Mat converter
    py::class_<FaceTracker>(m, "FaceTracker")
        .def(py::init<>())
        .def("trackerInit", &FaceTracker::trackerInit, py::arg("model_path"), py::arg("min_face"))
        .def("trackerUpdate", &FaceTracker::trackerUpdate, py::arg("img"));
}
#endif
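In this project the extension itself is built with Visual Studio, linking against ncnn.lib, OpenCV, and the face-tracking library. For reference only, a setuptools-based build might look roughly like the sketch below; the include/library paths, the library names, and the converter source file are placeholders that have to be adapted to your local setup.
# setup.py -- illustrative sketch only; paths and library names are placeholders
from pybind11.setup_helpers import Pybind11Extension, build_ext
from setuptools import setup

ext = Pybind11Extension(
    "face_tracking_demo",
    sources=["demo.cpp", "ndarray_converter.cpp"],   # converter .cpp that accompanies ndarray_converter.h
    include_dirs=["path/to/ncnn/include", "path/to/opencv/include"],
    library_dirs=["path/to/ncnn/lib", "path/to/opencv/lib"],
    libraries=["ncnn", "opencv_world340"],           # plus the prebuilt face-tracking library, if any
)

setup(
    name="face_tracking_demo",
    ext_modules=[ext],
    cmdclass={"build_ext": build_ext},
)
The module would then be built with "python setup.py build_ext --inplace", producing a .pyd that can be imported directly.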
Python Test Code
import demo16.face_tracking_demo as demo
import cv2

capture = cv2.VideoCapture()
capture.open('./demo16/test.avi')

tracker = demo.FaceTracker()
tracker.trackerInit(model_path='./demo16/models/', min_face=40)

while True:
    ret, frame = capture.read()
    if not ret:
        print('Finish!')
        break
    # trackerUpdate returns the face box as [x1, y1, x2, y2]
    rect = tracker.trackerUpdate(frame)
    cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (0, 255, 255), 2)
    cv2.imshow('tracking', frame)
    cv2.waitKey(33)

capture.release()
cv2.destroyAllWindows()
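The C++ demo above prints the time spent on each DetectFace call; the same measurement is easy to reproduce on the Python side. The sketch below assumes the module has been built as face_tracking_demo and that the models sit in ./models/; it reads from the default webcam instead of a video file and prints the per-frame latency of trackerUpdate.
import time
import cv2
import face_tracking_demo as demo   # adjust the import to where the compiled module lives

tracker = demo.FaceTracker()
tracker.trackerInit(model_path='./models/', min_face=40)

capture = cv2.VideoCapture(0)        # default webcam
while True:
    ret, frame = capture.read()
    if not ret:
        break
    t0 = time.perf_counter()
    x1, y1, x2, y2 = tracker.trackerUpdate(frame)
    print('detect+track: %.1f ms' % ((time.perf_counter() - t0) * 1000))
    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 255), 2)
    cv2.imshow('tracking', frame)
    if cv2.waitKey(1) == 27:         # ESC to quit
        break
capture.release()
cv2.destroyAllWindows()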
Tracking Results
- video1 (result screenshot omitted)
- video2 (result screenshot omitted)
- video3 (result screenshot omitted)
End
Thanks to 甜心 for the support.