The goal of this test is to implement face following: the servo-mounted camera turns to keep a detected face in view.
1 Hardware
Camera: 1
USB data cable: 1
Servo: 1
Arduino control board: 1
(A picture of the hardware setup should go here.)
1.1 Preparation
The usb_cam package is needed here:
$ cd ~/catkin_ws/src
$ git clone https://github.com/bosch-ros-pkg/usb_cam.git
$ cd ~/catkin_ws
$ catkin_make
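If ROS cannot find the new package after building, source the workspace setup file first (this assumes the default ~/catkin_ws layout used above):
$ source ~/catkin_ws/devel/setup.bash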
Note: the usb_cam node must be running before starting the programs below.
roslaunch usb_cam usb_cam-test.launch
Once this launch file is running, the node continuously publishes the /usb_cam/image_raw topic (screenshot omitted here).
The programs below use the data on this topic.
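To confirm that images are actually arriving, the publishing rate of the topic can be checked in another terminal (a quick sanity check; the exact rate depends on your camera):
$ rostopic hz /usb_cam/image_raw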
2 Program
2.1 Face detection node: it publishes a topic "chatter" carrying the x position of the face in the image.
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <std_msgs/Int16.h>
#include <opencv2/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core.hpp>

using namespace std;
using namespace cv;

CascadeClassifier face_cascade;
static const std::string OPENCV_WINDOW = "Raw Image window";

class Face_Detector
{
  ros::NodeHandle nh_;
  image_transport::ImageTransport it_;
  image_transport::Subscriber image_sub_;
  ros::Publisher chatter_pub;

public:
  Face_Detector()
    : it_(nh_)
  {
    // Subscribe to the input video feed and advertise the face-position topic
    image_sub_ = it_.subscribe("/usb_cam/image_raw", 1,
      &Face_Detector::imageCb, this);
    chatter_pub = nh_.advertise<std_msgs::Int16>("chatter", 100);
    cv::namedWindow(OPENCV_WINDOW);
  }

  ~Face_Detector()
  {
    cv::destroyWindow(OPENCV_WINDOW);
  }

  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {
    cv_bridge::CvImagePtr cv_ptr;
    namespace enc = sensor_msgs::image_encodings;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }
    // Run face detection only if the image is large enough
    if (cv_ptr->image.rows > 400 && cv_ptr->image.cols > 600){
      detect_faces(cv_ptr->image);
      //chatter_pub.publish(position_x);
    }
  }

  void detect_faces(cv::Mat img)
  {
    RNG rng( 0xFFFFFFFF );
    std_msgs::Int16 position_x;
    int data;
    int lineType = 8;
    Mat frame_gray;
    cvtColor( img, frame_gray, COLOR_BGR2GRAY );
    equalizeHist( frame_gray, frame_gray );
    //-- Detect faces
    std::vector<Rect> faces;
    face_cascade.detectMultiScale( frame_gray, faces );
    if (faces.size() >= 1){
      // Use only the first detected face
      size_t i = 0;
      Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
      ellipse( img, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4 );
      // Publish the x coordinate of the face centre on "chatter"
      data = cvRound(faces[i].x + faces[i].width/2);
      position_x.data = data;
      chatter_pub.publish(position_x);
    }
    imshow(OPENCV_WINDOW, img);
    waitKey(3);
  }
};

int main(int argc, char** argv)
{
  // Read the cascade file path from the command line; mind the path
  CommandLineParser parser(argc, argv,
    "{help h||}"
    "{face_cascade|/home/junjun/projects/main2/haarcascade_frontalface_alt.xml|Path to face cascade.}");
  parser.about( "\nThis program demonstrates using the cv::CascadeClassifier class to detect objects (Face + eyes) in a video stream.\n"
    "You can use Haar or LBP features.\n\n" );
  parser.printMessage();
  String face_cascade_name = parser.get<String>("face_cascade");
  //-- 1. Load the face cascade
  if( !face_cascade.load( face_cascade_name ) )
  {
    cout << "--(!)Error loading face cascade\n";
    return -1;
  };
  ros::init(argc, argv, "Face_Detector");
  Face_Detector ic;
  ros::spin();
  return 0;
}
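The default cascade path hard-coded above is specific to the author's machine. Since the node uses cv::CommandLineParser, the path can also be overridden when launching it; the path below is only a placeholder for wherever haarcascade_frontalface_alt.xml lives on your system:
$ rosrun cv_bridge_tutorial_pkg sample_cv_bridge_node --face_cascade=/path/to/haarcascade_frontalface_alt.xml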
2.1.1 The CMakeLists.txt file
This file is important; it can be used as a reference when writing similar programs later.
cmake_minimum_required(VERSION 2.8.3)
project(cv_bridge_tutorial_pkg)
find_package(catkin REQUIRED COMPONENTS
cv_bridge
image_transport
roscpp
sensor_msgs
std_msgs
)
find_package( OpenCV REQUIRED )
catkin_package()
include_directories(
${catkin_INCLUDE_DIRS}
${OpenCV_INCLUDE_DIRS}
)
add_executable(sample_cv_bridge_node src/sample_cv_bridge_node.cpp)
target_link_libraries(sample_cv_bridge_node
${catkin_LIBRARIES}
${OpenCV_LIBRARIES}
)
2.1.2 The package.xml file
<?xml version="1.0"?>
<package>
<name>cv_bridge_tutorial_pkg</name>
<version>0.0.0</version>
<description>The cv_bridge_tutorial_pkg package</description>
<maintainer email="junjunn@todo.todo">lentin</maintainer>
<license>TODO</license>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>cv_bridge</build_depend>
<build_depend>image_transport</build_depend>
<build_depend>roscpp</build_depend>
<build_depend>sensor_msgs</build_depend>
<build_depend>std_msgs</build_depend>
<run_depend>cv_bridge</run_depend>
<run_depend>image_transport</run_depend>
<run_depend>roscpp</run_depend>
<run_depend>sensor_msgs</run_depend>
<run_depend>std_msgs</run_depend>
<export>
</export>
</package>
2.2 Face-following control program (Arduino)
It subscribes to the topic "chatter".
#include <ros.h>
#include <std_msgs/Int16.h>
#include <Servo.h>

ros::NodeHandle nh;
Servo myservo;

int servoPin = 9;
int servo_step_distance = 5; // Step size: tunable. Too large overshoots and oscillates; too small and the servo will not move.
int position_x = 90;         // current servo angle in degrees, start at the centre
int screenmaxx = 640;        // image width in pixels
int center_offset = 80;      // dead band around the image centre
int center_right = (screenmaxx / 2) - center_offset;
int center_left = (screenmaxx / 2) + center_offset;

void messageCb(std_msgs::Int16 servo_msg){
  digitalWrite(13, HIGH);
  int position = servo_msg.data;
  // When the face is on the left side of the frame, turn the servo left.
  if (position > center_left){
    position_x -= servo_step_distance;
    if (position_x >= 10 && position_x <= 170)
      myservo.write(position_x);
  }
  // When the face is on the right side of the frame, turn the servo right.
  else if (position < center_right){
    position_x += servo_step_distance;
    if (position_x >= 10 && position_x <= 170)
      myservo.write(position_x);
  }
  delay(1);
  digitalWrite(13, LOW);
}

ros::Subscriber<std_msgs::Int16> sub("chatter", &messageCb );

void setup()
{
  pinMode(13, OUTPUT);
  myservo.attach(servoPin);
  nh.initNode();
  nh.subscribe(sub);
  myservo.write(position_x); // move the servo to its initial (centre) position
}

void loop()
{
  nh.spinOnce();
  delay(1);
}
Flash the program onto the Arduino board.
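The servo side can also be tested on its own by publishing fake face positions, without running the camera or the detection node (this assumes roscore and the rosserial serial_node from section 3 below are already running; 500 is just an arbitrary pixel x value, and the stream is stopped with Ctrl-C):
$ rostopic pub -r 10 /chatter std_msgs/Int16 "data: 500"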
3 Running
First run: roscore
Then: roslaunch usb_cam usb_cam-test.launch
Then: rosrun rosserial_python serial_node.py /dev/ttyACM0
Finally: rosrun cv_bridge_tutorial_pkg sample_cv_bridge_node
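To check that the detector is actually publishing face positions, echo the topic in another terminal; the values shown are the x coordinates that the Arduino receives:
$ rostopic echo /chatter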
A video would be the right thing to put here, but that does not seem to be supported, so I posted a short clip on Douyin; a final screenshot will have to do.