Using solvePnP to Compute Distance and Angle

Author: mooin | Published 2020-02-12 14:10

    I've recently been using OpenCV for distance measurement, and complete working examples are hard to find online, so I decided to write one up. I referenced this article:
    1. Opencv:SolvePNP
    OpenCV version used in this post: 4.1.1
    This post won't go over the theory behind PnP; there are plenty of explanations online, and I don't claim I could write a better one. The complete code is at the end. If you need any related material, just leave a comment; I check every day.


    1. Camera Calibration

    To recover coordinates in the 3D world, you first need to calibrate the camera. For the procedure, see my other post on calibrating in MATLAB and verifying with OpenCV. I solve for the calibration parameters in MATLAB because its camera calibrator is a point-and-click GUI that requires no code and is very easy to use.
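    If you would rather do everything in OpenCV, below is a minimal calibration sketch using calibrateCamera. This is not part of my original workflow, and the chessboard size (9x6 inner corners), the 25 mm square size, and the calib_*.png file pattern are placeholder assumptions; adjust them to your own calibration target:

    #include <opencv2/opencv.hpp>
    #include <iostream>
    #include <vector>
    using namespace std;
    using namespace cv;
    int main()
    {
        // Assumed 9x6 inner-corner chessboard with 25 mm squares -- adjust to your target.
        Size boardSize(9, 6);
        float squareSize = 25.0f;   // use the same unit you want the pose results in

        // The board's corner layout in its own coordinate system (Z = 0 plane).
        vector<Point3f> boardCorners;
        for (int i = 0; i < boardSize.height; i++)
            for (int j = 0; j < boardSize.width; j++)
                boardCorners.push_back(Point3f(j * squareSize, i * squareSize, 0));

        vector<vector<Point3f>> objectPoints;
        vector<vector<Point2f>> imagePoints;
        Size imageSize;

        vector<String> files;
        glob("calib_*.png", files);   // hypothetical file pattern for the calibration shots
        for (const auto &f : files)
        {
            Mat gray = imread(f, IMREAD_GRAYSCALE);
            vector<Point2f> corners;
            if (findChessboardCorners(gray, boardSize, corners))
            {
                cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
                             TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.001));
                imagePoints.push_back(corners);
                objectPoints.push_back(boardCorners);
                imageSize = gray.size();
            }
        }

        Mat cameraMatrix, distCoeffs;
        vector<Mat> rvecs, tvecs;
        double rms = calibrateCamera(objectPoints, imagePoints, imageSize,
                                     cameraMatrix, distCoeffs, rvecs, tvecs);
        cout << "RMS reprojection error: " << rms << endl;
        cout << "cameraMatrix:" << endl << cameraMatrix << endl;
        cout << "distCoeffs:" << endl << distCoeffs << endl;
        return 0;
    }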

    2. Measuring World Coordinates

    The PnP problem needs at least four point correspondences (OpenCV's default iterative solver requires a minimum of four points). In other words, in the image to be measured we must know at least four points and how they relate to one another, i.e. their relative coordinates. An example follows.
    This is the image to be measured:


    [Figure: 原图.png (the original image)]

    Now we need four points to serve as references. I obtained them with OpenCV's corner detection; the code is as follows:

    #include <iostream>
    #include <opencv2/opencv.hpp>
    using namespace std;
    using namespace cv;
    int main()
    {
        Mat original_image = imread("/root/桌面/IMG_3354.png");
        Mat gray_image;
        cvtColor(original_image, gray_image, COLOR_BGR2GRAY);   // convert to grayscale

        vector<Point2f> corners;      // receives the detected corner positions
        int maxCorners = 4;           // we want exactly four reference points
        double qualityLevel = 0.01;   // minimum accepted corner quality, relative to the best corner
        double minDistance = 10;      // minimum distance between detected corners
        int blockSize = 3;            // neighborhood size for the derivative covariance matrix
        double k = 0.04;              // Harris detector free parameter (unused here)

        // Mat() = no region-of-interest mask; false = use the Shi-Tomasi measure, not Harris
        goodFeaturesToTrack(gray_image, corners, maxCorners, qualityLevel, minDistance, Mat(), blockSize, false, k);

        cout << "Number of corners: " << corners.size() << endl;
        // draw the detected corners on the original image
        for (unsigned i = 0; i < corners.size(); i++)
        {
            circle(original_image, corners[i], 10, Scalar(10, 255, 0), -1, 8, 0);
            cout << "Corner coordinates: " << corners[i] << endl;
        }
        imwrite("角点.png", original_image);
        return 0;
    }
    
    

    The result is shown below:


    [Figure: 角点.png (detected corners)]

    The four green dots in the image are the detected corners (the image looks slightly different because the original was too large to upload, so it was cropped). The pixel coordinates of the four points returned by OpenCV are 1 (1275, 1968), 2 (1464, 2007), 3 (1303, 2102), 4 (1187, 2042). Pixel coordinates have their origin at the top-left corner of the image, with the x axis pointing right and the y axis pointing down.
    Next we need the real-world distances between these four points. Measuring them gives: 1 (0, 0), 2 (12.5, 2.5), 3 (2.5, 8), 4 (-4.5, 5), taking point 1 as the origin, with x positive to the right and y positive downward. I used mm here; whatever unit you measure in is the unit the results come out in.
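    One optional improvement, not something I did in the original run: goodFeaturesToTrack only returns pixel-accurate corners, so before solving PnP you can refine them to sub-pixel accuracy with cornerSubPix. A minimal sketch, assuming gray_image and corners from the detection code above:

        // Optional: polish the detected corners to sub-pixel accuracy before solving PnP.
        cornerSubPix(gray_image, corners,
                     Size(11, 11),   // half of the search window size
                     Size(-1, -1),   // no dead zone in the middle of the window
                     TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.001));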

    3. Computation

    3.1 Computing the Rotation Angles

        //compute the camera rotation angles (Z-Y-X Euler angles, converted to degrees)
        double theta_x, theta_y, theta_z;
        theta_x = atan2(rotM.at<double>(2, 1), rotM.at<double>(2, 2));
        theta_y = atan2(-rotM.at<double>(2, 0),
                        sqrt(rotM.at<double>(2, 1) * rotM.at<double>(2, 1) + rotM.at<double>(2, 2) * rotM.at<double>(2, 2)));
        theta_z = atan2(rotM.at<double>(1, 0), rotM.at<double>(0, 0));
        theta_x = theta_x * (180 / CV_PI);   //use CV_PI rather than a hand-rolled 3.14 for accuracy
        theta_y = theta_y * (180 / CV_PI);
        theta_z = theta_z * (180 / CV_PI);
    
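    These formulas assume the rotation matrix decomposes as R = Rz(theta_z) * Ry(theta_y) * Rx(theta_x) (Z-Y-X Euler angles). As a sanity check, not in my original code, you can rebuild the rotation matrix from the extracted angles and confirm it matches rotM:

        // Sanity check: rebuild R from the extracted Z-Y-X Euler angles and compare with rotM.
        // The thetas are in degrees at this point, so convert back to radians first.
        double rx = theta_x * CV_PI / 180, ry = theta_y * CV_PI / 180, rz = theta_z * CV_PI / 180;
        double cx = cos(rx), sx = sin(rx);
        double cy = cos(ry), sy = sin(ry);
        double cz = cos(rz), sz = sin(rz);
        Mat Rx = (Mat_<double>(3, 3) << 1, 0, 0, 0, cx, -sx, 0, sx, cx);
        Mat Ry = (Mat_<double>(3, 3) << cy, 0, sy, 0, 1, 0, -sy, 0, cy);
        Mat Rz = (Mat_<double>(3, 3) << cz, -sz, 0, sz, cz, 0, 0, 0, 1);
        cout << "reconstruction error: " << norm(Rz * Ry * Rx - rotM) << endl;   // should be ~0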

    3.2 Computing the Depth

        //compute the depth
        Mat P;
        P = rotM.t() * tvecs;   //R^T * t: matrix product of the transposed rotation matrix and the translation vector
    

    The z component of P is the depth. (The camera center in world coordinates is -rotM.t() * tvecs, so up to sign the z component of P is the perpendicular distance from the camera to the Z = 0 plane containing the four reference points.)
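    If what you actually want is the straight-line distance from the camera to point 1 (the world origin), you do not even need P: the translation vector tvecs is the world origin expressed in camera coordinates, so its norm is that distance. A one-line addition, not in the original code:

        // Straight-line distance from the camera center to point 1 (the world origin).
        // Rotation preserves length, so norm(tvecs) equals norm(rotM.t() * tvecs).
        cout << "distance to point 1: " << norm(tvecs) << endl;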


    The complete code is as follows:

    #include <opencv2/calib3d.hpp>
    #include <iostream>
    #include <opencv2/opencv.hpp>
    #include <fstream>
    using namespace std;
    using namespace cv;
    int main(){
        //camera intrinsic matrix and distortion coefficients, from the MATLAB calibration
        Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
        cameraMatrix.at<double>(0, 0) = 3374.07818952427;
        cameraMatrix.at<double>(0, 1) = -2.78181259296951;
        cameraMatrix.at<double>(0, 2) = 2019.19661037399;
        cameraMatrix.at<double>(1, 1) = 3374.34656463011;
        cameraMatrix.at<double>(1, 2) = 1501.95020619850;
        cameraMatrix.at<double>(2, 2) = 1;
    
        Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
        distCoeffs.at<double>(0, 0) =  0.173230511639020;
        distCoeffs.at<double>(1, 0) = -0.645138161101467;
        distCoeffs.at<double>(2, 0) = -0.00109294300160736;
        distCoeffs.at<double>(3, 0) = -3.47866401740176e-06;
        distCoeffs.at<double>(4, 0) = 0;
    
        //push the control points' world coordinates (measured earlier, in mm) into a container
        vector<Point3f> objP;
        objP.clear();
        objP.push_back(Point3f(0, 0, 0));
        objP.push_back(Point3f(12.5, 2.5, 0));
        objP.push_back(Point3f(2.5, 8, 0));
        objP.push_back(Point3f(-4.5, 5, 0));
    
        //push the pixel coordinates of the corners detected earlier into a container
        std::vector<Point2f> points;
        points.clear();
        points.push_back(Point2f(1275,1968));
        points.push_back(Point2f(1464,2007));
        points.push_back(Point2f(1303,2102));
        points.push_back(Point2f(1187,2042));
    
        //create the rotation and translation vectors (3x1)
        Mat rvecs = Mat::zeros(3,1,CV_64FC1);
        Mat tvecs = Mat::zeros(3,1,CV_64FC1);
    
        //solve PnP
        solvePnP(objP, points, cameraMatrix, distCoeffs, rvecs, tvecs);
        Mat rotM = Mat::eye(3, 3, CV_64F);
        Rodrigues(rvecs, rotM);   //convert the rotation vector into a rotation matrix
    
        //compute the camera rotation angles (Z-Y-X Euler angles, converted to degrees)
        double theta_x, theta_y, theta_z;
        theta_x = atan2(rotM.at<double>(2, 1), rotM.at<double>(2, 2));
        theta_y = atan2(-rotM.at<double>(2, 0),
                        sqrt(rotM.at<double>(2, 1) * rotM.at<double>(2, 1) + rotM.at<double>(2, 2) * rotM.at<double>(2, 2)));
        theta_z = atan2(rotM.at<double>(1, 0), rotM.at<double>(0, 0));
        theta_x = theta_x * (180 / CV_PI);
        theta_y = theta_y * (180 / CV_PI);
        theta_z = theta_z * (180 / CV_PI);
    
        //compute the depth (z component of P)
        Mat P;
        P = rotM.t() * tvecs;
    
        //print the results
        cout << "angles (degrees):" << endl;
        cout << theta_x << endl;
        cout << theta_y << endl;
        cout << theta_z << endl;
        cout << "P = " << P << endl;
    
        return 0;
    }
    
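    To check how well the estimated pose actually fits, you can reproject the four world points back into the image with projectPoints and compare them against the detected corners; a small error (a few pixels) means the pose is trustworthy. A minimal sketch, not in my original code, meant to go before the return statement above:

        // Reprojection check: project the world points into the image using the
        // estimated pose and compare with the detected pixel coordinates.
        vector<Point2f> reprojected;
        projectPoints(objP, rvecs, tvecs, cameraMatrix, distCoeffs, reprojected);
        double err = 0;
        for (size_t i = 0; i < points.size(); i++)
            err += norm(points[i] - reprojected[i]);   // per-point pixel error
        cout << "mean reprojection error (px): " << err / points.size() << endl;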

    Questions are welcome in the comments!
