
mmPose HRnetv2 pytorch -> onnx -> opencv

Author: 小白CV | Published 2022-02-24 10:55

    HRnetv2 pytorch -> onnx -> OpenCV inference

    1. Download the model and clone the project

    Download the pretrained model from the official mmpose documentation: https://mmpose.readthedocs.io/zh_CN/latest/topics/hand%282d%29.html


    Clone the project:

    ```bash
    git clone https://github.com/open-mmlab/mmpose
    ```
    

    Required libraries:
    onnx
    onnxruntime
    mmpose
    mmcv
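
    A quick way to confirm the environment is ready (a trivial sanity check; it only assumes the four packages above are installed, e.g. via pip):

    ```python
    # Print the versions of the required packages to confirm the environment is ready.
    import onnx
    import onnxruntime
    import mmcv
    import mmpose

    for pkg in (onnx, onnxruntime, mmcv, mmpose):
        print(pkg.__name__, pkg.__version__)
    ```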

    2. Model conversion: pytorch -> onnx

    In the mmpose root directory, run:

    ```bash
    python tools/deployment/pytorch2onnx.py configs/hand/2d_kpt_sview_rgb_img/topdown_heatmap/coco_wholebody_hand/hrnetv2_w18_coco_wholebody_hand_256x256.py /workspace/downloads/hrnetv2_w18_coco_wholebody_hand_256x256-1c028db7_20210908.pth --output-file hrnetv2_w18_coco_wholebody_hand_256x256.onnx
    ```
    
    The export prints some warnings. To avoid other problems later, follow the approach circulating online and remove the initializers from the graph inputs:
    
    ```bash
    python remove_initializer_from_input.py --input your_old_model.onnx --output your_new_model.onnx
    ```

    The code of remove_initializer_from_input.py is as follows:

    ```python
    import onnx
    import argparse
    
    def get_args():
        parser = argparse.ArgumentParser()
        parser.add_argument("--input", required=True, help="input model")
        parser.add_argument("--output", required=True, help="output model")
        args = parser.parse_args()
        return args
    
    def remove_initializer_from_input():
        args = get_args()
    
        model = onnx.load(args.input)
        if model.ir_version < 4:
            print(
                'Model with ir_version below 4 requires initializers to be included in graph inputs'
            )
            return
    
        inputs = model.graph.input
        name_to_input = {}
        for input in inputs:
            name_to_input[input.name] = input
    
        for initializer in model.graph.initializer:
            if initializer.name in name_to_input:
                inputs.remove(name_to_input[initializer.name])
    
        onnx.save(model, args.output)
    
    if __name__ == '__main__':
        remove_initializer_from_input()
    ```
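
    After stripping the initializers it is worth sanity-checking the cleaned model before moving on to OpenCV. A minimal check using onnx and onnxruntime (both already listed as requirements); the file name and the expected (1, 21, 64, 64) output shape are assumptions for this particular hand model:

    ```python
    # Sanity-check the cleaned ONNX model: graph check + one dummy forward pass.
    import numpy as np
    import onnx
    import onnxruntime as ort

    model_path = "your_new_model.onnx"  # assumed name, as used in the command above

    # Structural check of the ONNX graph.
    onnx.checker.check_model(onnx.load(model_path))

    # Run a dummy 1x3x256x256 input through onnxruntime on the CPU.
    sess = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
    inp = sess.get_inputs()[0]
    print("input :", inp.name, inp.shape)

    dummy = np.random.rand(1, 3, 256, 256).astype(np.float32)
    out = sess.run(None, {inp.name: dummy})[0]
    print("output:", out.shape)  # expected to be (1, 21, 64, 64) for this hand model
    ```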
    

    3. OpenCV inference code

    ```cpp
    #include <opencv2/dnn.hpp>
    #include <opencv2/imgproc.hpp>
    #include <opencv2/highgui.hpp>
    
    using namespace cv;
    using namespace cv::dnn;
    #include <iostream>
    using namespace std;
    // connection table, in the format [model_id][pair_id][from/to]
    // please look at the nice explanation at the bottom of:
    // https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/output.md
    //
    const int POSE_PAIRS[3][20][2] = {
    {   // COCO body
        {1,2}, {1,5}, {2,3},
        {3,4}, {5,6}, {6,7},
        {1,8}, {8,9}, {9,10},
        {1,11}, {11,12}, {12,13},
        {1,0}, {0,14},
        {14,16}, {0,15}, {15,17}
    },
    {   // MPI body
        {0,1}, {1,2}, {2,3},
        {3,4}, {1,5}, {5,6},
        {6,7}, {1,14}, {14,8}, {8,9},
        {9,10}, {14,11}, {11,12}, {12,13}
    },
    {   // hand
        {0,1}, {1,2}, {2,3}, {3,4},         // thumb
    {0,5}, {5,6}, {6,7}, {7,8},         // index finger
        {0,9}, {9,10}, {10,11}, {11,12},    // middle
        {0,13}, {13,14}, {14,15}, {15,16},  // ring
    {0,17}, {17,18}, {18,19}, {19,20}   // pinky (little finger)
    } };
    int main(int argc, char **argv)
    {
        CommandLineParser parser(argc, argv,
            "{ h help           | false     | print this help message }"
            "{ p proto          |           | (required) model configuration, e.g. hand/pose.prototxt }"
            "{ m model          |           | (required) model weights, e.g. hand/pose_iter_102000.caffemodel }"
            "{ i image          |           | (required) path to image file (containing a single person, or hand) }"
            "{ d dataset        |           | specify what kind of model was trained. It could be (COCO, MPI, HAND) depends on dataset. }"
            "{ width            |  368      | Preprocess input image by resizing to a specific width. }"
            "{ height           |  368      | Preprocess input image by resizing to a specific height. }"
            "{ t threshold      |  0.1      | threshold or confidence value for the heatmap }"
            "{ s scale          |  0.003922 | scale for blob }"
        );
        //    cv::String modelTxt = samples::findFile(parser.get<string>("proto"));
        //    cv::String modelBin = samples::findFile(parser.get<string>("model"));
        //    cv::String imageFile = samples::findFile(parser.get<String>("image"));
        //    cv::String dataset = parser.get<cv::String>("dataset");
        //    int W_in = parser.get<int>("width");
        //    int H_in = parser.get<int>("height");
        //    float thresh = parser.get<float>("threshold");
        //    float scale  = parser.get<float>("scale");
    
        cv::String modelOnnx = "C:\\Users\\haihan\\Desktop\\new_hrnetv2_w18_coco_wholebody_hand_256x256.onnx";
        cv::String modelTxt = "E:\\openpose_pose_coco.prototxt";
        cv::String modelBin = "E:\\code\\openpose-1.7.0\\models\\pose\\coco\\pose_iter_440000.caffemodel";
        cv::String imageFile = "E:\\code\\openpose-1.7.0\\examples\\media\\1.png";
        cv::String dataset = "COCO";
        int W_in = 256;
        int H_in = 256;
        float thresh = 0.1f;
        float scale = 0.003922f;
    
        if (parser.get<bool>("help") || modelTxt.empty() || modelBin.empty() || imageFile.empty())
        {
            cout << "A sample app to demonstrate human or hand pose detection with a pretrained OpenPose dnn." << endl;
            parser.printMessage();
            return 0;
        }
        int midx, npairs, nparts;
        if (!dataset.compare("COCO")) { midx = 0; npairs = 17; nparts = 18; }
        else if (!dataset.compare("MPI")) { midx = 1; npairs = 14; nparts = 16; }
        else if (!dataset.compare("HAND")) { midx = 2; npairs = 20; nparts = 22; }
        else
        {
            std::cerr << "Can't interpret dataset parameter: " << dataset << std::endl;
            exit(-1);
        }
        // read the network model
        Net net = readNetFromONNX(modelOnnx);
        
        // and the image
        Mat img = imread(imageFile);
        if (img.empty())
        {
            std::cerr << "Can't read image from the file: " << imageFile << std::endl;
            exit(-1);
        }
        Mat img_rgb;
        cvtColor(img, img_rgb, COLOR_BGR2RGB);
        // send it through the network
        Mat inputBlob = blobFromImage(img_rgb, scale, Size(W_in, H_in), Scalar(0, 0, 0), false, false);
        net.setInput(inputBlob);
        Mat result = net.forward();
        // the result is an array of "heatmaps", the probability of a body part being in location x,y
        int H = result.size[2];
        int W = result.size[3];
        // find the position of the body parts
        vector<Point> points(22);
        for (int n = 0; n < nparts; n++)
        {
            // Slice heatmap of corresponding body's part.
            Mat heatMap(H, W, CV_32F, result.ptr(0, n));
            // 1 maximum per heatmap
            Point p(-1, -1), pm;
            double conf;
            minMaxLoc(heatMap, 0, &conf, 0, &pm);
            if (conf > thresh)
                p = pm;
            points[n] = p;
        }
        // connect body parts and draw it !
        float SX = float(img.cols) / W;
        float SY = float(img.rows) / H;
        for (int n = 0; n < npairs; n++)
        {
            // lookup 2 connected body/hand parts
            Point2f a = points[POSE_PAIRS[midx][n][0]];
            Point2f b = points[POSE_PAIRS[midx][n][1]];
            // we did not find enough confidence before
            if (a.x <= 0 || a.y <= 0 || b.x <= 0 || b.y <= 0)
                continue;
            // scale to image size
            a.x *= SX; a.y *= SY;
            b.x *= SX; b.y *= SY;
            line(img, a, b, Scalar(0, 200, 0), 2);
            circle(img, a, 3, Scalar(0, 0, 200), -1);
            circle(img, b, 3, Scalar(0, 0, 200), -1);
        }
        imshow("OpenPose", img);
        waitKey();
        return 0;
    }
    ```

    The code above is adapted from code found online (it closely follows OpenCV's OpenPose sample). The skeleton-drawing part is wrong and still needs improvement, most likely because dataset is left as "COCO", so the COCO body connection table and part count are applied to the 21-keypoint hand model. A hedged Python sketch of a corrected pipeline is given below.
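
    For reference, here is a minimal Python sketch of the same idea using the 21-keypoint hand connection table. It is a sketch, not the article's code: the model path, the image path, the 256x256 input size, the 21-channel 64x64 heatmap output and the ImageNet-style mean/std normalization are assumptions based on the mmpose config, not taken from the C++ sample above.

    ```python
    # Hedged sketch: OpenCV-DNN inference for the 21-keypoint wholebody-hand model.
    import cv2
    import numpy as np

    MODEL = "your_new_model.onnx"  # assumed path
    IMAGE = "hand.png"             # assumed path; the image should be a cropped hand

    # 21-keypoint hand skeleton: wrist -> thumb, index, middle, ring, pinky.
    HAND_PAIRS = [(0, 1), (1, 2), (2, 3), (3, 4),
                  (0, 5), (5, 6), (6, 7), (7, 8),
                  (0, 9), (9, 10), (10, 11), (11, 12),
                  (0, 13), (13, 14), (14, 15), (15, 16),
                  (0, 17), (17, 18), (18, 19), (19, 20)]

    net = cv2.dnn.readNetFromONNX(MODEL)
    img = cv2.imread(IMAGE)

    # Assumed mmpose preprocessing: resize to 256x256, RGB, ImageNet mean/std.
    inp = cv2.cvtColor(cv2.resize(img, (256, 256)), cv2.COLOR_BGR2RGB).astype(np.float32)
    mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
    std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
    blob = np.ascontiguousarray(((inp - mean) / std).transpose(2, 0, 1)[None])  # NCHW

    net.setInput(blob)
    heatmaps = net.forward()  # assumed shape (1, 21, 64, 64)
    _, num_parts, H, W = heatmaps.shape

    # One argmax per heatmap, mapped back to original image coordinates.
    points = []
    for k in range(num_parts):
        _, conf, _, (x, y) = cv2.minMaxLoc(heatmaps[0, k])
        if conf > 0.1:
            points.append((int(x * img.shape[1] / W), int(y * img.shape[0] / H)))
        else:
            points.append(None)

    # Draw only the pairs where both keypoints were confidently found.
    for a, b in HAND_PAIRS:
        if points[a] is not None and points[b] is not None:
            cv2.line(img, points[a], points[b], (0, 200, 0), 2)
            cv2.circle(img, points[a], 3, (0, 0, 200), -1)
            cv2.circle(img, points[b], 3, (0, 0, 200), -1)

    cv2.imshow("hand pose", img)
    cv2.waitKey()
    ```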

    TO-DO

    • HRnetv2 pytorch to ONNX conversion
    • OpenCV ONNX inference
    • OpenCV DNN CUDA acceleration (see the sketch below)
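
    For the last item: OpenCV's DNN module can be asked to run the network on CUDA, provided OpenCV (4.2 or newer) was built with CUDA support. A minimal sketch in Python; the same two calls exist in C++ (net.setPreferableBackend / net.setPreferableTarget):

    ```python
    import cv2

    net = cv2.dnn.readNetFromONNX("your_new_model.onnx")  # assumed path

    # Request the CUDA backend; OpenCV falls back to CPU (with a warning)
    # if it was not built with CUDA support.
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)  # or DNN_TARGET_CUDA_FP16 on newer GPUs
    ```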
