Deploying a PyTorch Classification Model with OpenVINO (C++)

Author: 1037号森林里一段干木头 | Published 2021-05-21 11:37

Abstract:
This article focuses on how to call the OpenVINO C++ API, building a complete demo around the inference engine's workflow.


[Figure: OpenVINO inference engine workflow]

1. Model preparation

First convert the PyTorch model to ONNX format (PyTorch can export this directly; a sketch follows below). Then, after installing OpenVINO, the Model Optimizer command shown after the sketch converts the ONNX file into the .bin and .xml IR files that OpenVINO needs.
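For reference, a minimal export sketch; the torchvision resnet18 here is only a stand-in for the article's classifier5 network, and the 1x3x224x224 dummy input is an assumption to adjust to your own model:

import torch
import torchvision

# Stand-in network; substitute your own trained classifier (assumption)
model = torchvision.models.resnet18(pretrained=True)
model.eval()

# Dummy NCHW input; the exported graph is traced with this shape
dummy = torch.randn(1, 3, 224, 224)
torch.onnx.export(model, dummy, "classifier5.onnx",
                  input_names=["input"], output_names=["output"],
                  opset_version=11)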

python "G:\openVINO\install\openvino_2021\deployment_tools\model_optimizer\mo.py" --input_model="K:\classifier5.onnx" --output_dir="K:\\model\\5" --model_name="classifier5" --data_type=FP16

2. Configuring OpenVINO + OpenCV in VS2017

Include Directories

Library Directories

Additional Dependencies
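As a rough guide, with OpenVINO 2021 installed as in the Model Optimizer command above, these three settings typically contain something like the following (the OpenCV paths and version number are assumptions; adjust everything to your own setup):

Include Directories:
    G:\openVINO\install\openvino_2021\deployment_tools\inference_engine\include
    <your OpenCV directory>\include

Library Directories:
    G:\openVINO\install\openvino_2021\deployment_tools\inference_engine\lib\intel64\Release
    <your OpenCV directory>\x64\vc15\lib

Additional Dependencies:
    inference_engine.lib
    opencv_worldXXX.lib   (XXX = your OpenCV version)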

3. Notes

Following the OpenVINO inference engine's workflow, you first read the model ---> prepare the input blob and output blob ---> load the model onto the device ---> create an infer request. After that you can keep feeding images in for prediction. The source below follows this sequence step by step.

4. Source code

#include <iostream>
#include <vector>
#include <string>
#include <ctime>                 // clock() used in speedTest
#include <opencv2/opencv.hpp>
#include <inference_engine.hpp>


//read the .xml/.bin IR pair into a CNNNetwork; returns 0 on success, -1 on failure
int readModel(const std::string &binPath, const std::string &xmlPath, InferenceEngine::CNNNetwork &model)
{
    InferenceEngine::Core ie;
    if (binPath.empty() || xmlPath.empty())
    {
        return -1;
    }
    try
    {
        model = ie.ReadNetwork(xmlPath, binPath);
    }
    catch (...)
    {
        return -1;
    }

    return 0;
}

//fill the input blob from the image, run inference, return 1 (pos) or -1 (neg)
int predict(InferenceEngine::CNNNetwork &model,
    InferenceEngine::InferRequest &infer_request,
    cv::Mat &image)
{
    std::string input_name = model.getInputsInfo().begin()->first;
    std::string output_name = model.getOutputsInfo().begin()->first;

    InferenceEngine::Blob::Ptr input = infer_request.GetBlob(input_name);
    InferenceEngine::Blob::Ptr output = infer_request.GetBlob(output_name);

    //dims = {N, C, H, W}; cv::Size expects (width, height)
    const InferenceEngine::SizeVector dims = input->getTensorDesc().getDims();
    cv::resize(image, image, cv::Size(dims[3], dims[2]));

    size_t channels_number = dims[1];
    size_t image_size = dims[2] * dims[3];

    //copy the HWC uint8 pixels into the NCHW FP32 blob, scaling to [0, 1]
    auto input_data = input->buffer()
        .as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type *>();
    for (size_t pid = 0; pid < image_size; ++pid)
    {
        for (size_t ch = 0; ch < channels_number; ++ch)
        {
            input_data[ch * image_size + pid] = image.at<cv::Vec3b>(pid)[ch] / 255.0f;
        }
    }

    infer_request.Infer();

    //binary classifier: index 1 wins -> positive (1), otherwise negative (-1)
    auto output_data = output->buffer()
        .as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type *>();
    return (output_data[0] < output_data[1]) ? 1 : -1;
}

//read the model, set I/O precision and layout, load it onto the CPU, and create an infer request
int initiateModel(const std::string &binPath, const std::string &xmlPath,
    InferenceEngine::CNNNetwork &model,
    InferenceEngine::InferRequest &infer_request)
{
    InferenceEngine::Core ie;
    
    int readFlag = readModel(binPath, xmlPath, model);
    if (readFlag == -1)
    {
        //read model failed
        return -1;
    }

    //prepare input blob: NCHW layout, FP32 precision
    InferenceEngine::InputInfo::Ptr input_info = model.getInputsInfo().begin()->second;
    input_info->setLayout(InferenceEngine::Layout::NCHW);
    input_info->setPrecision(InferenceEngine::Precision::FP32);

    //prepare output blob: FP32 precision
    InferenceEngine::DataPtr output_info = model.getOutputsInfo().begin()->second;
    output_info->setPrecision(InferenceEngine::Precision::FP32);

    //load model to device
    InferenceEngine::ExecutableNetwork  executable_network = ie.LoadNetwork(model, "CPU");

    //create infer request
    infer_request = executable_network.CreateInferRequest();

    return 0;
}

int demo()
{
    std::string binPath = "K:\\model\\5\\classifier5.bin";
    std::string xmlPath = "K:\\model\\5\\classifier5.xml";
    std::string imagePath = "K:\\imageData\\polarity\\data5\\val\\neg\\00114.bmp";
    InferenceEngine::CNNNetwork model;
    InferenceEngine::InferRequest infer_request;
    //initialize the model
    if (initiateModel(binPath, xmlPath, model, infer_request) != 0)
    {
        return -1;
    }

    cv::Mat image = cv::imread(imagePath);
    if (image.empty()) {
        return -1;
    }
    //predict() fills the blob assuming RGB channel order
    cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
    int cls = predict(model, infer_request, image);

    if (cls == 1)
    {
        std::cout << "pos\n";
    }
    else if (cls == -1)
    {
        std::cout << "neg\n";
    }

    return 0;
}

int getAllImagePath(std::string path, std::vector<cv::String> &imagePathList)
{
    cv::glob(path, imagePathList);
    return 0;
}

int accTest()
{
    std::vector<cv::String> imagePathList;
    std::string binPath = "K:\\model\\5\\classifier5.bin";
    std::string xmlPath = "K:\\model\\5\\classifier5.xml";
    std::string folder = "K:\\imageData\\polarity\\data5\\val\\neg";
    InferenceEngine::CNNNetwork model;
    InferenceEngine::InferRequest infer_request;
    //initialize the model
    if (initiateModel(binPath, xmlPath, model, infer_request) != 0)
    {
        return -1;
    }


    getAllImagePath(folder, imagePathList);
    int neg = 0;
    int pos = 0;
    int item;
    for (auto &path : imagePathList)
    {
        cv::Mat image = cv::imread(path);
        if (image.empty()) {
            continue;   //skip unreadable files instead of aborting the whole test
        }
        cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
        item = predict(model, infer_request, image);
        if (item == -1)
        {
            neg += 1;
        }
        else if (item == 1)
        {
            pos += 1;
        }
    }
    float all = static_cast<float>(pos + neg);
    if (all == 0) return -1;
    //the folder contains only "neg" samples, so neg/all is the accuracy here
    printf("pos=%d, neg=%d, fraction pos=%f, fraction neg=%f\n", pos, neg, pos / all, neg / all);

    return 0;
}


int speedTest()
{
    std::string binPath = "K:\\model\\5\\classifier5.bin";
    std::string xmlPath = "K:\\model\\5\\classifier5.xml";
    std::string imagePath = "K:\\imageData\\polarity\\data5\\val\\neg\\00114.bmp";
    InferenceEngine::CNNNetwork model;
    InferenceEngine::InferRequest infer_request;
    //initialize the model
    if (initiateModel(binPath, xmlPath, model, infer_request) != 0)
    {
        return -1;
    }

    cv::Mat image = cv::imread(imagePath);
    if (image.empty()) {
        return -1;
    }
    cv::cvtColor(image, image, cv::COLOR_BGR2RGB);

    int num = 1000;
    clock_t startTime = clock();
    for (int i = 0; i < num; i++)   predict(model, infer_request, image);
    clock_t endTime = clock();

    std::cout << "inference one image " << num << "times run time: " << double(endTime - startTime) * 1000 / CLOCKS_PER_SEC << "ms\n";
    return 0;
}



int main()
{
    demo();
    accTest();
    speedTest();

    return 0;
}

5. Results

