
Image Processing and coco-mobilenet with Android JNI

Author: 码农小白two | Published 2019-11-22 12:04

    Without further ado: if you just want to lift the code and get something running quickly, go straight to GitHub: https://github.com/Frank1481906280/CV4Android
    1. For the OpenCV JNI configuration you can follow any of the existing blog posts; the CMakeLists.txt I use is pasted below and is basically all you need. The prerequisite is that the NDK (and CMake) are set up, which can all be done from within Android Studio.

    # For more information about using CMake with Android Studio, read the
    # documentation: https://d.android.com/studio/projects/add-native-code.html
    
    # Sets the minimum version of CMake required to build the native library.
    
    cmake_minimum_required(VERSION 3.4.1)
    # ##################### OpenCV environment ############################
    # Set the path to the OpenCV-android-sdk
    set( OpenCV_DIR D:/opencv-3.4.6-android/OpenCV-android-sdk/sdk/native/jni )
    find_package(OpenCV REQUIRED )
    if(OpenCV_FOUND)
        include_directories(${OpenCV_INCLUDE_DIRS})
        message(STATUS "OpenCV library status:")
        message(STATUS "    version: ${OpenCV_VERSION}")
        message(STATUS "    libraries: ${OpenCV_LIBS}")
        message(STATUS "    include path: ${OpenCV_INCLUDE_DIRS}")
    else(OpenCV_FOUND)
        message(FATAL_ERROR "OpenCV library not found")
    endif(OpenCV_FOUND)
    
    # Creates and names a library, sets it as either STATIC
    # or SHARED, and provides the relative paths to its source code.
    # You can define multiple libraries, and CMake builds them for you.
    # Gradle automatically packages shared libraries with your APK.
    
    add_library( # Sets the name of the library.
                 native-lib
    
                 # Sets the library as a shared library.
                 SHARED
    
                 # Provides a relative path to your source file(s).
                 native-lib.cpp )
    
    # Searches for a specified prebuilt library and stores the path as a
    # variable. Because CMake includes system libraries in the search path by
    # default, you only need to specify the name of the public NDK library
    # you want to add. CMake verifies that the library exists before
    # completing its build.
    
    find_library( # Sets the name of the path variable.
                  log-lib
    
                  # Specifies the name of the NDK library that
                  # you want CMake to locate.
                  log )
    
    # Specifies libraries CMake should link to your target library. You
    # can link multiple libraries, such as libraries you define in this
    # build script, prebuilt third-party libraries, or system libraries.
    
    target_link_libraries( # Specifies the target library.
                           native-lib
                           ${OpenCV_LIBS}
                           jnigraphics
                           # Links the target library to the log library
                           # included in the NDK.
                           ${log-lib} )
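
    One small detail worth calling out: the name given to add_library above ("native-lib") is exactly what the Java side must pass to System.loadLibrary. A minimal sketch of the usual Android Studio template code, for reference:

    public class MainActivity extends AppCompatActivity {
        // Must match add_library( native-lib ... ) in CMakeLists.txt;
        // the packaged file is libnative-lib.so.
        static {
            System.loadLibrary("native-lib");
        }
        // ... native method declarations and the rest of the activity ...
    }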
    

    2. For regular image processing I recommend #include <android/bitmap.h>, which lets you turn the Bitmap obtained on the Java side directly into an OpenCV Mat. Example:

    extern "C"
    JNIEXPORT void JNICALL
    Java_com_example_cv4android_MainActivity_pic2binary(JNIEnv *env, jobject thiz, jobject bitmap) {
        // TODO: implement pic2binary()
        AndroidBitmapInfo info; void *pixels;
        CV_Assert(AndroidBitmap_getInfo(env, bitmap, &info) >= 0);
        CV_Assert(info.format == ANDROID_BITMAP_FORMAT_RGBA_8888 || info.format == ANDROID_BITMAP_FORMAT_RGB_565);
        CV_Assert(AndroidBitmap_lockPixels(env, bitmap, &pixels) >= 0);
        CV_Assert(pixels); if (info.format == ANDROID_BITMAP_FORMAT_RGBA_8888)
        {
            Mat temp(info.height, info.width, CV_8UC4, pixels);
            Mat gray;
            cvtColor(temp, gray, COLOR_RGBA2GRAY);
            adaptiveThreshold(gray,gray,255,1,0,5,10);
            cvtColor(gray, temp, COLOR_GRAY2RGBA);
        } else
        {
            Mat temp(info.height, info.width, CV_8UC2, pixels);
            Mat gray;
            cvtColor(temp, gray, COLOR_RGB2GRAY);
            adaptiveThreshold(gray,gray,255,1,1,5,10);
            cvtColor(gray, temp, COLOR_GRAY2RGB);
        }
        AndroidBitmap_unlockPixels(env, bitmap);
    }
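
    On the Java side the call is a one-liner. A minimal sketch, assuming a native declaration matching the JNI symbol above (the binarize helper and the ImageView are just for illustration); note that the bitmap must be a mutable ARGB_8888 (or RGB_565) copy so the native code can write back into its pixels:

    // Declared in MainActivity, matching Java_com_example_cv4android_MainActivity_pic2binary.
    private native void pic2binary(Bitmap bitmap);

    private void binarize(ImageView view, Bitmap source) {
        // Work on a mutable copy so AndroidBitmap_lockPixels can modify it in place.
        Bitmap work = source.copy(Bitmap.Config.ARGB_8888, true);
        pic2binary(work);
        view.setImageBitmap(work);
    }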
    

    Once the image is in the Mat type OpenCV expects, you can run pretty much any OpenCV API on it. That said, I did hit one pit =.= : bilateralFilter seems to require a CV_8UC3 input, and neither converting RGBA->RGB nor CV_8UC4->CV_8UC3 directly got it working for me. (For reference, bilateralFilter only accepts 8-bit or floating-point, 1- or 3-channel sources, and it does not work in place, so the destination must be a Mat distinct from the source.)
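
    If you want to sanity-check the filter from the Java side instead, the OpenCV Java bindings (which this project also pulls in; see the commented-out Dnn call further down) make the required shape explicit. A minimal sketch, not the author's code, using org.opencv.android.Utils, org.opencv.core.Mat and org.opencv.imgproc.Imgproc, and assuming the OpenCV library has already been initialised (e.g. via OpenCVLoader.initDebug()):

    // Sketch: bilateralFilter wants a 1- or 3-channel source and a separate
    // destination Mat, so convert RGBA -> RGB first and filter into a new Mat.
    private Bitmap bilateralDemo(Bitmap source) {
        Bitmap bmp = source.copy(Bitmap.Config.ARGB_8888, true);
        Mat rgba = new Mat();
        Utils.bitmapToMat(bmp, rgba);                          // CV_8UC4 (RGBA)

        Mat rgb = new Mat();
        Imgproc.cvtColor(rgba, rgb, Imgproc.COLOR_RGBA2RGB);   // CV_8UC3

        Mat filtered = new Mat();                              // must not alias rgb
        Imgproc.bilateralFilter(rgb, filtered, 9, 75, 75);

        Imgproc.cvtColor(filtered, rgba, Imgproc.COLOR_RGB2RGBA);
        Utils.matToBitmap(rgba, bmp);
        return bmp;
    }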
    3. Loading coco-mobilenet: I use a TensorFlow frozen .pb model. The tricky part of this step is getting the model's file path, resolved in Java, across to the JNI side.
    3.1 First, let's look at how it is used in JNI:

    extern "C"
    JNIEXPORT void JNICALL
    Java_com_example_cv4android_MainActivity_load2JNI_1SSD(JNIEnv *env, jobject thiz, jstring pbpath,
                                                           jstring configpath) {
        // TODO: implement load2JNI_SSD()
        const char *filePath_1 = env->GetStringUTFChars(pbpath, 0);
        const char *filePath_2 = env->GetStringUTFChars(configpath, 0);
        net=readNetFromTensorflow(filePath_1,filePath_2);
        LOGE("加载分类器文件成功");
        env->ReleaseStringUTFChars(pbpath, filePath_1);
        env->ReleaseStringUTFChars(pbpath, filePath_2);
    }
    

    3.2 How pbpath and configpath are produced in Java:

    private void LoadModel2() {
        String pbpath = getPath("frozen_inference_graph.pb", this);
        String configpath = getPath("graph.pbtxt", this);
        load2JNI_SSD(pbpath, configpath);
        //net = Dnn.readNetFromTensorflow(pbpath, configpath);
        flag2 = true;   // presumably marks the model as loaded and ready
    }

    private native void load2JNI_SSD(String pbpath, String configpath);

    // Copies an asset to internal storage and returns a plain filesystem
    // path that the native code can open directly.
    private static String getPath(String file, Context context) {
        AssetManager assetManager = context.getAssets();
        BufferedInputStream inputStream = null;
        try {
            // Read data from assets.
            inputStream = new BufferedInputStream(assetManager.open(file));
            byte[] data = new byte[inputStream.available()];
            inputStream.read(data);
            inputStream.close();
            // Create a copy of the file in internal storage.
            File outFile = new File(context.getFilesDir(), file);
            FileOutputStream os = new FileOutputStream(outFile);
            os.write(data);
            os.close();
            // Return a path to the copy, which can be read in the usual way.
            return outFile.getAbsolutePath();
        } catch (IOException ex) {
            Log.e("COCO-NET", "Failed to copy asset file: " + file);
        }
        return "";
    }
    

    The idea is simple: we put the model file and the config file under the assets directory, copy each asset out to internal storage to obtain a real filesystem path, and pass that path down to our JNI code.
    4. Now let's look straight at how this model is used to run object detection on a Bitmap!

    extern "C"
    JNIEXPORT jintArray JNICALL
    Java_com_example_cv4android_MainActivity_SSD2detetct(JNIEnv *env, jobject thiz, jobject bitmap) {
        // TODO: implement SSD2detetct()
        vector<int> location_vec;
        AndroidBitmapInfo info; void *pixels;
        CV_Assert(AndroidBitmap_getInfo(env, bitmap, &info) >= 0);
        CV_Assert(info.format == ANDROID_BITMAP_FORMAT_RGBA_8888 || info.format == ANDROID_BITMAP_FORMAT_RGB_565);
        CV_Assert(AndroidBitmap_lockPixels(env, bitmap, &pixels) >= 0);
        CV_Assert(pixels); if (info.format == ANDROID_BITMAP_FORMAT_RGBA_8888)
        {
            LOGE("分析识别");
            Mat temp(info.height, info.width, CV_8UC4, pixels);
            cvtColor(temp,temp,COLOR_RGBA2RGB);
            int IN_WIDTH = 300;
            int IN_HEIGHT = 300;
            float WH_RATIO = (float)IN_WIDTH / IN_HEIGHT;
            double IN_SCALE_FACTOR = 0.007843;
            double MEAN_VAL = 127.5;
            double THRESHOLD = 0.2;
            //resize(temp,temp,Size(IN_HEIGHT,IN_WIDTH));
            Mat blob=blobFromImage(temp,IN_SCALE_FACTOR,Size(IN_WIDTH,IN_HEIGHT),Scalar(MEAN_VAL,MEAN_VAL,MEAN_VAL),
                                   false, false);
            net.setInput(blob);
            Mat detections =net.forward();
            Mat detectionMat(detections.size[2], detections.size[3], CV_32F, detections.ptr<float>());
            for(int i=0;i<detectionMat.rows;i++){
                float confidence = detectionMat.at<float>(i, 2);
    
                if (confidence > THRESHOLD)
                {
                    size_t objectClass = (size_t)(detectionMat.at<float>(i, 1));
                    int tl_x = static_cast<int>(detectionMat.at<float>(i, 3) * temp.cols);
                    int tl_y = static_cast<int>(detectionMat.at<float>(i, 4) * temp.rows);
                    int br_x = static_cast<int>(detectionMat.at<float>(i, 5) * temp.cols);
                    int br_y = static_cast<int>(detectionMat.at<float>(i, 6) * temp.rows);
                    String label = format("%s: %.2f", classNames[objectClass], confidence);
                    location_vec.push_back(tl_x);
                    location_vec.push_back(tl_y);
                    location_vec.push_back(br_x);
                    location_vec.push_back(br_y);
                    classname.push_back(label);
                    LOGE("location: %d,%d,%d,%d\n",tl_x,tl_y,br_x,br_y);
                    LOGE("label: %s",label.c_str());
                    //rectangle(temp, Point(tl_x, tl_y), Point(br_x, br_y), Scalar(255,155,155),3);
                    //putText(temp, label, Point(tl_x, tl_y), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
                }
            }
            //cvtColor(temp,temp,COLOR_RGB2RGBA);
        } else
        {
           //基本为彩色图像,这里我就没写了,需要的可以自己更改
            Mat temp(info.height, info.width, CV_8UC2, pixels);
    
        }
        AndroidBitmap_unlockPixels(env, bitmap);
        int vecSize=location_vec.size();
        if (vecSize == 0) return 0;
        jintArray jarr = env->NewIntArray(vecSize);
        //2.获取数组指针
        jint *PCarr = env->GetIntArrayElements(jarr, NULL);
        //3.赋值
        int i = 0;
        for(; i < vecSize; i++){
            PCarr[i] = location_vec.at(i);
        }
        location_vec.clear();
        //4.释放资源
        env->ReleaseIntArrayElements(jarr, PCarr, 0);
        //5.返回数组
        return jarr;
    }
    

    In this function I send the detected classes and box coordinates back to the Java layer (the coordinates via the returned int array, the labels via the native classname cache), and the Java layer draws the text and rectangles with Canvas, so I did not include that part here; a rough sketch of the plumbing follows.
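
    A minimal sketch of that Java-side plumbing, not the author's code: declare the native method to match the JNI symbol above, call it with the bitmap, and draw the returned [left, top, right, bottom, ...] quadruples onto a mutable copy with Canvas. Fetching the labels cached in the native classname vector would need its own accessor, which the post does not show, so only the boxes are drawn here.

    // Matches Java_com_example_cv4android_MainActivity_SSD2detetct; returns four
    // ints per detection (left, top, right, bottom), or null if nothing was found.
    private native int[] SSD2detetct(Bitmap bitmap);

    private Bitmap drawDetections(Bitmap source) {
        Bitmap result = source.copy(Bitmap.Config.ARGB_8888, true);
        int[] boxes = SSD2detetct(result);
        if (boxes == null || boxes.length == 0) return result;

        Canvas canvas = new Canvas(result);
        Paint paint = new Paint();
        paint.setStyle(Paint.Style.STROKE);
        paint.setStrokeWidth(3f);
        paint.setColor(Color.RED);

        // Each detection contributed four consecutive values to the array.
        for (int i = 0; i + 3 < boxes.length; i += 4) {
            canvas.drawRect(boxes[i], boxes[i + 1], boxes[i + 2], boxes[i + 3], paint);
        }
        return result;
    }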

    To wrap up: just sharing this here so I don't forget it myself later on =- =.
