
ncnn Source Code Reading Notes (3)

Author: 半笔闪 | Published 2019-10-10 15:11

    Continuing from the previous post, which ended with this piece of code:

    // hand the parsed parameters to the corresponding layer object
    int lr = layer->load_param(pd);
    if (lr != 0)
    {
          fprintf(stderr, "layer load_param failed\n");
          continue;
    }
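    

    The pd here is the ParamDict that was filled from this layer's line of the .param file in the previous step. For reference, a layer line in the human-readable .param format looks roughly like the one below (the values are made up for illustration); the trailing key=value pairs are exactly the layer-specific parameters that the layer's own load_param reads back out:

    Convolution conv1 1 1 data conv1 0=64 1=3 3=1 5=1 6=36864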
    

    This snippet hands the layer-specific parameters over to the layer object that was created for the corresponding layer type. The layer types currently supported are the following (from layer_type_enum.h, generated while ncnn is being compiled); a short sketch after the list shows how these values relate to create_layer:

    AbsVal = 0,
    ArgMax = 1,
    BatchNorm = 2,
    Bias = 3,
    BNLL = 4,
    Concat = 5,
    Convolution = 6,
    Crop = 7,
    Deconvolution = 8,
    Dropout = 9,
    Eltwise = 10,
    ELU = 11,
    Embed = 12,
    Exp = 13,
    Flatten = 14,
    InnerProduct = 15,
    Input = 16,
    Log = 17,
    LRN = 18,
    MemoryData = 19,
    MVN = 20,
    Pooling = 21,
    Power = 22,
    PReLU = 23,
    Proposal = 24,
    Reduction = 25,
    ReLU = 26,
    Reshape = 27,
    ROIPooling = 28,
    Scale = 29,
    Sigmoid = 30,
    Slice = 31,
    Softmax = 32,
    Split = 33,
    SPP = 34,
    TanH = 35,
    Threshold = 36,
    Tile = 37,
    RNN = 38,
    LSTM = 39,
    BinaryOp = 40,
    UnaryOp = 41,
    ConvolutionDepthWise = 42,
    Padding = 43,
    Squeeze = 44,
    ExpandDims = 45,
    Normalize = 46,
    Permute = 47,
    PriorBox = 48,
    DetectionOutput = 49,
    Interp = 50,
    DeconvolutionDepthWise = 51,
    ShuffleChannel = 52,
    InstanceNorm = 53,
    Clip = 54,
    Reorg = 55,
    YoloDetectionOutput = 56,
    Quantize = 57,
    Dequantize = 58,
    Yolov3DetectionOutput = 59,
    PSROIPooling = 60,
    ROIAlign = 61,
    Packing = 62,
    Requantize = 63,
    Cast = 64,
    HardSigmoid = 65,
    SELU = 66,
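    

    As far as I can tell from the generated files, these enum values follow the registration order, so each value is exactly the index accepted by create_layer(int) discussed further below. A quick sketch, assuming layer_type.h exposes the enum as ncnn::LayerType:

    // both calls should create the same kind of layer
    ncnn::Layer* by_index = ncnn::create_layer(ncnn::LayerType::Convolution); // index 6
    ncnn::Layer* by_name  = ncnn::create_layer("Convolution");                // needs NCNN_STRING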
    

    Now look at this in layer.cpp:

    int Layer::load_param(const ParamDict& /*pd*/)
    {
        return 0;
    }
    

    In layer.h you can see:

    // load layer specific parameter from parsed dict
    // return 0 if success
    virtual int load_param(const ParamDict& pd);
    

    load_param(const ParamDict& pd) is a virtual function. In C++, the point of the virtual function here is to let the subclasses that inherit from Layer implement it; at call time the subclass's implementation is invoked automatically. Again taking convolution as the example:
    look at convolution.h and convolution.cpp under src/layer, where:

    class Convolution : public Layer
    {
    public:
        Convolution();
    
        virtual int load_param(const ParamDict& pd);
    

    The Convolution class inherits from Layer, so when we create the layer in net.cpp's load_param

    Layer* layer = create_layer(layer_type);
    

    and then call layer->load_param(pd), what actually runs is the load_param implemented in convolution.cpp.
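
    To get a feel for what that override does, here is an abbreviated sketch of Convolution::load_param. The exact parameter ids and the full field list should be checked against convolution.cpp, so treat this as illustrative rather than a verbatim copy:

    int Convolution::load_param(const ParamDict& pd)
    {
        num_output = pd.get(0, 0);       // id 0: number of output channels
        kernel_w = pd.get(1, 0);         // id 1: kernel width
        stride_w = pd.get(3, 1);         // id 3: stride
        bias_term = pd.get(5, 0);        // id 5: whether a bias term is present
        weight_data_size = pd.get(6, 0); // id 6: size of the weight blob
        // ... further fields omitted
        return 0;
    }
    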


    Speaking of create_layer(layer_type), let's look at what this function does.
    The code is in layer.h and layer.cpp.

    static const layer_registry_entry layer_registry[] =
    {
    #include "layer_registry.h"
    };
    // total number of registered layers
    static const int layer_registry_entry_count = sizeof(layer_registry) / sizeof(layer_registry_entry);
    #if NCNN_STRING
    // convert the incoming type name (const char* type) into the corresponding index
    int layer_to_index(const char* type)
    {
        for (int i=0; i<layer_registry_entry_count; i++)
        {
            // match the name against each registered layer and return its index
            if (strcmp(type, layer_registry[i].name) == 0)
                return i;
        }
    
        return -1;
    }
    // create a layer from its type name (const char* type)
    Layer* create_layer(const char* type)
    {
        int index = layer_to_index(type);
        if (index == -1)
            return 0;
    
        return create_layer(index);   // the actual creation happens here
    }
    #endif // NCNN_STRING
    // create a layer from its index
    Layer* create_layer(int index)
    {
        if (index < 0 || index >= layer_registry_entry_count)
            return 0;
    
        layer_creator_func layer_creator = layer_registry[index].creator;
        if (!layer_creator)
            return 0;
    
        Layer* layer = layer_creator();
        layer->typeindex = index;
        return layer;
    }
    

    Here is how the layer actually gets created; the key line is

    layer_creator_func layer_creator = layer_registry[index].creator;
    

    So where does layer_registry come from? It is

    static const layer_registry_entry layer_registry[] =
    {
    #include "layer_registry.h"
    };
    

    Now look at layer_registry.h, a file generated when ncnn finishes compiling. It is too long to quote in full, so here is just the convolution entry:

    #if NCNN_STRING
    {"Convolution",Convolution_final_layer_creator},
    #else
    {Convolution_final_layer_creator},
    #endif
    

    The element type of layer_registry is layer_registry_entry, whose definition can be found in layer.h:

    struct layer_registry_entry
    {
    #if NCNN_STRING
        // layer type name
        const char* name;
    #endif // NCNN_STRING
        // layer factory entry
        layer_creator_func creator;
    };
    

    layer_registry is simply an array of layer_registry_entry, one entry per layer type. When we want to create a layer, we look up the corresponding layer_registry_entry by index and call its creator. This is a bit like reflection in Java: invoking a struct's (or class's) function through a string.
    The type of creator is layer_creator_func:

    // layer factory function
    typedef Layer* (*layer_creator_func)();
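    

    If the name-to-function-pointer pattern is unfamiliar, here is a tiny standalone toy (not ncnn code) that shows the same idea of turning a string into an object through a table of creator functions:

    #include <cstdio>
    #include <cstring>
    
    struct Base { virtual ~Base() {} virtual const char* who() const { return "Base"; } };
    struct Conv : Base { virtual const char* who() const { return "Conv"; } };
    
    typedef Base* (*creator_func)();                 // same shape as layer_creator_func
    static Base* create_conv() { return new Conv; }  // analogous to a *_layer_creator function
    
    struct entry { const char* name; creator_func creator; };
    static const entry registry[] = { {"Conv", create_conv} };
    static const int registry_count = sizeof(registry) / sizeof(registry[0]);
    
    static Base* create(const char* name)
    {
        for (int i = 0; i < registry_count; i++)
            if (strcmp(name, registry[i].name) == 0)
                return registry[i].creator();        // string -> function pointer -> new object
        return 0;
    }
    
    int main()
    {
        Base* b = create("Conv");
        printf("%s\n", b->who());                    // prints "Conv"
        delete b;
        return 0;
    }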
    

    Sticking with convolution: layer_registry.h shows that the creator for convolution is Convolution_final_layer_creator. Calling creator means calling through the layer_creator_func function pointer, which here invokes the function Convolution_final_layer_creator(). At the end of layer.h there is this:

    #define DEFINE_LAYER_CREATOR(name) \
        ::ncnn::Layer* name##_layer_creator() { return new name; }
    // For Convolution_final_layer_creator(), shouldn't name correspond to Convolution_final?
    

    For the Convolution_final_layer_creator function it looks, at first glance, as if name were simply Convolution, i.e. the creator would just new a Convolution object and return it. That raises the question: shouldn't name correspond to Convolution_final? I asked the experts in the ncnn group and finally got an answer:
    when ncnn finishes compiling it also generates a layer_declaration.h file, which contains this section for convolution:

    #include "layer/convolution.h"
    #include "layer/x86/convolution_x86.h"
    namespace ncnn {
    class Convolution_final : virtual public Convolution, virtual public Convolution_x86
    {
    public:
        virtual int create_pipeline(const Option& opt) {
            { int ret = Convolution::create_pipeline(opt); if (ret) return ret; }
            { int ret = Convolution_x86::create_pipeline(opt); if (ret) return ret; }
            return 0;
        }
        virtual int destroy_pipeline(const Option& opt) {
            { int ret = Convolution_x86::destroy_pipeline(opt); if (ret) return ret; }
            { int ret = Convolution::destroy_pipeline(opt); if (ret) return ret; }
            return 0;
        }
    };
    DEFINE_LAYER_CREATOR(Convolution_final)
    } // namespace ncnn
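    

    Expanding the macro by hand ties this back to the question above: with name substituted as Convolution_final, DEFINE_LAYER_CREATOR(Convolution_final) should expand to roughly

    ::ncnn::Layer* Convolution_final_layer_creator() { return new Convolution_final; }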
    

    You can see that Convolution_final inherits from Convolution, so the object the creator news is a Convolution_final, and calls on it dispatch to whichever functions the subclass overrides or inherits.
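
    Putting the pieces together, a minimal usage sketch of the whole factory-plus-virtual-dispatch path might look like this. The header names and ParamDict::set are my assumptions here, so check them against the actual ncnn headers:

    #include "layer.h"     // ncnn::Layer, ncnn::create_layer
    #include "paramdict.h" // ncnn::ParamDict
    
    int main()
    {
        // "Convolution" -> registry index -> Convolution_final_layer_creator()
        ncnn::Layer* layer = ncnn::create_layer("Convolution");
        if (!layer)
            return -1;
    
        ncnn::ParamDict pd;
        pd.set(0, 64); // illustrative id/value pairs, e.g. num_output
        pd.set(1, 3);  // e.g. kernel size
    
        // the virtual call dispatches to Convolution::load_param
        int lr = layer->load_param(pd);
    
        delete layer;
        return lr;
    }
    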
