(24) x264_macroblock_load_pic_pointers

Author: 奔向火星005 | Published 2018-09-28 22:58

    The x264_macroblock_load_pic_pointers function fills the mb structure with the memory pointers for one macroblock and loads that macroblock's pixel data, so that the subsequent encoding steps can work on it. Before going through the function, first look at the structure x264 uses to hold a macroblock's pixel data and pointers:

    struct {
        //... fields omitted

        struct
        {
            //... fields omitted

            /* pointer over mb of the frame to be compressed */
            pixel *p_fenc[3]; /* y,u,v */   //points to the local copy of the macroblock to be encoded
            /* pointer to the actual source frame, not a block copy */
            pixel *p_fenc_plane[3];   //points to the macroblock's pixels inside the original source frame

            /* pointer over mb of the frame to be reconstructed  */
            pixel *p_fdec[3];   //points to the macroblock to be reconstructed

            /* pointer over mb of the references */
            int i_fref[2];    //number of active reference frames in list 0 / list 1
            /* [12]: yN, yH, yV, yHV, (NV12 ? uv : I444 ? (uN, uH, uV, uHV, vN, ...)) */
            pixel *p_fref[2][X264_REF_MAX*2][12];  //reference-frame pixel pointers; p_fref[0] is list 0 (forward), p_fref[1] is list 1 (backward)
            pixel *p_fref_w[X264_REF_MAX*2];  /* weighted fullpel luma */

            /* fref stride */
            int     i_stride[3];
        } pic;

        //... fields omitted
    } mb;
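
    Note that p_fenc and p_fdec do not point into the frame buffers directly: x264 copies each macroblock into small, cache-friendly per-macroblock buffers with fixed strides (FENC_STRIDE is 16 and FDEC_STRIDE is 32 in current x264), while p_fenc_plane keeps pointing into the full source plane, whose stride is i_stride[i]. A minimal sketch of the two ways of addressing the same luma pixel (x, y) of the current macroblock, assuming x264_macroblock_load_pic_pointers has already run and the frame is progressive:

    /* same source pixel, seen through the compact per-MB copy and through the frame plane */
    pixel enc_local = h->mb.pic.p_fenc[0][ y*FENC_STRIDE + x ];                  /* local 16x16 copy, stride 16 */
    pixel enc_frame = h->mb.pic.p_fenc_plane[0][ y*h->mb.pic.i_stride[0] + x ];  /* same pixel in the source frame */
    /* after the load, enc_local == enc_frame for 0 <= x, y < 16 */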
    

    The source of x264_macroblock_load_pic_pointers is as follows:

    static void ALWAYS_INLINE x264_macroblock_load_pic_pointers( x264_t *h, int mb_x, int mb_y, int i, int b_chroma, int b_mbaff )
    {
        //i==0 selects the luma plane, i==1 the chroma plane
        //the comments below assume progressive scan, i.e. mb_interlaced == 0
        int mb_interlaced = b_mbaff && MB_INTERLACED;
        int height = b_chroma ? 16 >> CHROMA_V_SHIFT : 16;
        int i_stride = h->fdec->i_stride[i];
        int i_stride2 = i_stride << mb_interlaced;
        int i_pix_offset = mb_interlaced
                         ? 16 * mb_x + height * (mb_y&~1) * i_stride + (mb_y&1) * i_stride
                         : 16 * mb_x + height * mb_y * i_stride; //i_pix_offset = 16 * mb_x + height * mb_y * i_stride;
        pixel *plane_fdec = &h->fdec->plane[i][i_pix_offset];
        //progressive: fdec_idx = !(mb_y&1)
        int fdec_idx = b_mbaff ? (mb_interlaced ? (3 + (mb_y&1)) : (mb_y&1) ? 2 : 4) : !(mb_y&1);
        pixel *intra_fdec = &h->intra_border_backup[fdec_idx][i][mb_x*16];
        int ref_pix_offset[2] = { i_pix_offset, i_pix_offset };
        /* ref_pix_offset[0] references the current field and [1] the opposite field. */
        if( mb_interlaced )
            ref_pix_offset[1] += (1-2*(mb_y&1)) * i_stride;
        h->mb.pic.i_stride[i] = i_stride2;
        h->mb.pic.p_fenc_plane[i] = &h->fenc->plane[i][i_pix_offset];
        if( b_chroma )
        {
            h->mc.load_deinterleave_chroma_fenc( h->mb.pic.p_fenc[1], h->mb.pic.p_fenc_plane[1], i_stride2, height );
            memcpy( h->mb.pic.p_fdec[1]-FDEC_STRIDE, intra_fdec, 8*sizeof(pixel) );
            memcpy( h->mb.pic.p_fdec[2]-FDEC_STRIDE, intra_fdec+8, 8*sizeof(pixel) );
            h->mb.pic.p_fdec[1][-FDEC_STRIDE-1] = intra_fdec[-1-8];
            h->mb.pic.p_fdec[2][-FDEC_STRIDE-1] = intra_fdec[-1];
        }
        else
        {
            //1. copy the source pixels from p_fenc_plane into the compact p_fenc buffer
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fenc[i], FENC_STRIDE, h->mb.pic.p_fenc_plane[i], i_stride2, 16 );
            //2. copy the bottom row of the macroblock directly above into the row just above the current reconstructed macroblock
            memcpy( h->mb.pic.p_fdec[i]-FDEC_STRIDE, intra_fdec, 24*sizeof(pixel) );
            h->mb.pic.p_fdec[i][-FDEC_STRIDE-1] = intra_fdec[-1];  //above-left neighbour pixel of the current reconstructed macroblock
        }
        if( b_mbaff || h->mb.b_reencode_mb )
        {
            for( int j = 0; j < height; j++ )
                if( b_chroma )
                {
                    h->mb.pic.p_fdec[1][-1+j*FDEC_STRIDE] = plane_fdec[-2+j*i_stride2];
                    h->mb.pic.p_fdec[2][-1+j*FDEC_STRIDE] = plane_fdec[-1+j*i_stride2];
                }
                else
                    h->mb.pic.p_fdec[i][-1+j*FDEC_STRIDE] = plane_fdec[-1+j*i_stride2];
        }
        pixel *plane_src, **filtered_src;
        for( int j = 0; j < h->mb.pic.i_fref[0]; j++ )
        {
            // Interpolate between pixels in same field.
            if( mb_interlaced )
            {
                plane_src = h->fref[0][j>>1]->plane_fld[i];
                filtered_src = h->fref[0][j>>1]->filtered_fld[i];
            }
            else
            {
                plane_src = h->fref[0][j]->plane[i];
                filtered_src = h->fref[0][j]->filtered[i];
            }
            h->mb.pic.p_fref[0][j][i*4] = plane_src + ref_pix_offset[j&1];  //progressive: ref_pix_offset[0] == ref_pix_offset[1] == i_pix_offset
    
            if( !b_chroma )
            {
                for( int k = 1; k < 4; k++ )
                    h->mb.pic.p_fref[0][j][i*4+k] = filtered_src[k] + ref_pix_offset[j&1];
                if( !i )
                {
                    if( h->sh.weight[j][0].weightfn )
                        h->mb.pic.p_fref_w[j] = &h->fenc->weighted[j >> mb_interlaced][ref_pix_offset[j&1]];
                    else
                        h->mb.pic.p_fref_w[j] = h->mb.pic.p_fref[0][j][0];
                }
            }
        }
        if( h->sh.i_type == SLICE_TYPE_B )
            for( int j = 0; j < h->mb.pic.i_fref[1]; j++ )
            {
                if( mb_interlaced )
                {
                    plane_src = h->fref[1][j>>1]->plane_fld[i];
                    filtered_src = h->fref[1][j>>1]->filtered_fld[i];
                }
                else
                {
                    plane_src = h->fref[1][j]->plane[i];
                    filtered_src = h->fref[1][j]->filtered[i];
                }
                h->mb.pic.p_fref[1][j][i*4] = plane_src + ref_pix_offset[j&1];
    
                if( !b_chroma )
                    for( int k = 1; k < 4; k++ )
                        h->mb.pic.p_fref[1][j][i*4+k] = filtered_src[k] + ref_pix_offset[j&1];
            }
    }
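
    For the progressive case the offset arithmetic is simple: macroblock (mb_x, mb_y) starts 16*mb_x pixels into the row and mb_y macroblock rows down the plane. A small illustrative helper (not part of x264) that mirrors the computation of i_pix_offset above:

    /* illustrative helper, not in x264: progressive offset of macroblock (mb_x, mb_y)
     * inside a plane of stride i_stride; mb_height is 16 for luma, 16 >> CHROMA_V_SHIFT for chroma */
    static int mb_pixel_offset( int mb_x, int mb_y, int i_stride, int mb_height )
    {
        return 16 * mb_x + mb_height * mb_y * i_stride;
    }
    /* e.g. luma macroblock (3, 2) in a plane with i_stride == 2048:
     * 16*3 + 16*2*2048 = 65584 pixels from the start of the plane */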
    

    As noted in the source comments, first look at how the macroblocks of the frame to be encoded and of the reconstructed frame are handled. Taking the Y plane of YUV as an example, this corresponds to p_fenc_plane[0] and p_fenc[0], as shown in the figure below:


    [Figure: mbpic00.png]
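
    The luma path of the function boils down to two plain copies. Below is a sketch of what the h->mc.copy[PIXEL_16x16] call and the intra_fdec memcpy amount to for the Y plane; it is a simplified rewrite for clarity, not the actual optimized implementation, and it assumes x264's pixel type, the FENC_STRIDE/FDEC_STRIDE macros and <string.h>:

    /* simplified, plain-C view of the luma (i == 0, b_chroma == 0) branch above */
    static void load_luma_fenc_fdec( pixel *p_fenc, const pixel *p_fenc_plane, int i_stride2,
                                     pixel *p_fdec, const pixel *intra_fdec )
    {
        /* 1. copy the 16x16 source block into the compact FENC_STRIDE buffer */
        for( int y = 0; y < 16; y++ )
            memcpy( p_fenc + y*FENC_STRIDE, p_fenc_plane + y*i_stride2, 16*sizeof(pixel) );

        /* 2. copy the bottom row of the macroblock above (16 pixels plus 8 more past the
         *    right edge, used as above / above-right neighbours for intra prediction)
         *    into the row just above the reconstruction buffer */
        memcpy( p_fdec - FDEC_STRIDE, intra_fdec, 24*sizeof(pixel) );
        p_fdec[-FDEC_STRIDE-1] = intra_fdec[-1];   /* above-left neighbour pixel */
    }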

    Next, look at how the reference-frame pixel memory is handled for a P frame, again taking the Y plane as an example, as shown in the figure below:


    [Figure: mbpic01.png]
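
    Stripped of the interlaced, weighted-prediction and B-frame branches, the list-0 setup above reduces to the sketch below: p_fref[0][j][0] points at the full-pel luma (yN) of reference j at the current macroblock's position, and p_fref[0][j][1..3] point at the same position in the three half-pel filtered planes (yH, yV, yHV) used for sub-pel motion search and compensation. load_luma_refs_progressive is a made-up name for illustration, not an x264 function:

    /* simplified, progressive-only list-0 setup for luma (i == 0, b_chroma == 0) */
    static void load_luma_refs_progressive( x264_t *h, int i_pix_offset )
    {
        for( int j = 0; j < h->mb.pic.i_fref[0]; j++ )
        {
            pixel *plane_src  = h->fref[0][j]->plane[0];      /* full-pel luma plane */
            pixel **filtered  = h->fref[0][j]->filtered[0];   /* [1..3]: H, V, HV half-pel planes */

            h->mb.pic.p_fref[0][j][0] = plane_src + i_pix_offset;        /* yN  */
            for( int k = 1; k < 4; k++ )
                h->mb.pic.p_fref[0][j][k] = filtered[k] + i_pix_offset;  /* yH, yV, yHV */
        }
    }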
