
Image Framework - A Brief Look at Glide's Animated WebP Decoding

Author: Stan_Z | Published 2020-07-17 16:22

Continuing from the previous article: the image data we obtained is ultimately decoded by ByteBufferWebpDecoder's decode method.

The code analyzed here comes from: https://github.com/zjupure/GlideWebpDecoder
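
A quick note on usage for context: once GlideWebpDecoder is added as a dependency, it registers its components through a Glide module, so loading an animated WebP looks like any other request. A minimal sketch (the URL and views are placeholders):

// Sketch of a typical load; GlideWebpDecoder's registered components pick up
// the WebP data transparently. The URL and imageView are placeholders.
Glide.with(context)
        .load("https://example.com/animated.webp")
        .into(imageView);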

ByteBufferWebpDecoder.java

@Override
public Resource<WebpDrawable> decode(@NonNull ByteBuffer source, int width, int height, @NonNull Options options) throws IOException {
    int length = source.remaining();
    byte[] data = new byte[length];
    source.get(data, 0, length);

    // 1
    WebpImage webp = WebpImage.create(data);
    int sampleSize = Utils.getSampleSize(webp.getWidth(), webp.getHeight(), width, height);

    // 2
    WebpDecoder webpDecoder = new WebpDecoder(mProvider, webp, source, sampleSize);
    webpDecoder.advance();

    // 3
    Bitmap firstFrame = webpDecoder.getNextFrame();
    if (firstFrame == null) {
        return null;
    }

    Transformation<Bitmap> unitTransformation = UnitTransformation.get();

    // 4
    return new WebpDrawableResource(new WebpDrawable(mContext, webpDecoder, mBitmapPool, unitTransformation, width, height,
            firstFrame));
}
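
Before the decoder is built, Utils.getSampleSize turns the WebP canvas size and the requested width/height into a downsample factor. A minimal sketch of how such a power-of-two factor is typically computed (an illustration of the idea, not the library's exact code):

// Hypothetical helper: choose the largest power-of-two factor that keeps the
// downsampled canvas at or above the requested dimensions.
static int getSampleSize(int srcWidth, int srcHeight, int targetWidth, int targetHeight) {
    int exactFactor = Math.min(srcWidth / Math.max(1, targetWidth),
            srcHeight / Math.max(1, targetHeight));
    // Integer.highestOneBit rounds down to a power of two; never drop below 1.
    return Math.max(1, Integer.highestOneBit(exactFactor));
}
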
1. WebpImage.create(data)

WebpImage.java

public static WebpImage create(byte[] source) {
    Preconditions.checkNotNull(source);
    ByteBuffer byteBuffer = ByteBuffer.allocateDirect(source.length);
    byteBuffer.put(source);
    byteBuffer.rewind();
    return nativeCreateFromDirectByteBuffer(byteBuffer);
}
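
Note that create() copies the bytes into a direct ByteBuffer before crossing into native code. This matters because the JNI side reads the data through GetDirectBufferAddress, which only resolves an address for direct (off-heap) buffers:

import java.nio.ByteBuffer;

public class DirectBufferDemo {
    public static void main(String[] args) {
        // Heap buffer: backed by a Java array; GetDirectBufferAddress would return null.
        ByteBuffer heap = ByteBuffer.wrap(new byte[16]);
        // Direct buffer: off-heap memory with a stable address visible to native code.
        ByteBuffer direct = ByteBuffer.allocateDirect(16);
        System.out.println(heap.isDirect());   // false
        System.out.println(direct.isDirect()); // true
    }
}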

The native code here is based on Google's open-source libwebp project: https://github.com/webmproject/libwebp

webp.cpp

/**
 * Creates a new WebPImage from the specified byte buffer. The data from the byte buffer is copied
 * into native memory managed by WebPImage.
 *
 * @param byteBuffer A java.nio.ByteBuffer. Must be direct. Assumes data is the entire capacity
 *      of the buffer
 * @return a newly allocated WebPImage
 */
jobject WebPImage_nativeCreateFromDirectByteBuffer(JNIEnv* pEnv, jclass clazz, jobject byteBuffer) {
    jbyte* bbufInput = (jbyte*) pEnv->GetDirectBufferAddress(byteBuffer);
    if (!bbufInput) {
        throwIllegalArgumentException(pEnv, "ByteBuffer must be direct");
        return 0;
    }
    jlong capacity = pEnv->GetDirectBufferCapacity(byteBuffer);
    if (pEnv->ExceptionCheck()) {
        return 0;
    }
    std::vector<uint8_t> vBuffer(bbufInput, bbufInput + capacity);
    return WebPImage_nativeCreateFromByteVector(pEnv, vBuffer);
}

/**
* Creates a new WebPImage from the specified buffer.
*
* @param vBuffer the vector containing the bytes
* @return a newly allocated WebPImage
*/
jobject WebPImage_nativeCreateFromByteVector(JNIEnv* pEnv, std::vector<uint8_t>& vBuffer) {
    std::unique_ptr<WebPImage> spNativeWebpImage(new WebPImage());
    if (!spNativeWebpImage) {
        throwOutOfMemoryError(pEnv, "Unable to allocate native context");
        return 0;
    }
    // WebPData is on the stack as it's only used during the call to WebPDemux.
    WebPData webPData;
    webPData.bytes = vBuffer.data();
    webPData.size = vBuffer.size();
    // Create the WebPDemuxer
    auto spDemuxer = std::unique_ptr<WebPDemuxer, decltype(&WebPDemuxDelete)> {
            WebPDemux(&webPData),
            WebPDemuxDelete
    };
    if (!spDemuxer) {
        // We may want to consider first using functions that will return a useful error code
        // if it fails to parse.
        throwIllegalArgumentException(pEnv, "Failed to create demuxer");
        //FBLOGW("unable to get demuxer");
        return 0;
    }

    spNativeWebpImage->pixelWidth = WebPDemuxGetI(spDemuxer.get(), WEBP_FF_CANVAS_WIDTH);
    spNativeWebpImage->pixelHeight = WebPDemuxGetI(spDemuxer.get(), WEBP_FF_CANVAS_HEIGHT);
    spNativeWebpImage->numFrames = WebPDemuxGetI(spDemuxer.get(), WEBP_FF_FRAME_COUNT);
    spNativeWebpImage->loopCount = WebPDemuxGetI(spDemuxer.get(), WEBP_FF_LOOP_COUNT);
    spNativeWebpImage->backgroundColor = WebPDemuxGetI(spDemuxer.get(), WEBP_FF_BACKGROUND_COLOR);

    // Compute cached fields that require iterating the frames.
    jint durationMs = 0;
    std::vector<jint> frameDurationsMs;
    WebPIterator iter;

    if (WebPDemuxGetFrame(spDemuxer.get(), 1, &iter)) {
        do {
            durationMs += iter.duration;
            frameDurationsMs.push_back(iter.duration);
        } while (WebPDemuxNextFrame(&iter));
        WebPDemuxReleaseIterator(&iter);
    }

    spNativeWebpImage->durationMs = durationMs;
    spNativeWebpImage->frameDurationsMs = frameDurationsMs;
    jintArray frameDurationsArr = pEnv->NewIntArray(spNativeWebpImage->numFrames);
    pEnv->SetIntArrayRegion(frameDurationsArr, 0, spNativeWebpImage->numFrames, spNativeWebpImage->frameDurationsMs.data());

    // Ownership of pDemuxer and vBuffer is transferred to WebPDemuxerWrapper here.
    // Note, according to Rob Arnold, createNew assumes we throw exceptions but we don't. Though
    // he claims this won't happen in practice cause "Linux will overcommit pages, we should only
    // get this error if we run out of virtual address space." Also, Daniel C may be working
    // on converting to exceptions.
    spNativeWebpImage->spDemuxer = std::shared_ptr<WebPDemuxerWrapper>(
            new WebPDemuxerWrapper(std::move(spDemuxer), std::move(vBuffer)));
    // Create the WebPImage with the native context.
    jobject ret = pEnv->NewObject(
            sClazzWebPImage,
            sWebPImageConstructor,
            (jlong) spNativeWebpImage.get(),
            (jint) spNativeWebpImage->pixelWidth,
            (jint) spNativeWebpImage->pixelHeight,
            (jint) spNativeWebpImage->numFrames,
            (jint) spNativeWebpImage->durationMs,
            frameDurationsArr,
            (jint) spNativeWebpImage->loopCount,
            (jint) spNativeWebpImage->backgroundColor);

    if (ret != nullptr) {
        // Ownership was transferred.
        spNativeWebpImage->refCount = 1;
        spNativeWebpImage.release();
    }
    return ret;
}

So a WebPImage is created on the native side, with the byte buffer copied into native memory managed by it; the native code then constructs a Java-layer WebpImage that the upper layers use to talk to it over JNI. The real WebP processing therefore lives in the native WebPImage.
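
For reference, the NewObject call above targets a Java-side constructor whose parameters mirror the JNI argument list. A sketch of what that constructor plausibly looks like (field names are assumptions inferred from the native call, not the library's exact code):

public class WebpImage {
    private long mNativePtr;  // opaque pointer to the native WebPImage
    private final int mWidth, mHeight;
    private final int mFrameCount, mDurationMs;
    private final int[] mFrameDurations;
    private final int mLoopCount, mBackgroundColor;

    WebpImage(long nativePtr, int width, int height, int frameCount, int durationMs,
              int[] frameDurations, int loopCount, int backgroundColor) {
        mNativePtr = nativePtr;
        mWidth = width;
        mHeight = height;
        mFrameCount = frameCount;
        mDurationMs = durationMs;
        mFrameDurations = frameDurations;
        mLoopCount = loopCount;
        mBackgroundColor = backgroundColor;
    }
}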

2. WebpDecoder initialization

WebpDecoder.java

private final LruCache<Integer, Bitmap> mFrameBitmapCache;

public WebpDecoder(GifDecoder.BitmapProvider provider, WebpImage webPImage, ByteBuffer rawData,
                   int sampleSize) {
    mBitmapProvider = provider;
    mWebPImage = webPImage;
    ...
    mTransparentFillPaint = new Paint();
    ...
    // Bitmap cache for each rendered frame of the animation
    mFrameBitmapCache = new LruCache<Integer, Bitmap>(MAX_FRAME_BITMAP_CACHE_SIZE) {
        @Override
        protected void entryRemoved(boolean evicted, Integer key, Bitmap oldValue, Bitmap newValue) {
            // Return the cached frame bitmap to the provider
            if (oldValue != null) {
                mBitmapProvider.release(oldValue);
            }
        }
    };
    setData(new GifHeader(), rawData, sampleSize);
}
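
The provider passed as the first argument is the recycling hook: when the LruCache evicts a frame, release() hands the Bitmap back to a pool instead of leaving it for the GC. A minimal pool-backed GifDecoder.BitmapProvider sketch (simplified; Glide's own implementation is GifBitmapProvider, which can additionally use an ArrayPool for the arrays):

class PoolBitmapProvider implements GifDecoder.BitmapProvider {
    private final BitmapPool bitmapPool;

    PoolBitmapProvider(BitmapPool bitmapPool) {
        this.bitmapPool = bitmapPool;
    }

    @NonNull
    @Override
    public Bitmap obtain(int width, int height, @NonNull Bitmap.Config config) {
        // getDirty skips zeroing the pixels; the decoder erases the bitmap itself.
        return bitmapPool.getDirty(width, height, config);
    }

    @Override
    public void release(@NonNull Bitmap bitmap) {
        bitmapPool.put(bitmap);
    }

    @NonNull
    @Override
    public byte[] obtainByteArray(int size) {
        return new byte[size];
    }

    @Override
    public void release(@NonNull byte[] bytes) {
        // Plain allocation above, so there is nothing to recycle.
    }

    @NonNull
    @Override
    public int[] obtainIntArray(int size) {
        return new int[size];
    }

    @Override
    public void release(@NonNull int[] array) {
        // Plain allocation above, so there is nothing to recycle.
    }
}
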
3. webpDecoder.getNextFrame()
@Override
public Bitmap getNextFrame() {
    int frameNumber = getCurrentFrameIndex();
    ...
    for (int index = nextIndex; index < frameNumber; index++) {
        WebpFrameInfo frameInfo = mFrameInfos[index];
        if (!frameInfo.blendPreviousFrame) {
            disposeToBackground(canvas, frameInfo);
        }
        // render the previous frame
        renderFrame(index, canvas);
        if (Log.isLoggable(TAG, Log.DEBUG)) {
            Log.d(TAG, "renderFrame, index=" + index + ", blend=" + frameInfo.blendPreviousFrame
                    + ", dispose=" + frameInfo.disposeBackgroundColor);
        }
        if (frameInfo.disposeBackgroundColor) {
            disposeToBackground(canvas, frameInfo);
        }
    }
    ...
    // Then put the rendered frame into the BitmapCache
    cacheFrameBitmap(frameNumber, bitmap);
    return bitmap;
}

private void renderFrame(int frameNumber, Canvas canvas) {
    WebpFrameInfo frameInfo = mFrameInfos[frameNumber];
    int frameWidth = frameInfo.width / sampleSize;
    int frameHeight = frameInfo.height / sampleSize;
    int xOffset = frameInfo.xOffset / sampleSize;
    int yOffset = frameInfo.yOffset / sampleSize;
    WebpFrame webpFrame = mWebPImage.getFrame(frameNumber);
    try {
        Bitmap frameBitmap = mBitmapProvider.obtain(frameWidth, frameHeight, mBitmapConfig);
        frameBitmap.eraseColor(Color.TRANSPARENT);
        webpFrame.renderFrame(frameWidth, frameHeight, frameBitmap);
        canvas.drawBitmap(frameBitmap, xOffset, yOffset, null);
        mBitmapProvider.release(frameBitmap);
    } finally {
        webpFrame.dispose();
    }
}

Frame data is fetched through WebpFrame (backed by the native decoder) and rendered onto the canvas; the finished Bitmap is then cached in mFrameBitmapCache.
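
The disposeToBackground calls in getNextFrame clear a frame's region before or after drawing, according to the frame's WebP dispose flag. Conceptually it is just a rect fill with the transparent paint set up in the constructor (a sketch of the idea, scaled by sampleSize like renderFrame):

private void disposeToBackground(Canvas canvas, WebpFrameInfo frameInfo) {
    float left = frameInfo.xOffset / (float) sampleSize;
    float top = frameInfo.yOffset / (float) sampleSize;
    float right = (frameInfo.xOffset + frameInfo.width) / (float) sampleSize;
    float bottom = (frameInfo.yOffset + frameInfo.height) / (float) sampleSize;
    // Erase the region so the next frame composites against the background
    // instead of stale pixels from an earlier frame.
    canvas.drawRect(left, top, right, bottom, mTransparentFillPaint);
}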

4. Finally, return a WebpDrawableResource

Earlier in decode() a unitTransformation was obtained. A Transformation's job is to turn a Resource into a transformed Resource (e.g., cropped to rounded corners or a circle). The UnitTransformation used here is the identity: it returns the Resource unchanged, applying no processing.
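
An identity Transformation is tiny. Simplified from Glide's UnitTransformation, it hands back the Resource it receives and contributes nothing to the disk cache key:

public final class UnitTransformation<T> implements Transformation<T> {
    private static final Transformation<?> TRANSFORMATION = new UnitTransformation<>();

    @SuppressWarnings("unchecked")
    public static <T> UnitTransformation<T> get() {
        return (UnitTransformation<T>) TRANSFORMATION;
    }

    private UnitTransformation() {
        // Singleton: obtain via get().
    }

    @NonNull
    @Override
    public Resource<T> transform(@NonNull Context context, @NonNull Resource<T> resource,
                                 int outWidth, int outHeight) {
        // Identity: return the input untouched.
        return resource;
    }

    @Override
    public void updateDiskCacheKey(@NonNull MessageDigest messageDigest) {
        // An identity transform adds nothing to the cache key.
    }
}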

return new WebpDrawableResource(new WebpDrawable(mContext, webpDecoder, mBitmapPool, unitTransformation, width, height, firstFrame));
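
WebpDrawableResource itself is a thin wrapper around the drawable. A sketch modeled on Glide's GifDrawableResource (the method bodies here are assumptions, but the shape is standard for DrawableResource subclasses):

public class WebpDrawableResource extends DrawableResource<WebpDrawable> {
    public WebpDrawableResource(WebpDrawable drawable) {
        super(drawable);
    }

    @NonNull
    @Override
    public Class<WebpDrawable> getResourceClass() {
        return WebpDrawable.class;
    }

    @Override
    public int getSize() {
        // Byte size of the underlying frame data, used by Glide's cache accounting.
        return drawable.getSize();
    }

    @Override
    public void recycle() {
        // Stop the animation and release the decoder's bitmaps.
        drawable.stop();
        drawable.recycle();
    }
}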
