Android Software and Hardware Rendering

Author: andorid_xiao | Published 2024-05-26 10:31

Based on the Android 9.0 source code

The drawing entry point

//frameworks/base/core/java/android/view/ViewRootImpl.java
private boolean draw(boolean fullRedrawNeeded) {
        Surface surface = mSurface;
        ...
        if (!dirty.isEmpty() || mIsAnimating || accessibilityFocusDirty) {
            if (mAttachInfo.mThreadedRenderer != null && mAttachInfo.mThreadedRenderer.isEnabled()) {
               ...
                final FrameDrawingCallback callback = mNextRtFrameCallback;
                mNextRtFrameCallback = null;
                //hardware drawing
                mAttachInfo.mThreadedRenderer.draw(mView, mAttachInfo, this, callback);
            } else {            
                ...
                //software drawing
                if (!drawSoftware(surface, mAttachInfo, xOffset, yOffset,
                        scalingRequired, dirty, surfaceInsets)) {
                    return false;
                }
            }
        }

        ...
        return useAsyncReport;
    }
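As a quick aside, the same decision can be observed from application code through the public View APIs. Below is a minimal sketch (example code, not part of the framework source):

//Example (not from the framework source): querying the rendering path at runtime
import android.util.Log;
import android.view.View;

final class RenderPathProbe {
    static void log(View view) {
        //isHardwareAccelerated() is only meaningful once the view is attached to a window
        boolean hardwareAccelerated = view.isHardwareAccelerated();
        //LAYER_TYPE_SOFTWARE forces this view's content to be rasterized into a bitmap
        boolean softwareLayer = view.getLayerType() == View.LAYER_TYPE_SOFTWARE;
        Log.d("RenderPathProbe", "hardwareAccelerated=" + hardwareAccelerated
                + ", softwareLayer=" + softwareLayer);
    }
}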

The Android software rendering flow

  1. Creating the surface

  2. Creating the canvas

  • Create the Java-layer Canvas object
//frameworks/base/core/java/android/view/Surface.java
private final Canvas mCanvas = new CompatibleCanvas();
//private final class CompatibleCanvas extends Canvas
  • When hardware acceleration is not in use, nInitRaster() creates the corresponding Canvas object in the native layer
//frameworks/base/graphics/java/android/graphics/Canvas.java
public Canvas() {
      if (!isHardwareAccelerated()) {
            // 0 means no native bitmap
            mNativeCanvasWrapper = nInitRaster(null);
            mFinalizer = NoImagePreloadHolder.sRegistry.registerNativeAllocation(
                    this, mNativeCanvasWrapper);
        } else {
            mFinalizer = null;
        }
}
  • Canvas::create_canvas() is then called to create the canvas; this Canvas is the native-layer (hwui) wrapper class
//frameworks/base/libs/hwui/jni/android_graphics_Canvas.cpp
static jlong initRaster(JNIEnv* env, jobject, jobject jbitmap) {
    SkBitmap bitmap;
    if (jbitmap != NULL) {
        GraphicsJNI::getSkBitmap(env, jbitmap, &bitmap);
    }
    return reinterpret_cast<jlong>(Canvas::create_canvas(bitmap));
}
  • SkiaCanvas is a subclass of Canvas, which shows that the underlying rendering is done by the Skia engine
//frameworks/base/libs/hwui/SkiaCanvas.cpp
Canvas* Canvas::create_canvas(const SkBitmap& bitmap) {
    return new SkiaCanvas(bitmap);
}

SkiaCanvas::SkiaCanvas(const SkBitmap& bitmap) {
    sk_sp<SkColorSpace> cs = bitmap.refColorSpace();
    mCanvasOwned =
            std::unique_ptr<SkCanvas>(new SkCanvas(bitmap, SkCanvas::ColorBehavior::kLegacy));
    if (cs.get() == nullptr || cs->isSRGB()) {
        if (!uirenderer::Properties::isSkiaEnabled()) {
            mCanvasWrapper =
                    SkCreateColorSpaceXformCanvas(mCanvasOwned.get(), SkColorSpace::MakeSRGB());
            mCanvas = mCanvasWrapper.get();
        } else {
            mCanvas = mCanvasOwned.get();
        }
    } else {
        mCanvasWrapper = SkCreateColorSpaceXformCanvas(mCanvasOwned.get(), std::move(cs));
        mCanvas = mCanvasWrapper.get();
    }
}

  • In the end a Skia canvas is created; the SkBitmapDevice used here indicates that the rendering backend is software
//external/skia/src/core/SkCanvas.cpp
SkCanvas::SkCanvas(const SkBitmap& bitmap, const SkSurfaceProps& props)
    : fMCStack(sizeof(MCRec), fMCRecStorage, sizeof(fMCRecStorage))
    , fProps(props)
{
    inc_canvas();
    //The SkBitmapDevice created here indicates that the backend renders in software
    sk_sp<SkBaseDevice> device(new SkBitmapDevice(bitmap, fProps));
    this->init(device.get(), kDefault_InitFlags);
}
  • When the application calls Surface.lockCanvas(), the native layer locks a buffer of the Surface, wraps it in an SkBitmap, and attaches it to the Canvas created above via setBitmap()
//frameworks/base/core/jni/android_view_Surface.cpp
static jlong nativeLockCanvas(JNIEnv* env, jclass clazz,
        jlong nativeObject, jobject canvasObj, jobject dirtyRectObj) {
    sp<Surface> surface(reinterpret_cast<Surface *>(nativeObject));
    ...
    ANativeWindow_Buffer outBuffer;
    status_t err = surface->lock(&outBuffer, dirtyRectPtr);
    ...
    SkImageInfo info = SkImageInfo::Make(outBuffer.width, outBuffer.height,
                                         convertPixelFormat(outBuffer.format),
                                         outBuffer.format == PIXEL_FORMAT_RGBX_8888
                                                 ? kOpaque_SkAlphaType : kPremul_SkAlphaType,
                                         GraphicsJNI::defaultColorSpace());

    SkBitmap bitmap;
    ssize_t bpr = outBuffer.stride * bytesPerPixel(outBuffer.format);
    bitmap.setInfo(info, bpr);
    if (outBuffer.width > 0 && outBuffer.height > 0) {
        bitmap.setPixels(outBuffer.bits);
    } else {
        // be safe with an empty bitmap.
        bitmap.setPixels(NULL);
    }

    Canvas* nativeCanvas = GraphicsJNI::getNativeCanvas(env, canvasObj);
    nativeCanvas->setBitmap(bitmap);
    ...

    // Create another reference to the surface and return it.  This reference
    // should be passed to nativeUnlockCanvasAndPost in place of mNativeObject,
    // because the latter could be replaced while the surface is locked.
    sp<Surface> lockedSurface(surface);
    lockedSurface->incStrong(&sRefBaseOwner);
    return (jlong) lockedSurface.get();
}
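For context, the lock shown above is what an ordinary software drawing loop in application code ends up calling. A minimal sketch (the SurfaceView and Paint are assumptions, not part of the quoted source):

//Example (not from the framework source): the application-side calls that reach nativeLockCanvas()
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.view.SurfaceHolder;
import android.view.SurfaceView;

final class SoftwareFrameDrawer {
    static void drawOneFrame(SurfaceView surfaceView, Paint paint) {
        SurfaceHolder holder = surfaceView.getHolder();
        //Surface.lockCanvas() -> nativeLockCanvas(): a buffer is locked and wrapped in an SkBitmap
        Canvas canvas = holder.lockCanvas();
        if (canvas == null) {
            return; //surface is not valid yet
        }
        try {
            canvas.drawColor(Color.BLACK);             //rasterized on the CPU by Skia
            canvas.drawCircle(100f, 100f, 50f, paint);
        } finally {
            //unlockCanvasAndPost() queues the buffer for composition
            holder.unlockCanvasAndPost(canvas);
        }
    }
}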

The Android hardware rendering flow

  • Hardware acceleration is enabled by default on Android. Developers can turn it on or off for the whole application or for a single Activity with the android:hardwareAccelerated attribute on the <application> and <activity> tags in the manifest. In addition, even when a window is hardware accelerated, an individual view can still be forced onto the software path with View.setLayerType(View.LAYER_TYPE_SOFTWARE, null), as sketched below.
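A minimal sketch of the view-level switch (example code, not from the framework source; the application and Activity levels are controlled from the manifest as described above):

//Example (not from the framework source): pushing a single view onto the software path
import android.view.View;

final class LayerTypeExample {
    static void forceSoftwareLayer(View heavyCustomView) {
        //This view's content will be drawn into a bitmap and composited by the hardware pipeline
        heavyCustomView.setLayerType(View.LAYER_TYPE_SOFTWARE, null);
    }
}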
1. Conditions for enabling hardware acceleration

The drawing entry code above shows that hardware rendering is used only when mThreadedRenderer is non-null and enabled, so let's first look at the conditions under which mThreadedRenderer is assigned.

//frameworks/base/core/java/android/view/ViewRootImpl.java
private void enableHardwareAcceleration(WindowManager.LayoutParams attrs) {
        ...
        // Try to enable hardware acceleration if requested
        final boolean hardwareAccelerated =
                (attrs.flags & WindowManager.LayoutParams.FLAG_HARDWARE_ACCELERATED) != 0;

        if (hardwareAccelerated) {
            ...
            if (fakeHwAccelerated) {
            ...
            } else if (!ThreadedRenderer.sRendererDisabled
                    || (ThreadedRenderer.sSystemRendererDisabled && forceHwAccelerated)) {
                ...
                mAttachInfo.mThreadedRenderer = ThreadedRenderer.create(mContext, translucent,
                        attrs.getTitle().toString());
                ...
            }
        }
    }
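The same FLAG_HARDWARE_ACCELERATED bit checked here can also be set explicitly per window from application code, before the window's content is added; a minimal sketch (example code, not from the framework source):

//Example (not from the framework source): setting FLAG_HARDWARE_ACCELERATED at the window level
import android.app.Activity;
import android.os.Bundle;
import android.view.WindowManager;

public class HardwareAcceleratedActivity extends Activity {
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        //Must be called before the window's content is added
        getWindow().setFlags(
                WindowManager.LayoutParams.FLAG_HARDWARE_ACCELERATED,
                WindowManager.LayoutParams.FLAG_HARDWARE_ACCELERATED);
    }
}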

The attrs parameter of the enableHardwareAcceleration() method above is passed in by WindowManagerGlobal when it creates the ViewRootImpl, as shown below.

//frameworks/base/core/java/android/view/WindowManagerGlobal.java
public void addView(View view, ViewGroup.LayoutParams params,
            Display display, Window parentWindow, int userId) {
        ...
        final WindowManager.LayoutParams wparams = (WindowManager.LayoutParams) params;
        if (parentWindow != null) {
            parentWindow.adjustLayoutParamsForSubWindow(wparams);
        } else {
            // If there's no parent, then hardware acceleration for this view is
            // set from the application's hardware acceleration setting.
            final Context context = view.getContext();
            if (context != null
                    && (context.getApplicationInfo().flags
                            & ApplicationInfo.FLAG_HARDWARE_ACCELERATED) != 0) {
                //If the application has not disabled hardware rendering, set FLAG_HARDWARE_ACCELERATED on wparams.flags
                wparams.flags |= WindowManager.LayoutParams.FLAG_HARDWARE_ACCELERATED;
            }
        }

        ViewRootImpl root;
        ...
        if (windowlessSession == null) {
             root = new ViewRootImpl(view.getContext(), display);
        } else {
            root = new ViewRootImpl(view.getContext(), display,
            windowlessSession, new WindowlessWindowLayout());
        }
        ...
        // do this last because it fires off messages to start doing things
        try {
            root.setView(view, wparams, panelParentView, userId);
        } catch (RuntimeException e) {
            ...
        }        
    }

As the code shows, whether hardware acceleration is enabled here matches the application's configuration exactly. Next, let's look at the basic flow of drawing in the hardware-accelerated case.

  • draw()
//frameworks/base/core/java/android/view/ThreadedRenderer.java
    void draw(View view, AttachInfo attachInfo, DrawCallbacks callbacks,
            FrameDrawingCallback frameDrawingCallback) {
        attachInfo.mIgnoreDirtyState = true;

        final Choreographer choreographer = attachInfo.mViewRootImpl.mChoreographer;
        choreographer.mFrameInfo.markDrawStart();
        //Update the display lists
        updateRootDisplayList(view, callbacks);
        ...
        final long[] frameInfo = choreographer.mFrameInfo.mFrameInfo;
        if (frameDrawingCallback != null) {
            nSetFrameCallback(mNativeProxy, frameDrawingCallback);
        }
        //The actual drawing
        int syncResult = nSyncAndDrawFrame(mNativeProxy, frameInfo, frameInfo.length);
        ...
    }

  • Updating the root view's display list
//frameworks/base/core/java/android/view/ThreadedRenderer.java
private void updateRootDisplayList(View view, DrawCallbacks callbacks) {
        Trace.traceBegin(Trace.TRACE_TAG_VIEW, "Record View#draw()");
        //Update the display lists of the view tree
        updateViewTreeDisplayList(view);

        if (mRootNodeNeedsUpdate || !mRootNode.isValid()) {
            DisplayListCanvas canvas = mRootNode.start(mSurfaceWidth, mSurfaceHeight);
            try {
                final int saveCount = canvas.save();
                canvas.translate(mInsetLeft, mInsetTop);
                callbacks.onPreDraw(canvas);

                canvas.insertReorderBarrier();
                canvas.drawRenderNode(view.updateDisplayListIfDirty());
                canvas.insertInorderBarrier();

                callbacks.onPostDraw(canvas);
                canvas.restoreToCount(saveCount);
                mRootNodeNeedsUpdate = false;
            } finally {
                mRootNode.end(canvas);
            }
        }
        Trace.traceEnd(Trace.TRACE_TAG_VIEW);
    }
  • Updating the child views' display lists
//frameworks/base/core/java/android/view/ThreadedRenderer.java
private void updateViewTreeDisplayList(View view) {
        ...
        view.updateDisplayListIfDirty();
        view.mRecreateDisplayList = false;
    }
//frameworks/base/core/java/android/view/View.java
public RenderNode updateDisplayListIfDirty() {
        final RenderNode renderNode = mRenderNode;
        ...

        if ((mPrivateFlags & PFLAG_DRAWING_CACHE_VALID) == 0
                || !renderNode.isValid()
                || (mRecreateDisplayList)) {
            // The display list itself is still valid; just ask the children to rebuild theirs
            if (renderNode.isValid() && !mRecreateDisplayList) {
                ...
                dispatchGetDisplayList();  //dispatch so that child views regenerate their display lists

                return renderNode; // no work needed
            }

            // If we got here, we're recreating it. Mark it as such to ensure that
            // we copy in child display lists into ours in drawChild()
            mRecreateDisplayList = true;
            ...
            final DisplayListCanvas canvas = renderNode.start(width, height);

            try {
                // If this view requests a software layer, draw it into a bitmap that will be composited later
                if (layerType == LAYER_TYPE_SOFTWARE) {
                    buildDrawingCache(true);
                    Bitmap cache = getDrawingCache(true);
                    if (cache != null) {
                        canvas.drawBitmap(cache, 0, 0, mLayerPaint);
                    }
                } else {
                    computeScroll();
                    ...
                    //record the view's drawing commands
                    draw(canvas);
                    
                }
            } finally {
                renderNode.end(canvas);  //recording finished
                setDisplayListProperties(renderNode);
            }
        } else {
            mPrivateFlags |= PFLAG_DRAWN | PFLAG_DRAWING_CACHE_VALID;
            mPrivateFlags &= ~PFLAG_DIRTY_MASK;
        }
        return renderNode;
    }

//frameworks/base/core/java/android/view/RenderNode.java
//Recording is finished; the drawing commands have been captured
public void end(DisplayListCanvas canvas) {
        long displayList = canvas.finishRecording();
        nSetDisplayList(mNativeRenderNode, displayList);
        canvas.recycle();
    }
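For reference, the same record-then-replay model has been exposed publicly since API 29 as android.graphics.RenderNode; a minimal sketch (example code, not part of the Android 9 sources quoted above):

//Example (not from the framework source, requires API 29+): recording and replaying a display list
import android.graphics.Color;
import android.graphics.RecordingCanvas;
import android.graphics.RenderNode;

final class RenderNodeExample {
    static RenderNode buildNode() {
        RenderNode node = new RenderNode("example");
        node.setPosition(0, 0, 200, 200);
        RecordingCanvas recordingCanvas = node.beginRecording();
        try {
            //Each call becomes an op in the node's display list instead of drawing immediately
            recordingCanvas.drawColor(Color.BLUE);
        } finally {
            node.endRecording(); //counterpart of RenderNode.end()/finishRecording() above
        }
        //Later, a hardware-accelerated Canvas can replay it via canvas.drawRenderNode(node)
        return node;
    }
}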
  • Creating the recording canvas (DisplayListCanvas)
//frameworks/base/core/jni/android_view_DisplayListCanvas.cpp
static jlong android_view_DisplayListCanvas_createDisplayListCanvas(jlong renderNodePtr,
        jint width, jint height) {
    RenderNode* renderNode = reinterpret_cast<RenderNode*>(renderNodePtr);
    return reinterpret_cast<jlong>(Canvas::create_recording_canvas(width, height, renderNode));
}
//frameworks/base/libs/hwui/hwui/Canvas.cpp
Canvas* Canvas::create_recording_canvas(int width, int height, uirenderer::RenderNode* renderNode) {
    if (uirenderer::Properties::isSkiaEnabled()) {
        return new uirenderer::skiapipeline::SkiaRecordingCanvas(renderNode, width, height);
    }
    return new uirenderer::RecordingCanvas(width, height);
}
  • SkiaRecordingCanvas
//frameworks/base/libs/hwui/pipeline/skia/SkiaRecordingCanvas.h
class SkiaRecordingCanvas : public SkiaCanvas {
public:
    explicit SkiaRecordingCanvas(uirenderer::RenderNode* renderNode, int width, int height) {
        initDisplayList(renderNode, width, height);
    }
...
private:
    SkLiteRecorder mRecorder; //records the drawing commands
    std::unique_ptr<SkiaDisplayList> mDisplayList;
    ...
};
//external/skia/src/core/SkLiteRecorder.h
class SkLiteRecorder final : public SkNoDrawCanvas {
public:
    void onDrawRect  (const SkRect&, const SkPaint&) override;
    void onDrawRegion(const SkRegion&, const SkPaint&) override;
    ...
private:
    typedef SkNoDrawCanvas INHERITED;
    SkLiteDL* fDL;
};
//external/skia/src/core/SkLiteRecorder.cpp
void SkLiteRecorder::onDrawPaint(const SkPaint& paint) {
    fDL->drawPaint(paint);
}

SkLiteRecorder is an SkCanvas subclass (via SkNoDrawCanvas) that records drawing commands instead of rasterizing them. After the Java-level View.onDraw(Canvas) calls have returned, the complete set of drawing commands has been recorded.
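In other words, on the hardware path the Canvas handed to onDraw() is a recording canvas. A minimal sketch of the application side (example code, not from the framework source):

//Example (not from the framework source): onDraw() calls become display list ops on the hardware path
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.view.View;

public class RecordedView extends View {
    private final Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);

    public RecordedView(Context context) {
        super(context);
        paint.setColor(Color.RED);
    }

    @Override
    protected void onDraw(Canvas canvas) {
        //With hardware acceleration these calls are recorded by SkiaRecordingCanvas/SkLiteRecorder
        //and rasterized later on the RenderThread
        canvas.drawRect(0f, 0f, getWidth(), getHeight(), paint);
        canvas.drawCircle(getWidth() / 2f, getHeight() / 2f, 40f, paint);
    }
}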

  • Once the drawing commands are recorded, nSyncAndDrawFrame() starts the drawing; it eventually reaches DrawFrameTask::run(), and the CanvasContext used by DrawFrameTask is created in RenderProxy
//frameworks/base/libs/hwui/renderthread/RenderProxy.cpp
RenderProxy::RenderProxy(bool translucent, RenderNode* rootRenderNode,
                         IContextFactory* contextFactory)
        : mRenderThread(RenderThread::getInstance()), mContext(nullptr) {
    mContext = mRenderThread.queue().runSync([&]() -> CanvasContext* {
        return CanvasContext::create(mRenderThread, translucent, rootRenderNode, contextFactory);
    });
    mDrawFrameTask.setContext(&mRenderThread, mContext, rootRenderNode);
}

//frameworks/base/libs/hwui/renderthread/CanvasContext.cpp
CanvasContext* CanvasContext::create(RenderThread& thread, bool translucent,
                                     RenderNode* rootRenderNode, IContextFactory* contextFactory) {
    auto renderType = Properties::getRenderPipelineType();

    switch (renderType) {
        case RenderPipelineType::OpenGL:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<OpenGLPipeline>(thread));
        case RenderPipelineType::SkiaGL:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<skiapipeline::SkiaOpenGLPipeline>(thread));
        case RenderPipelineType::SkiaVulkan:
            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
                                     std::make_unique<skiapipeline::SkiaVulkanPipeline>(thread));
        default:
            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t)renderType);
            break;
    }
    return nullptr;
}
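Which case is taken depends on Properties::getRenderPipelineType(); on Android 9 this is typically driven by the debug.hwui.renderer system property (values such as skiagl or skiavk), though the exact handling varies across versions.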
  • Next, look at SkiaOpenGLPipeline::draw(): renderFrame() carries out the drawing and eventually reaches RenderNodeDrawable::draw()
//frameworks/base/libs/hwui/pipeline/skia/SkiaOpenGLPipeline.cpp
bool SkiaOpenGLPipeline::draw(const Frame& frame, const SkRect& screenDirty, const SkRect& dirty,
                              const FrameBuilder::LightGeometry& lightGeometry,
                              LayerUpdateQueue* layerUpdateQueue, const Rect& contentDrawBounds,
                              bool opaque, bool wideColorGamut,
                              const BakedOpRenderer::LightInfo& lightInfo,
                              const std::vector<sp<RenderNode>>& renderNodes,
                              FrameInfoVisualizer* profiler) {
    mEglManager.damageFrame(frame, dirty);
    ...
    //Render the frame
    renderFrame(*layerUpdateQueue, dirty, renderNodes, opaque, wideColorGamut, contentDrawBounds,
                surface);
    layerUpdateQueue->clear();
    ...
    return true;
}

//frameworks/base/libs/hwui/pipeline/skia/SkiaPipeline.cpp
void SkiaPipeline::renderFrame(const LayerUpdateQueue& layers, const SkRect& clip,
                               const std::vector<sp<RenderNode>>& nodes, bool opaque,
                               bool wideColorGamut, const Rect& contentDrawBounds,
                               sk_sp<SkSurface> surface) {
    ...
    std::unique_ptr<SkPictureRecorder> recorder;
    SkCanvas* canvas = tryCapture(surface.get());

    renderFrameImpl(layers, clip, nodes, opaque, wideColorGamut, contentDrawBounds, canvas);
    ...
    surface->getCanvas()->flush();
}

void SkiaPipeline::renderFrameImpl(const LayerUpdateQueue& layers, const SkRect& clip,
                                   const std::vector<sp<RenderNode>>& nodes, bool opaque,
                                   bool wideColorGamut, const Rect& contentDrawBounds,
                                   SkCanvas* canvas) {
    if (1 == nodes.size()) {
        if (!nodes[0]->nothingToDraw()) {
            RenderNodeDrawable root(nodes[0].get(), canvas);
            //Draw the root render node
            root.draw(canvas);
        }
    } 
}
  • Continuing into RenderNodeDrawable::draw(), which ultimately calls drawContent()
//frameworks/base/libs/hwui/pipeline/skia/RenderNodeDrawable.cpp
void RenderNodeDrawable::drawContent(SkCanvas* canvas) const {
    RenderNode* renderNode = mRenderNode.get();
    ...
    SkiaDisplayList* displayList = (SkiaDisplayList*)mRenderNode->getDisplayList();
    ...
    //Replay the recorded display list
    displayList->draw(canvas);           
}
  • SkiaDisplayList::draw()
//frameworks/base/libs/hwui/pipeline/skia/SkiaDisplayList.h
SkLiteDL mDisplayList;
void draw(SkCanvas* canvas) { mDisplayList.draw(canvas); }

In draw(SkCanvas* canvas), the canvas comes from the SkSurface created earlier; at this point Skia's hardware-accelerated rendering truly begins.

Flutter source code

Skia drawing in the Flutter engine repository

Choosing between software and hardware rendering

//shell/platform/android/platform_view_android.cc

std::unique_ptr<AndroidSurface> AndroidSurfaceFactoryImpl::CreateSurface() {
  switch (android_context_->RenderingApi()) {
    case AndroidRenderingAPI::kSoftware:
      return std::make_unique<AndroidSurfaceSoftware>();
    case AndroidRenderingAPI::kImpellerOpenGLES:
      return std::make_unique<AndroidSurfaceGLImpeller>(
          std::static_pointer_cast<AndroidContextGLImpeller>(android_context_));
    case AndroidRenderingAPI::kSkiaOpenGLES:
      return std::make_unique<AndroidSurfaceGLSkia>(
          std::static_pointer_cast<AndroidContextGLSkia>(android_context_));
    case AndroidRenderingAPI::kImpellerVulkan:
      return std::make_unique<AndroidSurfaceVulkanImpeller>(
          std::static_pointer_cast<AndroidContextVulkanImpeller>(
              android_context_));
  }
  FML_UNREACHABLE();
}
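Which branch runs depends on the rendering API configured for the Android embedding; for example, launching with the flutter tool's --enable-software-rendering flag selects kSoftware, and the Impeller variants are used when Impeller is enabled for the app, otherwise the Skia OpenGL ES surface is created.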

References

Creation of the surface
