Metal Case Study: Rendering a Video File

Author: 奉灬孝 | Published 2020-08-29 18:55

    The effect looks like this:


    (Demo GIF: 效果图.gif)

    The rendering implementation consists of three main steps:

    I. Reading video frames from the file with AVAssetReader

    ①. Capture video with AVFoundation and store the raw captured data in a CMSampleBufferRef, i.e. a video-frame render sample (see the sketch after this list).
    ②. Or read the CMSampleBufferRef data, i.e. the video-frame render sample, from a MOV/MP4 file. This approach also uses AVFoundation, specifically its AVAssetReader class, to read the render samples (CMSampleBufferRef) from the video file; it is the approach implemented in this post.
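
    Approach ① is not implemented in this post. For context, here is a minimal capture sketch (assuming a camera is available; the class name and setup details are illustrative, not part of the original demo):

    #import <AVFoundation/AVFoundation.h>
    
    //Illustrative only: delivers each captured frame as a CMSampleBufferRef,
    //the same sample type the AVAssetReader path below produces.
    @interface CaptureSampleSource : NSObject <AVCaptureVideoDataOutputSampleBufferDelegate>
    @property (nonatomic, strong) AVCaptureSession *session;
    @end
    
    @implementation CaptureSampleSource
    
    - (void)start {
        self.session = [[AVCaptureSession alloc] init];
        
        //Camera input
        AVCaptureDevice *camera = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
        AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:camera error:nil];
        if (input && [self.session canAddInput:input]) {
            [self.session addInput:input];
        }
        
        //Video data output; match the NV12 full-range pixel format used by the renderer below
        AVCaptureVideoDataOutput *output = [[AVCaptureVideoDataOutput alloc] init];
        output.videoSettings = @{ (id)kCVPixelBufferPixelFormatTypeKey :
                                      @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) };
        [output setSampleBufferDelegate:self queue:dispatch_queue_create("capture.queue", NULL)];
        if ([self.session canAddOutput:output]) {
            [self.session addOutput:output];
        }
        
        [self.session startRunning];
    }
    
    //Each captured frame arrives here as a CMSampleBufferRef
    - (void)captureOutput:(AVCaptureOutput *)output
    didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
           fromConnection:(AVCaptureConnection *)connection {
        //Hand the sample buffer to the renderer (e.g. build Y/UV textures from it)
    }
    
    @end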

    The AVAssetReader utility class below is wrapped following the official documentation.

    RenderAssetReader
    #import "RenderAssetReader.h"
    
    @implementation RenderAssetReader
    {
        //Track output: reads decoded samples from the video track
        AVAssetReaderTrackOutput *readerVideoTrackOutput;
        //AVAssetReader provides decoded audio/video data from the raw asset
        AVAssetReader   *assetReader;
        //Video file URL
        NSURL *videoUrl;
        //Lock protecting the reader state
        NSLock *lock;
    }
    
    //Initializer
    - (instancetype)initWithUrl:(NSURL *)url{
        
        self = [super init];
        if(self != nil)
        {
            videoUrl = url;
            lock = [[NSLock alloc]init];
            [self setUpAsset];
        }
        return self;
    }
    
    //Asset setup
    -(void)setUpAsset{
       
        //AVURLAssetPreferPreciseDurationAndTimingKey defaults to NO; YES asks for precise duration and timing
        NSDictionary *inputOptions = [NSDictionary dictionaryWithObject:[NSNumber numberWithBool:YES] forKey:AVURLAssetPreferPreciseDurationAndTimingKey];
        
        //1. Create an AVURLAsset (an AVAsset subclass) that loads the asset from a local/remote URL
        AVURLAsset *inputAsset = [[AVURLAsset alloc] initWithURL:videoUrl options:inputOptions];
        
        //2. Load the asset asynchronously
        //weakSelf breaks the retain cycle
        __weak typeof(self) weakSelf = self;
        
        //Key to load
        NSString *tracks = @"tracks";
       
        //Perform the standard asynchronous loading for the keys we need, so that accessing the asset's tracks property later does not block
        [inputAsset loadValuesAsynchronouslyForKeys:@[tracks] completionHandler: ^{
            
            //Keep self alive for the duration of the block
            __strong typeof(self) strongSelf = weakSelf;
            
            //Process the loaded inputAsset asynchronously on a global concurrent queue
            dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
                NSError *error = nil;
                
                //Check the load status
                AVKeyValueStatus tracksStatus = [inputAsset statusOfValueForKey:tracks error:&error];
                //If the tracks key did not load successfully, log the error and return
                if (tracksStatus != AVKeyValueStatusLoaded)
                {
                    NSLog(@"error %@", error);
                    return;
                }
                //Process the loaded inputAsset
                [strongSelf processWithAsset:inputAsset];
            });
        }];
        
    }
    
    //Process the loaded asset
    - (void)processWithAsset:(AVAsset *)asset
    {
        //Lock
        [lock lock];
        NSLog(@"processWithAsset");
        NSError *error = nil;
        
        //1. Create the AVAssetReader
        assetReader = [AVAssetReader assetReaderWithAsset:asset error:&error];
        
        //2. kCVPixelBufferPixelFormatTypeKey: the output pixel format
        /*
         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange : '420v' (video range)
         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange  : '420f' (full range, used here)
         kCVPixelFormatType_32BGRA : iOS converts YUV to BGRA internally
         */
        NSMutableDictionary *outputSettings = [NSMutableDictionary dictionary];
        [outputSettings setObject:@(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) forKey:(id)kCVPixelBufferPixelFormatTypeKey];
        
        /*3. Set up readerVideoTrackOutput
         assetReaderTrackOutputWithTrack:(AVAssetTrack *)track outputSettings:(nullable NSDictionary<NSString *, id> *)outputSettings
         parameter 1: the track of the asset to read
         parameter 2: the video output settings
         */
        readerVideoTrackOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:[[asset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] outputSettings:outputSettings];
        
        //alwaysCopiesSampleData: whether buffers are copied before being vended. YES means the output always hands out a copy that you are free to modify; NO avoids the extra copy.
        readerVideoTrackOutput.alwaysCopiesSampleData = NO;
        
        //4. Attach the output to the assetReader
        [assetReader addOutput:readerVideoTrackOutput];
        
        //5. Start reading and check that it actually started
        if ([assetReader startReading] == NO)
        {
            NSLog(@"Error reading from file at URL: %@", asset);
        }
        
        //Unlock
        [lock unlock];
    }
    
    //Read the next sample buffer
    - (CMSampleBufferRef)readBuffer {
        //Lock
        [lock lock];
        CMSampleBufferRef sampleBufferRef = nil;
        
        //1. Make sure readerVideoTrackOutput was created successfully
        if (readerVideoTrackOutput) {
           
            //Copy the next sample buffer into sampleBufferRef (the caller releases it)
            sampleBufferRef = [readerVideoTrackOutput copyNextSampleBuffer];
        }
        
        //2. If the assetReader exists and has finished reading, tear down readerVideoTrackOutput/assetReader and set them up again (so the video loops)
        if (assetReader && assetReader.status == AVAssetReaderStatusCompleted) {
            NSLog(@"customInit");
            readerVideoTrackOutput = nil;
            assetReader = nil;
            [self setUpAsset];
        }
        
        //Unlock
        [lock unlock];
        
        //3. Return the sample buffer that was read (may be NULL)
        return sampleBufferRef;
    }
    
    @end
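
    The post shows only the implementation file. The matching RenderAssetReader.h interface, inferred from the initializer and readBuffer method used above, would be roughly:

    #import <Foundation/Foundation.h>
    #import <AVFoundation/AVFoundation.h>
    #import <CoreMedia/CoreMedia.h>
    
    @interface RenderAssetReader : NSObject
    
    //Initialize with a local MOV/MP4 file URL
    - (instancetype)initWithUrl:(NSURL *)url;
    
    //Returns the next decoded video frame, or NULL when no frame is available.
    //The caller owns the returned buffer and is responsible for releasing it.
    - (CMSampleBufferRef)readBuffer;
    
    @end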
    

    II. ViewController

    • Setup
    1. MTKView setup
    2. RenderAssetReader setup
    3. Render pipeline setup
    4. Vertex data setup
    5. Conversion matrix setup
    - (void)viewDidLoad {
        [super viewDidLoad];
    
        //1. MTKView setup
        [self setupMTKView];
        //2. RenderAssetReader setup
        [self setupCCAsset];
        //3. Render pipeline setup
        [self setupPipeline];
        //4. Vertex data setup
        [self setupVertex];
        //5. Conversion matrix setup
        [self setupMatrix];
        
    }
    
    #pragma mark -- setup init
    //1. MTKView setup
    -(void)setupMTKView{
        
        //1. Create the MTKView
        self.mtkView = [[MTKView alloc] initWithFrame:self.view.bounds];
        //Get the default Metal device
        self.mtkView.device = MTLCreateSystemDefaultDevice();
        //Make the MTKView the controller's view
        self.view = self.mtkView;
        //Set the delegate
        self.mtkView.delegate = self;
        //Record the viewport size
        self.viewportSize = (vector_uint2){self.mtkView.drawableSize.width, self.mtkView.drawableSize.height};
    }
    
    //2. RenderAssetReader setup
    -(void)setupCCAsset{
        
        //Note: RenderAssetReader works with both MOV and MP4 files
        //1. Video file URL
        //NSURL *url = [[NSBundle mainBundle] URLForResource:@"Metal" withExtension:@"mov"];
        NSURL *url = [[NSBundle mainBundle] URLForResource:@"Metal" withExtension:@"mp4"];
        
        //2. Create the RenderAssetReader
        self.reader = [[RenderAssetReader alloc] initWithUrl:url];
        
        //3. Create _textureCache (Core Video's high-speed texture cache channel for sharing texture data between CPU and GPU)
        CVMetalTextureCacheCreate(NULL, NULL, self.mtkView.device, NULL, &_textureCache);
        
    }
    
    // 3. Render pipeline setup
    -(void)setupPipeline {
        
        //1. Load the .metal shader library
        /*
         newDefaultLibrary: recommended when the project has a single default .metal library
         newLibraryWithFile:error: loads a metal library from a specified file
         newLibraryWithData:error: loads a metal library from data
         */
        id<MTLLibrary> defaultLibrary = [self.mtkView.device newDefaultLibrary];
        //Vertex shader; the function name is vertexShader
        id<MTLFunction> vertexFunction = [defaultLibrary newFunctionWithName:@"vertexShader"];
        //Fragment shader; the function name is samplingShader
        id<MTLFunction> fragmentFunction = [defaultLibrary newFunctionWithName:@"samplingShader"];
        
        //2. Render pipeline descriptor
        MTLRenderPipelineDescriptor *pipelineStateDescriptor = [[MTLRenderPipelineDescriptor alloc] init];
        //Set the vertex function
        pipelineStateDescriptor.vertexFunction = vertexFunction;
        //Set the fragment function
        pipelineStateDescriptor.fragmentFunction = fragmentFunction;
        //Set the color attachment's pixel format
        pipelineStateDescriptor.colorAttachments[0].pixelFormat = self.mtkView.colorPixelFormat;
        
        //3. Create the render pipeline state from the descriptor
        //Creating a pipeline state is expensive; avoid doing it frequently
        self.pipelineState = [self.mtkView.device newRenderPipelineStateWithDescriptor:pipelineStateDescriptor
                                                                                 error:NULL];
        
        //4. The command queue guarantees that rendering commands are submitted to the GPU in order
        self.commandQueue = [self.mtkView.device newCommandQueue];
    }
    
    // 4. Vertex setup
    - (void)setupVertex {
        
        //1. Vertex positions (x, y, z, w) and texture coordinates (x, y)
        //Note: positions span [-1, 1] so the video fills the whole screen
        static const CCVertex quadVertices[] =
        {   // position: x, y, z, w;    texture coordinate: x, y
            { {  1.0, -1.0, 0.0, 1.0 },  { 1.f, 1.f } },
            { { -1.0, -1.0, 0.0, 1.0 },  { 0.f, 1.f } },
            { { -1.0,  1.0, 0.0, 1.0 },  { 0.f, 0.f } },
            
            { {  1.0, -1.0, 0.0, 1.0 },  { 1.f, 1.f } },
            { { -1.0,  1.0, 0.0, 1.0 },  { 0.f, 0.f } },
            { {  1.0,  1.0, 0.0, 1.0 },  { 1.f, 0.f } },
        };
        
        //2. Create the vertex buffer
        self.vertices = [self.mtkView.device newBufferWithBytes:quadVertices
                                                         length:sizeof(quadVertices)
                                                        options:MTLResourceStorageModeShared];
        //3. Compute the vertex count
        self.numVertices = sizeof(quadVertices) / sizeof(CCVertex);
    }
    
    
    // Set up the YUV->RGB conversion matrix
    - (void)setupMatrix {
        
        //1. Conversion matrices
        // BT.601 (video range), the standard for SDTV
        matrix_float3x3 kColorConversion601DefaultMatrix = (matrix_float3x3){
            (simd_float3){1.164,  1.164, 1.164},
            (simd_float3){0.0, -0.392, 2.017},
            (simd_float3){1.596, -0.813,   0.0},
        };
        
        // BT.601 full range
        matrix_float3x3 kColorConversion601FullRangeMatrix = (matrix_float3x3){
            (simd_float3){1.0,    1.0,    1.0},
            (simd_float3){0.0,    -0.343, 1.765},
            (simd_float3){1.4,    -0.711, 0.0},
        };
       
        // BT.709, the standard for HDTV
        matrix_float3x3 kColorConversion709DefaultMatrix = (matrix_float3x3){
            (simd_float3){1.164,  1.164, 1.164},
            (simd_float3){0.0, -0.213, 2.112},
            (simd_float3){1.793, -0.533,   0.0},
        };
        
        //2. Offset applied to the sampled YUV values before conversion
        vector_float3 kColorConversion601FullRangeOffset = (vector_float3){ -(16.0/255.0), -0.5, -0.5};
        
        //3. Fill the conversion matrix struct
        CCConvertMatrix matrix;
        //Choose the conversion matrix:
        /*
         kColorConversion601DefaultMatrix;
         kColorConversion601FullRangeMatrix;
         kColorConversion709DefaultMatrix;
         */
        matrix.matrix = kColorConversion601FullRangeMatrix;
        //Set the offset
        matrix.offset = kColorConversion601FullRangeOffset;
        
        //4. Create the conversion-matrix buffer
        self.convertMatrix = [self.mtkView.device newBufferWithBytes:&matrix
                                                              length:sizeof(CCConvertMatrix)
                                                             options:MTLResourceStorageModeShared];
    }
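
    Both the ViewController code above and the shaders in part III import a shared header, RenderShaderTypes.h, which the post does not include. A minimal sketch consistent with the identifiers used here (the exact enum values and struct layout are assumptions):

    #ifndef RenderShaderTypes_h
    #define RenderShaderTypes_h
    
    #include <simd/simd.h>
    
    //Vertex: clip-space position + texture coordinate
    typedef struct
    {
        vector_float4 position;
        vector_float2 textureCoordinate;
    } CCVertex;
    
    //YUV -> RGB conversion matrix and offset
    typedef struct
    {
        matrix_float3x3 matrix;
        vector_float3 offset;
    } CCConvertMatrix;
    
    //Vertex buffer index
    typedef enum CCVertexInputIndex
    {
        CCVertexInputIndexVertices = 0,
    } CCVertexInputIndex;
    
    //Fragment buffer index
    typedef enum CCFragmentBufferIndex
    {
        CCFragmentInputIndexMatrix = 0,
    } CCFragmentBufferIndex;
    
    //Fragment texture indices
    typedef enum CCFragmentTextureIndex
    {
        CCFragmentTextureIndexTextureY  = 0,
        CCFragmentTextureIndexTextureUV = 1,
    } CCFragmentTextureIndex;
    
    #endif /* RenderShaderTypes_h */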
    
    • Drawing
    1. Metal drawing work
    2. Read a video frame -> Y/UV textures
    3. Pass the vertex/fragment data
    4. Draw
    // Set up the Y/UV textures from a sample buffer
    - (void)setupTextureWithEncoder:(id<MTLRenderCommandEncoder>)encoder buffer:(CMSampleBufferRef)sampleBuffer {
        
        //1. Get the CVPixelBuffer from the CMSampleBuffer
        CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        
        id<MTLTexture> textureY = nil;
        id<MTLTexture> textureUV = nil;
       
        //textureY setup
        {
            //2. Get the width and height of plane 0 (luma)
            size_t width = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0);
            size_t height = CVPixelBufferGetHeightOfPlane(pixelBuffer, 0);
            
            //3. Pixel format: a single 8-bit normalized unsigned integer component
            MTLPixelFormat pixelFormat = MTLPixelFormatR8Unorm;
            
            //4. Core Video Metal texture reference
            CVMetalTextureRef texture = NULL;
            
            /*5. Create a Core Video Metal texture from the pixel buffer
             CVReturn CVMetalTextureCacheCreateTextureFromImage(CFAllocatorRef allocator,
             CVMetalTextureCacheRef textureCache,
             CVImageBufferRef sourceImage,
             CFDictionaryRef textureAttributes,
             MTLPixelFormat pixelFormat,
             size_t width,
             size_t height,
             size_t planeIndex,
             CVMetalTextureRef  *textureOut);
             
             Purpose: creates a Core Video Metal texture buffer from an existing image buffer.
             parameter 1: allocator, the memory allocator, kCFAllocatorDefault by default
             parameter 2: textureCache, the texture cache object
             parameter 3: sourceImage, the video image buffer
             parameter 4: textureAttributes, texture attribute dictionary, NULL by default
             parameter 5: pixelFormat, the Metal pixel format for the buffer data. Note: if this does not match the format the frames were captured/decoded in (e.g. MTLPixelFormatBGRA8Unorm for a YUV buffer), the image will look wrong
             parameter 6: width, the texture width in pixels
             parameter 7: height, the texture height in pixels
             parameter 8: planeIndex, the plane index to map if the image buffer is planar; ignored for non-planar buffers
             parameter 9: textureOut, on return, the created Metal texture buffer
             */
            CVReturn status = CVMetalTextureCacheCreateTextureFromImage(NULL, self.textureCache, pixelBuffer, NULL, pixelFormat, width, height, 0, &texture);
            
            //6. Check whether the texture was created successfully
            if(status == kCVReturnSuccess)
            {
                //7. Get the Metal texture
                textureY = CVMetalTextureGetTexture(texture);
               
                //8. Release the Core Video texture when done
                CFRelease(texture);
            }
        }
        
        //9. textureUV setup (same as textureY above, but for plane 1)
        {
            size_t width = CVPixelBufferGetWidthOfPlane(pixelBuffer, 1);
            size_t height = CVPixelBufferGetHeightOfPlane(pixelBuffer, 1);
            MTLPixelFormat pixelFormat = MTLPixelFormatRG8Unorm;
            CVMetalTextureRef texture = NULL;
            CVReturn status = CVMetalTextureCacheCreateTextureFromImage(NULL, self.textureCache, pixelBuffer, NULL, pixelFormat, width, height, 1, &texture);
            if(status == kCVReturnSuccess)
            {
                textureUV = CVMetalTextureGetTexture(texture);
                CFRelease(texture);
            }
        }
        
        //10. Make sure both textureY and textureUV were created
        if(textureY != nil && textureUV != nil)
        {
            //11. Pass textureY to the fragment function
            [encoder setFragmentTexture:textureY atIndex:CCFragmentTextureIndexTextureY];
            //12. Pass textureUV to the fragment function
            [encoder setFragmentTexture:textureUV atIndex:CCFragmentTextureIndexTextureUV];
        }
        
        //13. Release the sampleBuffer now that it is no longer needed
        CFRelease(sampleBuffer);
    }
    
    #pragma mark -- MTKView Delegate
    //Update self.viewportSize when the MTKView's size changes
    - (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size {
        
        self.viewportSize = (vector_uint2){size.width, size.height};
    
    }
    
    //Per-frame drawing
    - (void)drawInMTKView:(MTKView *)view {
      
        //1. Create a new command buffer for every frame
        id<MTLCommandBuffer> commandBuffer = [self.commandQueue commandBuffer];
        //Get the current render pass descriptor
        MTLRenderPassDescriptor *renderPassDescriptor = view.currentRenderPassDescriptor;
       
        //2. Read the next frame from RenderAssetReader
        CMSampleBufferRef sampleBuffer = [self.reader readBuffer];
        
        //3. Proceed only if both the renderPassDescriptor and the sampleBuffer are available
        if(renderPassDescriptor && sampleBuffer)
        {
            //4. Set the color attachment's clear color (background)
            renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.5, 0.5, 1.0f);
            
            //5. Create a render command encoder from the render pass descriptor
            id<MTLRenderCommandEncoder> renderEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPassDescriptor];
            
            //6. Set the viewport (display area)
            [renderEncoder setViewport:(MTLViewport){0.0, 0.0, self.viewportSize.x, self.viewportSize.y, -1.0, 1.0 }];
            
            //7. Set the render pipeline state
            [renderEncoder setRenderPipelineState:self.pipelineState];
            
            //8. Set the vertex buffer
            [renderEncoder setVertexBuffer:self.vertices
                                    offset:0
                                   atIndex:CCVertexInputIndexVertices];
            
            //9. Set the textures (feeds the sampleBuffer data into the renderEncoder)
            [self setupTextureWithEncoder:renderEncoder buffer:sampleBuffer];
            
            //10. Pass the conversion matrix to the fragment function
            [renderEncoder setFragmentBuffer:self.convertMatrix
                                      offset:0
                                     atIndex:CCFragmentInputIndexMatrix];
            
            //11. Draw
            [renderEncoder drawPrimitives:MTLPrimitiveTypeTriangle
                              vertexStart:0
                              vertexCount:self.numVertices];
            
            //12. End encoding
            [renderEncoder endEncoding];
            
            //13. Present the drawable
            [commandBuffer presentDrawable:view.currentDrawable];
        }
        
        //14. Commit the command buffer
        [commandBuffer commit];
        
    }
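
    The class extension with the properties used above is not shown in the post. Inferred from the code, it would contain roughly the following declarations (the names match the code; the exact attributes are assumptions):

    #import <MetalKit/MetalKit.h>
    #import <CoreVideo/CoreVideo.h>
    #import "RenderAssetReader.h"
    #import "RenderShaderTypes.h"
    
    @interface ViewController () <MTKViewDelegate>
    
    @property (nonatomic, strong) MTKView *mtkView;                          //Metal view
    @property (nonatomic, strong) RenderAssetReader *reader;                 //video frame source
    @property (nonatomic, assign) CVMetalTextureCacheRef textureCache;       //Core Video texture cache
    @property (nonatomic, strong) id<MTLRenderPipelineState> pipelineState;  //render pipeline state
    @property (nonatomic, strong) id<MTLCommandQueue> commandQueue;          //command queue
    @property (nonatomic, strong) id<MTLBuffer> vertices;                    //vertex buffer
    @property (nonatomic, strong) id<MTLBuffer> convertMatrix;               //YUV->RGB matrix buffer
    @property (nonatomic, assign) NSUInteger numVertices;                    //vertex count
    @property (nonatomic, assign) vector_uint2 viewportSize;                 //viewport size
    
    @end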
    
    
    

    III. Metal shaders

    1. Vertex function (simple pass-through of the vertex data)
    2. Fragment function (texture sampler / YUV values / YUV->RGB / RGBA)
    #include <metal_stdlib>
    //Use the metal namespace
    using namespace metal;
    
    // Header shared between the Metal shader code and the C code that issues Metal API commands
    #import "RenderShaderTypes.h"
    
    
    //Struct used as the vertex function output / fragment function input
    typedef struct
    {
        float4 clipSpacePosition [[position]]; // the [[position]] attribute marks this as the clip-space position
        
        float2 textureCoordinate; // texture coordinate
        
    } RasterizerData;
    
    //Returns RasterizerData, which is passed on to the fragment function
    // vertex_id is the index of the vertex the shader is currently processing
    // buffer marks buffer data; CCVertexInputIndexVertices is the index
    vertex RasterizerData
    vertexShader(uint vertexID [[ vertex_id ]],
                 constant CCVertex *vertexArray [[ buffer(CCVertexInputIndexVertices) ]])
    {
        RasterizerData out;
        //Vertex position
        out.clipSpacePosition = vertexArray[vertexID].position;
        //Texture coordinate
        out.textureCoordinate = vertexArray[vertexID].textureCoordinate;
        return out;
    }
    
    
    //YUV->RGB reference: https://mp.weixin.qq.com/s/KKfkS5QpwPAdYcEwFAN9VA
    // stage_in means the data comes from the rasterizer (the fixed-function stage after vertex processing, which application code cannot modify)
    // texture marks texture data; CCFragmentTextureIndexTextureY is the index
    // texture marks texture data; CCFragmentTextureIndexTextureUV is the index
    // buffer marks buffer data; CCFragmentInputIndexMatrix is the index
    fragment float4
    samplingShader(RasterizerData input [[stage_in]],
                   texture2d<float> textureY [[ texture(CCFragmentTextureIndexTextureY) ]],
                   texture2d<float> textureUV [[ texture(CCFragmentTextureIndexTextureUV) ]],
                   constant CCConvertMatrix *convertMatrix [[ buffer(CCFragmentInputIndexMatrix) ]])
    {
        //1. Texture sampler
        constexpr sampler textureSampler (mag_filter::linear,
                                          min_filter::linear);
        /*
         2. Read the YUV values
            textureY.sample(textureSampler, input.textureCoordinate).r
            samples textureY at the texture coordinate and takes the R channel (Y)
            textureUV.sample(textureSampler, input.textureCoordinate).rg
            samples textureUV at the texture coordinate and takes the RG channels (UV)
         */
        float3 yuv = float3(textureY.sample(textureSampler, input.textureCoordinate).r,
                            textureUV.sample(textureSampler, input.textureCoordinate).rg);
        
        //3. Convert YUV to RGB: convertMatrix->matrix * (yuv + convertMatrix->offset)
        float3 rgb = convertMatrix->matrix * (yuv + convertMatrix->offset);
        
        //4. Return the color (RGBA)
        return float4(rgb, 1.0);
    }
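
    For reference, with the column-major simd matrix chosen in setupMatrix (kColorConversion601FullRangeMatrix) and the offset applied first, the conversion performed by the fragment shader expands to the following, where Y, U, V are the sampled values after adding convertMatrix->offset:

    R = Y + 1.400 * V
    G = Y - 0.343 * U - 0.711 * V
    B = Y + 1.765 * U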
    

    Demo project:
    MetalRenderMOV
