Metal -- Video Rendering

Author: 黑眼豆豆_ | Published 2020-09-07 10:11

    The demo result looks like this:

    [Figure: 视频渲染.gif — animated capture of the rendered video]

    The example works as follows:

    • Use the custom CCAssetReader utility class to read a mov/mp4 video file
    • In the Metal render callback, read a frame as CMSampleBufferRef image data, then obtain the CVPixelBufferRef video pixel buffer from it
    • Use CoreVideo to get the Y texture and the UV texture
    • In the custom fragment shader, convert the color encoding from YUV to RGB and display the result on screen

    CCAssetReader

    CCAssetReader provides functionality similar to AVAssetReader.

    What AVAssetReader does

    • Reads raw, undecoded media samples directly from storage, or obtains samples decoded into a renderable form.
    • Mixes multiple audio tracks of an asset, and uses and composes multiple video tracks.

    The flow is shown below:

    [Figure: AVAssetReader.png — AVAssetReader reading flow]

    CCAssetReader code

    • CCAssetReader.h
    //
    //  CCAssetReader.h
    //  002--MetalRenderMOV
    //
    //  Created by CC老师 on 2019/5/7.
    //  Copyright © 2019 CC老师. All rights reserved.
    //
    
    #import <Foundation/Foundation.h>
    #import <AVFoundation/AVFoundation.h>
    @interface CCAssetReader : NSObject
    
    // Initializer
    - (instancetype)initWithUrl:(NSURL *)url;
    
    // Read CMSampleBufferRef data from the MOV/MP4 file
    - (CMSampleBufferRef)readBuffer;
    @end
    
    • CCAssetReader.m
    //
    //  CCAssetReader.m
    //  002--MetalRenderMOV
    //
    //  Created by CC老师 on 2019/5/7.
    //  Copyright © 2019 CC老师. All rights reserved.
    //
    
    #import "CCAssetReader.h"
    
    @implementation CCAssetReader
    {
        // Video track output
        AVAssetReaderTrackOutput *readerVideoTrackOutput;
        // AVAssetReader reads decoded audio/video data from the underlying asset
        AVAssetReader   *assetReader;
        // Video URL
        NSURL *videoUrl;
        // Lock protecting the reader state
        NSLock *lock;
    }
    
    // Initializer
    - (instancetype)initWithUrl:(NSURL *)url{
        
        self = [super init];
        if(self != nil)
        {
            videoUrl = url;
            lock = [[NSLock alloc]init];
            [self setUpAsset];
        }
        return self;
    }
    
    // Asset setup
    -(void)setUpAsset{
       
        // AVURLAssetPreferPreciseDurationAndTimingKey defaults to NO; YES asks for precise duration and timing
        NSDictionary *inputOptions = [NSDictionary dictionaryWithObject:[NSNumber numberWithBool:YES] forKey:AVURLAssetPreferPreciseDurationAndTimingKey];
        
        // 1. Create an AVURLAsset (a subclass of AVAsset) that loads a resource from a local/remote URL
        AVURLAsset *inputAsset = [[AVURLAsset alloc] initWithURL:videoUrl options:inputOptions];
        
        // 2. Load the asset asynchronously
        // weakSelf breaks the retain cycle
        __weak typeof(self) weakSelf = self;
        
        // Key to load
        NSString *tracks = @"tracks";
       
        // Perform the standard asynchronous loading for the keys we need, so that accessing the
        // asset's tracks property later will not block.
        [inputAsset loadValuesAsynchronouslyForKeys:@[tracks] completionHandler: ^{
            
            // Extend self's lifetime for the duration of this block
            __strong typeof(self) strongSelf = weakSelf;
            
            // Process the loaded inputAsset asynchronously on a global concurrent queue
            dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
                NSError *error = nil;
                
                // Check the loading status
                AVKeyValueStatus tracksStatus = [inputAsset statusOfValueForKey:@"tracks" error:&error];
                // If loading did not succeed, log the error and bail out
                if (tracksStatus != AVKeyValueStatusLoaded)
                {
                    NSLog(@"error %@", error);
                    return;
                }
                // Process the loaded inputAsset
                [strongSelf processWithAsset:inputAsset];
            });
        }];
        
    }
    
    // Process the loaded asset
    - (void)processWithAsset:(AVAsset *)asset
    {
        // Lock
        [lock lock];
        NSLog(@"processWithAsset");
        NSError *error = nil;
        
        // 1. Create the AVAssetReader
        assetReader = [AVAssetReader assetReaderWithAsset:asset error:&error];
        
        // 2. kCVPixelBufferPixelFormatTypeKey: output pixel format.
        /*
         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange : 420v
         kCVPixelFormatType_32BGRA : iOS converts YUV to BGRA internally
         */
        NSMutableDictionary *outputSettings = [NSMutableDictionary dictionary];
        [outputSettings setObject:@(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) forKey:(id)kCVPixelBufferPixelFormatTypeKey];
        
        /* 3. Set up readerVideoTrackOutput
         assetReaderTrackOutputWithTrack:(AVAssetTrack *)track outputSettings:(nullable NSDictionary<NSString *, id> *)outputSettings
         Parameter 1: the track to read from the asset
         Parameter 2: the video output settings
         */
        readerVideoTrackOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:[[asset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] outputSettings:outputSettings];
        
        // alwaysCopiesSampleData: whether the data is copied before being vended.
        // YES: the output always vends copied data that you are free to modify; NO avoids the copy.
        readerVideoTrackOutput.alwaysCopiesSampleData = NO;
        
        // 4. Add the output to assetReader
        [assetReader addOutput:readerVideoTrackOutput];
        
        // 5. Start reading and check whether it actually started
        if ([assetReader startReading] == NO)
        {
            NSLog(@"Error reading from file at URL: %@", asset);
        }
        
        // Unlock
        [lock unlock];
    }
    
    // Read the next sample buffer
    - (CMSampleBufferRef)readBuffer {
        // Lock
        [lock lock];
        CMSampleBufferRef sampleBufferRef = nil;
        
        // 1. Make sure readerVideoTrackOutput was created successfully
        if (readerVideoTrackOutput) {
           
            // Copy the next sample buffer into sampleBufferRef (the caller is responsible for releasing it)
            sampleBufferRef = [readerVideoTrackOutput copyNextSampleBuffer];
        }
        
        // 2. If assetReader exists and has finished reading, clear readerVideoTrackOutput/assetReader
        //    and set them up again, so the video loops.
        if (assetReader && assetReader.status == AVAssetReaderStatusCompleted) {
            NSLog(@"customInit");
            readerVideoTrackOutput = nil;
            assetReader = nil;
            [self setUpAsset];
        }
        
        // Unlock
        [lock unlock];
        
        // 3. Return the sample buffer that was read
        return sampleBufferRef;
    }
    @end
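
    A note on usage (a minimal caller-side sketch, not part of the original listing): the buffer returned by readBuffer comes from copyNextSampleBuffer, so it follows Core Foundation's Create/Copy rule, and it can be NULL while the asset is still loading asynchronously or while looping restarts. The caller should therefore check for NULL and release the buffer when done:

    // Hypothetical caller-side sketch: pull one frame per render tick and release it afterwards.
    CMSampleBufferRef sampleBuffer = [self.reader readBuffer];
    if (sampleBuffer) {
        CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        // ... hand pixelBuffer to the renderer (see ViewController below) ...
        CFRelease(sampleBuffer);   // readBuffer returns a +1 reference
    }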
    

    LeoShaderTypes.h

    • Define a struct that stores the vertex data (the header needs #include <simd/simd.h> for the vector/matrix types)
    #include <simd/simd.h>
    
    typedef struct
    {
        // Vertex position (x,y,z,w)
        vector_float4 position;
        // Texture coordinate (s,t)
        vector_float2 textureCoordinate;
    } LeoVertex;
    
    • Define a conversion-matrix struct used to convert YUV to RGB
    // YUV -> RGB conversion matrix structure
    typedef struct {
        // 3x3 matrix
        matrix_float3x3 matrix;
        // Offset added to the YUV values before the matrix multiply
        vector_float3 offset;
    } LeoConvertMatrix;
    
    • Vertex input index
    // Vertex function input index
    typedef enum LeoVertexInputIndex
    {
        LeoVertexInputIndexVertices     = 0,
    } LeoVertexInputIndex;
    
    • Fragment function buffer index
    // Fragment function buffer index
    typedef enum LeoFragmentBufferIndex
    {
        LeoFragmentInputIndexMatrix     = 0,
    } LeoFragmentBufferIndex;
    
    
    • Fragment function texture index
    // Fragment function texture index
    typedef enum LeoFragmentTextureIndex
    {
        // Y texture
        LeoFragmentTextureIndexTextureY     = 0,
        // UV texture
        LeoFragmentTextureIndexTextureUV     = 1,
    } LeoFragmentTextureIndex;
    

    LeoShaders.metal

    • Define a struct that carries data from the vertex shader to the fragment shader
    #include <metal_stdlib>
    #import "LeoShaderTypes.h"
    using namespace metal;
    
    // Struct used as vertex function output / fragment function input
    typedef struct{
        float4 clipSpacePosition [[position]]; // the [[position]] attribute marks this as the clip-space position
        float2 textureCoordinate; // texture coordinate
    } RasterizerData;
    
    • Vertex shader. In this project it only needs to pass the vertex position and texture coordinate through to the fragment stage.
    // Returns RasterizerData -> consumed by the fragment function
    // vertex_id is the index of the vertex currently being processed
    // buffer(...) marks buffer data; LeoVertexInputIndexVertices (0) is the index
    vertex RasterizerData
    vertexShader(uint vertexID [[ vertex_id ]],
                 constant LeoVertex *vertexArray [[buffer(LeoVertexInputIndexVertices)]])
    {
        RasterizerData out;
        // Vertex position
        out.clipSpacePosition = vertexArray[vertexID].position;
        // Texture coordinate
        out.textureCoordinate = vertexArray[vertexID].textureCoordinate;
        return out;
    }
    
    • The fragment shader's job is to convert YUV to RGB
    // stage_in means the data comes from the rasterizer (rasterization happens after vertex processing and cannot be modified by application code)
    // texture(...) marks texture arguments; LeoFragmentTextureIndexTextureY is the Y texture index
    // LeoFragmentTextureIndexTextureUV is the UV texture index
    // buffer(...) marks buffer data; LeoFragmentInputIndexMatrix is the index
    fragment float4
    samplingShader(RasterizerData input [[stage_in]],
                   texture2d<float> textureY [[ texture(LeoFragmentTextureIndexTextureY) ]],
                   texture2d<float> textureUV [[ texture(LeoFragmentTextureIndexTextureUV) ]],
                   constant LeoConvertMatrix *convertMatrix [[ buffer(LeoFragmentInputIndexMatrix) ]])
    {
        // 1. Create a texture sampler
        constexpr sampler textureSampler (mag_filter::linear,
                                          min_filter::linear);
        /*
         2. Read the YUV values
            textureY.sample(textureSampler, input.textureCoordinate).r
            samples textureY at the texture coordinate and takes the R channel (Y)
            textureUV.sample(textureSampler, input.textureCoordinate).rg
            samples textureUV at the texture coordinate and takes the RG channels (UV)
         */
        float3 yuv = float3(textureY.sample(textureSampler, input.textureCoordinate).r,
                            textureUV.sample(textureSampler, input.textureCoordinate).rg);
        
        // 3. Convert YUV to RGB: convertMatrix->matrix * (YUV + convertMatrix->offset)
        float3 rgb = convertMatrix->matrix * (yuv + convertMatrix->offset);
        
        // 4. Return the color (RGBA)
        return float4(rgb, 1.0);
    }
    

    ViewController
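
    The snippets below come from the view controller. The original article does not show the property declarations or viewDidLoad, so the following class extension and setup order are a hypothetical sketch of the state those snippets assume (names follow the code below):

    #import <MetalKit/MetalKit.h>
    #import <CoreVideo/CoreVideo.h>
    #import "CCAssetReader.h"
    #import "LeoShaderTypes.h"
    
    // Hypothetical class extension; not shown in the original article.
    @interface ViewController () <MTKViewDelegate>
    @property (nonatomic, strong) MTKView *mtkView;                     // Metal view
    @property (nonatomic, strong) CCAssetReader *reader;                // video reader
    @property (nonatomic, assign) CVMetalTextureCacheRef textureCache;  // CoreVideo -> Metal texture cache
    @property (nonatomic, strong) id<MTLRenderPipelineState> pipelineState;
    @property (nonatomic, strong) id<MTLCommandQueue> commandQueue;
    @property (nonatomic, strong) id<MTLBuffer> vertices;               // vertex buffer
    @property (nonatomic, strong) id<MTLBuffer> convertMatrix;          // YUV->RGB matrix buffer
    @property (nonatomic, assign) NSUInteger numVertices;
    @property (nonatomic, assign) vector_uint2 viewportSize;
    @end
    
    // Presumed call order in viewDidLoad; each setup method is shown below.
    - (void)viewDidLoad {
        [super viewDidLoad];
        [self setupMTKView];
        [self setupCCAsset];
        [self setupPipeline];
        [self setupVertex];
        [self setupMatrix];
    }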

    • Initialize the MTKView
    -(void)setupMTKView{
        // 1. Create the mtkView
        self.mtkView = [[MTKView alloc] initWithFrame:self.view.bounds];
        // Get the default device
        self.mtkView.device = MTLCreateSystemDefaultDevice();
        // Make the MTKView the controller's view
        self.view = self.mtkView;
        // Set the delegate
        self.mtkView.delegate = self;
        // Record the viewport size
        self.viewportSize = (vector_uint2){self.mtkView.drawableSize.width, self.mtkView.drawableSize.height};
    }
    
    • Set up CCAssetReader and create the CVMetalTextureCacheRef texture cache
    // 2. CCAssetReader setup
    -(void)setupCCAsset{
        // Note: CCAssetReader supports both MOV and MP4 files
        // 1. Video file path
        //NSURL *url = [[NSBundle mainBundle] URLForResource:@"kun" withExtension:@"mov"];
        NSURL *url = [[NSBundle mainBundle] URLForResource:@"kun2" withExtension:@"mp4"];
        // 2. Create the CCAssetReader
        self.reader = [[CCAssetReader alloc] initWithUrl:url];
        // 3. Create _textureCache (CoreVideo provides a high-speed CPU/GPU channel for reading texture data)
        CVMetalTextureCacheCreate(NULL, NULL, self.mtkView.device, NULL, &_textureCache);
    }
    
    • Set up the render pipeline
    // Set up the render pipeline
    -(void)setupPipeline {
        
        // 1. Get the .metal functions
        /*
         newDefaultLibrary: recommended when there is a single default .metal library
         newLibraryWithFile:error: load a Metal library from a specified file
         newLibraryWithData:error: load a Metal library from data
         */
        id<MTLLibrary> defaultLibrary = [self.mtkView.device newDefaultLibrary];
        // Vertex shader; vertexShader is the function name
        id<MTLFunction> vertexFunction = [defaultLibrary newFunctionWithName:@"vertexShader"];
        // Fragment shader; samplingShader is the function name
        id<MTLFunction> fragmentFunction = [defaultLibrary newFunctionWithName:@"samplingShader"];
        
        // 2. Render pipeline descriptor
        MTLRenderPipelineDescriptor *pipelineStateDescriptor = [[MTLRenderPipelineDescriptor alloc] init];
        // Set the vertex function
        pipelineStateDescriptor.vertexFunction = vertexFunction;
        // Set the fragment function
        pipelineStateDescriptor.fragmentFunction = fragmentFunction;
        // Set the color attachment pixel format
        pipelineStateDescriptor.colorAttachments[0].pixelFormat = self.mtkView.colorPixelFormat;
        
        // 3. Create the render pipeline state from the descriptor
        // Creating a pipeline state is expensive; do not do it every frame
        self.pipelineState = [self.mtkView.device newRenderPipelineStateWithDescriptor:pipelineStateDescriptor
                                                                                 error:NULL];
        // 4. The command queue guarantees that render commands are submitted to the GPU in order
        self.commandQueue = [self.mtkView.device newCommandQueue];
    }
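
    The pipeline-state call above passes NULL for the error parameter. A slightly safer variant (an assumption, not from the original article) captures the NSError so shader or pipeline compilation failures become visible:

    NSError *error = nil;
    self.pipelineState = [self.mtkView.device newRenderPipelineStateWithDescriptor:pipelineStateDescriptor
                                                                             error:&error];
    // If function lookup or pipeline creation fails, error explains why.
    NSAssert(self.pipelineState, @"Failed to create pipeline state: %@", error);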
    
    • Set up the vertices
    // Set up the vertices
    - (void)setupVertex {
        
        // 1. Vertex positions (x,y,z,w); texture coordinates (s,t)
        // Note: the positions span [-1,1] so the video fills the whole screen
        static const LeoVertex quadVertices[] =
        {   // position: x, y, z, w;        texture coordinate: s, t
            { {  1.0, -1.0, 0.0, 1.0 },  { 1.f, 1.f } },
            { { -1.0, -1.0, 0.0, 1.0 },  { 0.f, 1.f } },
            { { -1.0,  1.0, 0.0, 1.0 },  { 0.f, 0.f } },
            
            { {  1.0, -1.0, 0.0, 1.0 },  { 1.f, 1.f } },
            { { -1.0,  1.0, 0.0, 1.0 },  { 0.f, 0.f } },
            { {  1.0,  1.0, 0.0, 1.0 },  { 1.f, 0.f } },
        };
        
        // 2. Create the vertex buffer
        self.vertices = [self.mtkView.device newBufferWithBytes:quadVertices
                                                         length:sizeof(quadVertices)
                                                        options:MTLResourceStorageModeShared];
        // 3. Compute the number of vertices
        self.numVertices = sizeof(quadVertices) / sizeof(LeoVertex);
    }
    
    • Set up the conversion matrix used in the shader to convert YUV to RGB
    // Set up the YUV->RGB conversion matrix
    - (void)setupMatrix {
        
        // 1. Conversion matrices
        // BT.601, which is the standard for SDTV (video range).
        matrix_float3x3 kColorConversion601DefaultMatrix = (matrix_float3x3){
            (simd_float3){1.164,  1.164, 1.164},
            (simd_float3){0.0, -0.392, 2.017},
            (simd_float3){1.596, -0.813,   0.0},
        };
        
        // BT.601 full range
        matrix_float3x3 kColorConversion601FullRangeMatrix = (matrix_float3x3){
            (simd_float3){1.0,    1.0,    1.0},
            (simd_float3){0.0,    -0.343, 1.765},
            (simd_float3){1.4,    -0.711, 0.0},
        };
       
        // BT.709, which is the standard for HDTV.
        matrix_float3x3 kColorConversion709DefaultMatrix = (matrix_float3x3){
            (simd_float3){1.164,  1.164, 1.164},
            (simd_float3){0.0, -0.213, 2.112},
            (simd_float3){1.793, -0.533,   0.0},
        };
        
        // 2. Offset added to the YUV values before the matrix multiply
        vector_float3 kColorConversion601FullRangeOffset = (vector_float3){ -(16.0/255.0), -0.5, -0.5};
        
        // 3. Fill in the conversion-matrix struct.
        LeoConvertMatrix matrix;
        // Pick one of the matrices:
        /*
         kColorConversion601DefaultMatrix;
         kColorConversion601FullRangeMatrix;
         kColorConversion709DefaultMatrix;
         */
        matrix.matrix = kColorConversion601FullRangeMatrix;
        // Set the offset
        matrix.offset = kColorConversion601FullRangeOffset;
        
        // 4. Create the conversion-matrix buffer.
        self.convertMatrix = [self.mtkView.device newBufferWithBytes:&matrix
                                                            length:sizeof(LeoConvertMatrix)
                                                    options:MTLResourceStorageModeShared];
    }
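
    For reference, here is what the fragment shader ends up computing with the matrix and offset chosen above. matrix_float3x3 is initialized column by column, so the effective rows are (1.0, 0.0, 1.4), (1.0, -0.343, -0.711) and (1.0, 1.765, 0.0); Y comes from the R channel of textureY, and (Cb, Cr) from the RG channels of textureUV:

    // Expansion of convertMatrix->matrix * (yuv + convertMatrix->offset):
    // R = (Y - 16.0/255.0) + 1.400 * (Cr - 0.5)
    // G = (Y - 16.0/255.0) - 0.343 * (Cb - 0.5) - 0.711 * (Cr - 0.5)
    // B = (Y - 16.0/255.0) + 1.765 * (Cb - 0.5)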
    
    • Implement drawableSizeWillChange
    // When the MTKView size changes, update self.viewportSize
    - (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size {
        // Update the viewport size
        self.viewportSize = (vector_uint2){size.width, size.height};
    }
    
    • Draw the view
    // View drawing
    - (void)drawInMTKView:(MTKView *)view {
      
        // 1. Create a new command buffer for each frame
        id<MTLCommandBuffer> commandBuffer = [self.commandQueue commandBuffer];
        // Get the current render pass descriptor
        MTLRenderPassDescriptor *renderPassDescriptor = view.currentRenderPassDescriptor;
       
        // 2. Read the next frame from CCAssetReader
        CMSampleBufferRef sampleBuffer = [self.reader readBuffer];
        
        // 3. Only render if both renderPassDescriptor and sampleBuffer are available
        if(renderPassDescriptor && sampleBuffer)
        {
            // 4. Set the clear color of the color attachment (the background color)
            renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.5, 0.5, 1.0f);
            
            // 5. Create a render command encoder from the render pass descriptor
            id<MTLRenderCommandEncoder> renderEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPassDescriptor];
            
            // 6. Set the viewport (display area)
            [renderEncoder setViewport:(MTLViewport){0.0, 0.0, self.viewportSize.x, self.viewportSize.y, -1.0, 1.0 }];
            
            // 7. Set the render pipeline state on the encoder
            [renderEncoder setRenderPipelineState:self.pipelineState];
            
            // 8. Set the vertex buffer
            [renderEncoder setVertexBuffer:self.vertices
                                    offset:0
                                   atIndex:LeoVertexInputIndexVertices];
            
            // 9. Set the textures (hand the sampleBuffer data to the renderEncoder)
            [self setupTextureWithEncoder:renderEncoder buffer:sampleBuffer];
            
            // 10. Set the fragment conversion-matrix buffer
            [renderEncoder setFragmentBuffer:self.convertMatrix
                                      offset:0
                                     atIndex:LeoFragmentInputIndexMatrix];
            
            // 11. Draw
            [renderEncoder drawPrimitives:MTLPrimitiveTypeTriangle
                              vertexStart:0
                              vertexCount:self.numVertices];
            
            // 12. End encoding
            [renderEncoder endEncoding];
            
            // 13. Present
            [commandBuffer presentDrawable:view.currentDrawable];
        }
        
        // 14. Commit the command buffer
        [commandBuffer commit];
       
    }
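
    One detail worth noting: sampleBuffer is released inside setupTextureWithEncoder:buffer: (step 13 below), so if renderPassDescriptor happens to be nil while a frame was read, that frame is never released. A hedged, leak-safe variant of the check above (not in the original article) could release it explicitly:

    if (renderPassDescriptor && sampleBuffer) {
        // ... encode and draw as above; setupTextureWithEncoder releases sampleBuffer ...
    } else if (sampleBuffer) {
        // No render pass this frame: release the buffer ourselves.
        CFRelease(sampleBuffer);
    }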
    
    • Hand the sampleBuffer data to the renderEncoder
    // Set up the textures
    - (void)setupTextureWithEncoder:(id<MTLRenderCommandEncoder>)encoder buffer:(CMSampleBufferRef)sampleBuffer {
        
        // 1. Read the CVPixelBuffer from the CMSampleBuffer
        CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        
        id<MTLTexture> textureY = nil;
        id<MTLTexture> textureUV = nil;
       
        // textureY setup
        {
            // 2. Get the width/height of the Y plane
            size_t width = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0);
            size_t height = CVPixelBufferGetHeightOfPlane(pixelBuffer, 0);
            
            // 3. Pixel format: a single 8-bit normalized unsigned integer component.
            MTLPixelFormat pixelFormat = MTLPixelFormatR8Unorm;
            
            // 4. CoreVideo Metal texture reference
            CVMetalTextureRef texture = NULL;
            
            /* 5. Create a Metal texture from the video pixel buffer
             CVReturn CVMetalTextureCacheCreateTextureFromImage(CFAllocatorRef allocator,
             CVMetalTextureCacheRef textureCache,
             CVImageBufferRef sourceImage,
             CFDictionaryRef textureAttributes,
             MTLPixelFormat pixelFormat,
             size_t width,
             size_t height,
             size_t planeIndex,
             CVMetalTextureRef  *textureOut);
             
             Purpose: creates a Core Video Metal texture buffer from an existing image buffer.
             Parameter 1: allocator, the memory allocator, usually kCFAllocatorDefault
             Parameter 2: textureCache, the texture cache object
             Parameter 3: sourceImage, the video image buffer
             Parameter 4: textureAttributes, texture attribute dictionary, usually NULL
             Parameter 5: pixelFormat, the Metal pixel format constant for the image buffer data. Note that if the
                          format does not match the format the source was captured/decoded with, the image will look wrong.
             Parameter 6: width, texture width in pixels
             Parameter 7: height, texture height in pixels
             Parameter 8: planeIndex, the plane to map if the image buffer is planar; ignored for non-planar buffers
             Parameter 9: textureOut, on return, the newly created Metal texture buffer
             */
            CVReturn status = CVMetalTextureCacheCreateTextureFromImage(NULL, self.textureCache, pixelBuffer, NULL, pixelFormat, width, height, 0, &texture);
            
            // 6. Check whether the texture was created successfully
            if(status == kCVReturnSuccess)
            {
                // 7. Get the Metal texture from the CoreVideo texture
                textureY = CVMetalTextureGetTexture(texture);
               
                // 8. Release the CoreVideo texture when done
                CFRelease(texture);
            }
        }
        
        // 9. textureUV setup (same as textureY, but plane 1 and a two-component RG format)
        {
            size_t width = CVPixelBufferGetWidthOfPlane(pixelBuffer, 1);
            size_t height = CVPixelBufferGetHeightOfPlane(pixelBuffer, 1);
            MTLPixelFormat pixelFormat = MTLPixelFormatRG8Unorm;
            CVMetalTextureRef texture = NULL;
            CVReturn status = CVMetalTextureCacheCreateTextureFromImage(NULL, self.textureCache, pixelBuffer, NULL, pixelFormat, width, height, 1, &texture);
            if(status == kCVReturnSuccess)
            {
                textureUV = CVMetalTextureGetTexture(texture);
                CFRelease(texture);
            }
        }
        
        // 10. Only bind if both textureY and textureUV were created
        if(textureY != nil && textureUV != nil)
        {
            // 11. Bind textureY for the fragment function
            [encoder setFragmentTexture:textureY atIndex:LeoFragmentTextureIndexTextureY];
            // 12. Bind textureUV for the fragment function
            [encoder setFragmentTexture:textureUV atIndex:LeoFragmentTextureIndexTextureUV];
        }
        
        // 13. Release the sampleBuffer now that we are done with it
        CFRelease(sampleBuffer);
    }
    

    Original article: https://www.haomeiwen.com/subject/atrkektx.html