Detailed Usage of VideoToolbox
1 Creating and Initializing VideoToolbox
VTCompressionSessionCreate (create the encoding session) -> VTSessionSetProperty (set the encoding parameters) -> VTCompressionSessionPrepareToEncodeFrames (prepare to start encoding)
(1) initVideoToolBox
///////////////////////---------------------------------------------- Create the session
int width = 480, height = 640;
OSStatus status = VTCompressionSessionCreate(NULL, width, height, kCMVideoCodecType_H264, NULL, NULL, NULL, didCompressH264, (__bridge void *)(self), &EncodingSession);
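The didCompressH264 function passed above must match the VTCompressionOutputCallback prototype; the per-frame handling described in step (3) runs inside it. Its declaration, for reference:
// Output callback invoked by VideoToolbox once a frame has been encoded
void didCompressH264(void *outputCallbackRefCon,
                     void *sourceFrameRefCon,
                     OSStatus status,
                     VTEncodeInfoFlags infoFlags,
                     CMSampleBufferRef sampleBuffer);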
(2) Start video recording and pass each captured frame to -encode
Call VTCompressionSessionEncodeFrame to submit each video frame for encoding; if the call fails, destroy the session with VTCompressionSessionInvalidate (this failure path is sketched after the encode call below).
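Frames typically arrive from an AVCaptureVideoDataOutput sample-buffer delegate. A minimal sketch of handing each captured frame to -encode (the delegate wiring is an assumption; it is not shown in the original):
// AVCaptureVideoDataOutputSampleBufferDelegate: forward every captured frame to the encoder
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    [self encode:sampleBuffer];
}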
////////////////////////-------------------------------------------- Set session properties
// Enable real-time encoding output (avoids latency)
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_RealTime, kCFBooleanTrue);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_ProfileLevel, kVTProfileLevel_H264_Baseline_AutoLevel);
// Set the keyframe interval (GOP size)
int frameInterval = 10;
CFNumberRef frameIntervalRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &frameInterval);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, frameIntervalRef);
// Set the expected frame rate
int fps = 10;
CFNumberRef fpsRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &fps);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_ExpectedFrameRate, fpsRef);
// Set the average bit rate, in bits per second (bps)
int bitRate = width * height * 3 * 4 * 8;
CFNumberRef bitRateRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &bitRate);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_AverageBitRate, bitRateRef);
// Set the hard data-rate limit; kVTCompressionPropertyKey_DataRateLimits expects a CFArray of
// [byte count, duration in seconds] pairs rather than a single CFNumber
int bitRateLimit = width * height * 3 * 4; // bytes per second
CFNumberRef bytesRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &bitRateLimit);
int durationSeconds = 1;
CFNumberRef durationRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &durationSeconds);
const void *limitValues[] = { bytesRef, durationRef };
CFArrayRef dataRateLimitsRef = CFArrayCreate(kCFAllocatorDefault, limitValues, 2, &kCFTypeArrayCallBacks);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_DataRateLimits, dataRateLimitsRef);
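The flow in step (1) ends with VTCompressionSessionPrepareToEncodeFrames, which is not shown in the snippet above; a one-line sketch:
// Signal that configuration is done and frames are about to be submitted
VTCompressionSessionPrepareToEncodeFrames(EncodingSession);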
(3) After each frame has been encoded, the preset callback didCompressH264 is invoked. If the frame is a keyframe, use CMSampleBufferGetFormatDescription to obtain its CMFormatDescriptionRef, then use CMVideoFormatDescriptionGetH264ParameterSetAtIndex to extract the SPS and PPS.
////////////////////////////////////----------------------------------------------- Submit a frame to the encoder
CVImageBufferRef imageBuffer = (CVImageBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);
// Presentation timestamp for the frame; if it is not set, the resulting timeline becomes too long.
CMTime presentationTimeStamp = CMTimeMake(frameID++, 1000);
VTEncodeInfoFlags flags;
OSStatus statusCode = VTCompressionSessionEncodeFrame(EncodingSession,
                                                      imageBuffer,
                                                      presentationTimeStamp,
                                                      kCMTimeInvalid,
                                                      NULL, NULL, &flags);
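The failure path mentioned in step (2) (invalidate the session when encoding a frame fails) is not shown above; a minimal sketch, assuming EncodingSession is the session ivar created in step (1):
if (statusCode != noErr) {
    // Encoding failed: tear the session down as step (2) describes
    NSLog(@"H264: VTCompressionSessionEncodeFrame failed %d", (int)statusCode);
    VTCompressionSessionInvalidate(EncodingSession);
    CFRelease(EncodingSession);
    EncodingSession = NULL;
    return;
}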
///////////////////////////----------------------------------------------- Get SPS and PPS from a keyframe
// Check whether the current frame is a keyframe
bool keyframe = !CFDictionaryContainsKey((CFDictionaryRef)CFArrayGetValueAtIndex(CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true), 0), kCMSampleAttachmentKey_NotSync);
// For a keyframe, extract the SPS & PPS data
if (keyframe)
{
    CMFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBuffer);
    size_t sparameterSetSize, sparameterSetCount;
    const uint8_t *sparameterSet;
    OSStatus statusCode = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 0, &sparameterSet, &sparameterSetSize, &sparameterSetCount, 0);
    if (statusCode == noErr)
    {
        // Found sps and now check for pps
        size_t pparameterSetSize, pparameterSetCount;
        const uint8_t *pparameterSet;
        OSStatus statusCode = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 1, &pparameterSet, &pparameterSetSize, &pparameterSetCount, 0);
        if (statusCode == noErr)
        {
            // Found pps
            NSData *sps = [NSData dataWithBytes:sparameterSet length:sparameterSetSize];
            NSData *pps = [NSData dataWithBytes:pparameterSet length:pparameterSetSize];
            if (encoder)
            {
                [encoder gotSpsPps:sps pps:pps];
            }
        }
    }
}
/////////////////////////////////////////////----------------------------------------------- Write out the encoded data
CMBlockBufferRef dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
size_t length, totalLength;
char *dataPointer;
OSStatus statusCodeRet = CMBlockBufferGetDataPointer(dataBuffer, 0, &length, &totalLength, &dataPointer);
if (statusCodeRet == noErr) {
    size_t bufferOffset = 0;
    static const int AVCCHeaderLength = 4; // The first four bytes of each NALU are not the 0x00000001 start code but the NALU length in big-endian byte order
    // Loop over the NALUs in the block buffer
    while (bufferOffset < totalLength - AVCCHeaderLength) {
        uint32_t NALUnitLength = 0;
        // Read the NAL unit length
        memcpy(&NALUnitLength, dataPointer + bufferOffset, AVCCHeaderLength);
        // Convert from big-endian to host byte order
        NALUnitLength = CFSwapInt32BigToHost(NALUnitLength);
        NSData *data = [[NSData alloc] initWithBytes:(dataPointer + bufferOffset + AVCCHeaderLength) length:NALUnitLength];
        [encoder gotEncodedData:data isKeyFrame:keyframe];
        // Move to the next NAL unit in the block buffer
        bufferOffset += AVCCHeaderLength + NALUnitLength;
    }
}
Finally, replace the first four bytes of every NALU in each frame with the 0x00 00 00 01 start code before writing the data to the file.
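The gotSpsPps:pps: and gotEncodedData:isKeyFrame: methods called from the callback are not listed in this section. A minimal sketch of how the start-code writing could look, assuming fileHandle is an NSFileHandle ivar opened for the output .h264 file (both the method bodies and fileHandle are illustrative assumptions):
- (void)gotSpsPps:(NSData *)sps pps:(NSData *)pps {
    const char bytes[] = "\x00\x00\x00\x01";          // Annex B start code
    NSData *startCode = [NSData dataWithBytes:bytes length:4];
    [fileHandle writeData:startCode];                 // fileHandle: assumed NSFileHandle ivar
    [fileHandle writeData:sps];
    [fileHandle writeData:startCode];
    [fileHandle writeData:pps];
}

- (void)gotEncodedData:(NSData *)data isKeyFrame:(BOOL)isKeyFrame {
    const char bytes[] = "\x00\x00\x00\x01";
    NSData *startCode = [NSData dataWithBytes:bytes length:4];
    [fileHandle writeData:startCode];                 // start code written in place of the 4-byte AVCC length
    [fileHandle writeData:data];
}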
(4) Call VTCompressionSessionCompleteFrames to finish encoding, then destroy the session with VTCompressionSessionInvalidate and release it.
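A minimal sketch of step (4), again assuming EncodingSession is the session created in step (1) (the method name is hypothetical):
- (void)endVideoToolBox {
    // Complete all pending frames, then invalidate and release the session
    VTCompressionSessionCompleteFrames(EncodingSession, kCMTimeInvalid);
    VTCompressionSessionInvalidate(EncodingSession);
    CFRelease(EncodingSession);
    EncodingSession = NULL;
}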