In this section we take the frame data captured by the camera and render it manually onto our own imageView.
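The delegate method below assumes that a capture session (self.session) with a camera input has already been created elsewhere in the view controller. A minimal sketch of that setup might look like this; the property name session and the method name setupSession are assumptions based on how they are used later in the post.

#import <AVFoundation/AVFoundation.h>

// Sketch only: minimal capture-session setup assumed by the rest of this post.
- (void)setupSession
{
    self.session = [[AVCaptureSession alloc] init];
    // Pick the default camera and wrap it in an input
    AVCaptureDevice *camera = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    NSError *error = nil;
    AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:camera error:&error];
    if (input && [self.session canAddInput:input])
    {
        [self.session addInput:input];
    }
    // Output configuration is shown in -setupOutput further below
    [self setupOutput];
    [self.session startRunning];
}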
#pragma mark - AVCaptureVideoDataOutputSampleBufferDelegate -
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    NSLog(@"---%@",[NSThread currentThread]);
    /*
     CMSampleBufferRef: the frame buffer, describing the current frame.
     The CMSampleBufferGet... functions read information from it:
     CMSampleBufferGetDuration: duration of the current frame
     CMSampleBufferGetImageBuffer: pixel data of the current frame
     */
    // CoreImage: low-level image drawing
    // Get the frame's pixel data
    // Get the frame duration
    // CMTime duration = CMSampleBufferGetDuration(sampleBuffer);
    CVImageBufferRef imagebufferRef = CMSampleBufferGetImageBuffer(sampleBuffer);
    // Wrap the pixel buffer in a CIImage
    CIImage *image = [CIImage imageWithCVImageBuffer:imagebufferRef];
    // Convert the CIImage to a UIImage
    UIImage *img = [UIImage imageWithCIImage:image];
    // Display img. This delegate method runs on the background capture queue,
    // so UI updates must be dispatched back to the main queue.
    dispatch_sync(dispatch_get_main_queue(), ^
    {
        self.imageView.image = img;
        // At this point the rendering still has an orientation problem:
        // portrait and landscape are swapped, so we have to turn it around ourselves.
    });
}
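One possible way to deal with the rotation mentioned in the comment above (not the approach this post ends up using, which adjusts the imageView itself) is to bake an orientation into the UIImage when it is created. A sketch, assuming the back camera delivers landscape frames while the UI is in portrait; the exact orientation value depends on the camera and interface orientation.

// Hypothetical alternative: compensate for the sensor orientation in the UIImage itself
UIImage *rotated = [UIImage imageWithCIImage:image
                                       scale:[UIScreen mainScreen].scale
                                 orientation:UIImageOrientationRight];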
The imageView used for display needs some initial setup to fix the incorrect width/height on screen:
- (UIImageView *)imageView
{
    if (_imageView == nil)
    {
        UIImageView *imageView = [[UIImageView alloc] init];
        // Move the anchor point
        CGFloat width = (SCREEN_WIDTH / SCREEN_HEIGHT) * 0.5;
        NSLog(@"%f", width);
        // The anchor point is expressed in the 0~1 range
        imageView.layer.anchorPoint = CGPointMake(width, 0.5);
        // The camera frames are landscape, so swap width and height and rotate the view 90°
        imageView.frame = CGRectMake(0, 0, SCREEN_HEIGHT, SCREEN_WIDTH);
        imageView.transform = CGAffineTransformMakeRotation(M_PI_2);
        _imageView = imageView;
        [self.view addSubview:_imageView];
    }
    return _imageView;
}
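The getter above uses SCREEN_WIDTH and SCREEN_HEIGHT, which are never defined in the post. They are presumably macros for the main screen's bounds, along these lines:

// Assumed definitions for the screen-size macros used above
#define SCREEN_WIDTH  ([UIScreen mainScreen].bounds.size.width)
#define SCREEN_HEIGHT ([UIScreen mainScreen].bounds.size.height)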
- Use the connection settings to fix the wrong video orientation; with this in place we no longer have to adjust the frame of our custom imageView by hand.
/// Configure the output (frame rate, pixel format, delegate, orientation)
- (void)setupOutput
{
    // iOS cannot display YUV directly, only RGB, so the YUV frames will later be converted: YUV => RGB
    // 3.1 Create the video data output
    AVCaptureVideoDataOutput *videoOutput = [[AVCaptureVideoDataOutput alloc] init];
    // Set the frame rate (minFrameDuration on the output is deprecated; see the connection below)
    videoOutput.minFrameDuration = CMTimeMake(1, 10);
    // 3.2 Set the source pixel format via videoSettings: full-range YUV 4:2:0
    videoOutput.videoSettings = @{(NSString *)(kCVPixelBufferPixelFormatTypeKey):@(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)};
    // 3.3 Set the sample buffer delegate that receives every frame, on a serial queue
    [videoOutput setSampleBufferDelegate:self queue:dispatch_queue_create("SERIAL", DISPATCH_QUEUE_SERIAL)];
    // 3.4 The video connection links the capture device to this output,
    // but it stays nil until the output has been added to the session
    AVCaptureConnection *connection = [videoOutput connectionWithMediaType:AVMediaTypeVideo];
    // NSLog(@"---1%@",connection);
    // 3.5 Add the output to the session
    if ([self.session canAddOutput:videoOutput])
    {
        [self.session addOutput:videoOutput];
    }
    // The connection lets us configure quite a lot: frame rate, orientation, mirroring, and more
    connection = [videoOutput connectionWithMediaType:AVMediaTypeVideo];
    connection.videoMinFrameDuration = CMTimeMake(1, 10); // frame rate; first deprecated in iOS 7.0 - use AVCaptureDevice's activeVideoMinFrameDuration instead
    // Mirroring
    connection.videoMirrored = YES;
    // Video orientation. With this set here, we no longer need to adjust our custom imageView preview ourselves.
    connection.videoOrientation = AVCaptureVideoOrientationPortrait;
}
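As the deprecation note above says, since iOS 7 the recommended way to set the frame rate is through the capture device rather than the output or connection. A sketch, assuming camera is the AVCaptureDevice that backs the session's input:

// Frame-rate configuration via the device (replaces the deprecated
// minFrameDuration / videoMinFrameDuration calls above)
NSError *error = nil;
if ([camera lockForConfiguration:&error])
{
    camera.activeVideoMinFrameDuration = CMTimeMake(1, 10); // at most 10 fps
    camera.activeVideoMaxFrameDuration = CMTimeMake(1, 10);
    [camera unlockForConfiguration];
}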
- Use OpenGL to implement the image rendering
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    /*
     CMSampleBufferRef: the frame buffer, describing the current frame.
     The CMSampleBufferGet... functions read information from it:
     CMSampleBufferGetDuration: duration of the current frame
     CMSampleBufferGetImageBuffer: pixel data of the current frame
     */
    // CoreImage: low-level image drawing
    // Get the frame's pixel data
    /*
     CPU: main memory
     GPU: video memory, OpenGL
     */
    CVImageBufferRef imagebufferRef = CMSampleBufferGetImageBuffer(sampleBuffer);
    // Wrap the pixel buffer in a CIImage
    CIImage *image = [CIImage imageWithCVImageBuffer:imagebufferRef];
    // Render the preview with OpenGL: it runs on the GPU, which gives relatively better performance
    // OpenGL context: EAGLContext, created with a chosen rendering API
    EAGLContext *ctx = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
    // Get a CoreImage context backed by that GL context
    CIContext *context = [CIContext contextWithEAGLContext:ctx];
    // Create a CGImageRef
    // image.extent: the image's bounds; the GPU renders the image
    // CoreFoundation APIs whose names contain create/retain/copy hand us ownership:
    // they are C APIs, so we have to release the object ourselves
    CGImageRef imgRef = [context createCGImage:image fromRect:image.extent];
    UIImage *img = [UIImage imageWithCGImage:imgRef];
    dispatch_sync(dispatch_get_main_queue(), ^
    {
        self.imageView.image = img;
        CGImageRelease(imgRef);
    });
    // CoreImage: low-level image drawing
}
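Creating a fresh EAGLContext and CIContext for every frame, as in the method above, is expensive; in practice both would usually be created once and reused. A sketch, assuming the view controller declares a @property (nonatomic, strong) CIContext *ciContext;

// Hypothetical cached CoreImage context, created once and reused for every frame
- (CIContext *)ciContext
{
    if (_ciContext == nil)
    {
        EAGLContext *glContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
        _ciContext = [CIContext contextWithEAGLContext:glContext];
    }
    return _ciContext;
}

The delegate method would then call [self.ciContext createCGImage:image fromRect:image.extent] instead of building a new context per frame.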