
Face Capture

Author: 管你爱不爱 | Published 2017-04-13 17:17

    First, use AVFoundation (#import <AVFoundation/AVFoundation.h>) to capture live video frames from the camera.
    Key code:

    #import <AVFoundation/AVFoundation.h>

    @interface ViewController () <AVCaptureVideoDataOutputSampleBufferDelegate>
    // Capture device (the camera)
    @property (nonatomic, strong) AVCaptureDevice *device;
    // Input stream
    @property (nonatomic, strong) AVCaptureDeviceInput *input;
    // Session that coordinates the flow of data from input to output
    @property (nonatomic, strong) AVCaptureSession *session;
    // Preview layer
    @property (nonatomic, strong) AVCaptureVideoPreviewLayer *previewLayer;
    // Output stream
    @property (nonatomic, strong) AVCaptureVideoDataOutput *videoDataOutput;
    
    @end
    
    - (void)viewDidLoad {
        [super viewDidLoad];
        [self.view.layer addSublayer:self.previewLayer];
        [self.session startRunning];
    }
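    // Note: -startRunning is a blocking call, so Apple recommends starting the
    // session from a background queue rather than the main thread, e.g. (the
    // queue choice below is illustrative, not from the original post):
    //
    //   dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
    //       [self.session startRunning];
    //   });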
    #pragma mark - AVCaptureVideoDataOutputSampleBufferDelegate
    // AVCaptureVideoDataOutput delivers frames in real time; this delegate method
    // is called for every captured frame, close to the screen refresh rate.
    - (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection{
        // Set the video orientation, otherwise largeImage comes out rotated
        [connection setVideoOrientation:AVCaptureVideoOrientationPortrait];
        // Convert the frame to a UIImage (feed it to the face-detection code below)
        UIImage *largeImage = [self imageFromSampleBuffer:sampleBuffer];
    }
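    // The delegate callback above fires for every frame; running face detection
    // on each one is wasteful. A simple throttle (the counter and the every-10th-
    // frame interval below are illustrative, not from the original post) could be
    // placed at the top of the callback:
    //
    //   static NSUInteger frameCount = 0;
    //   if (++frameCount % 10 != 0) { return; }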
    
    // Convert a CMSampleBufferRef to a UIImage
    -(UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer{
        // Get the Core Video image buffer that holds the frame's pixel data
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        // Lock the base address of the pixel buffer
        CVPixelBufferLockBaseAddress(imageBuffer, 0);
        // Get the base address of the pixel buffer
        void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
        // Get the number of bytes per row of the pixel buffer
        size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
        // Get the pixel buffer's width and height
        size_t width = CVPixelBufferGetWidth(imageBuffer);
        size_t height = CVPixelBufferGetHeight(imageBuffer);
        // Create a device-dependent RGB color space
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        // Create a bitmap graphics context backed by the sample buffer's pixel data
        CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
        // Create a Quartz image from the pixel data in the bitmap context
        CGImageRef quartzImage = CGBitmapContextCreateImage(context);
        // Unlock the pixel buffer
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
        // Release the context and color space
        CGContextRelease(context);
        CGColorSpaceRelease(colorSpace);
        // Create a UIImage from the Quartz image
        UIImage *image = [UIImage imageWithCGImage:quartzImage];
        // Release the Quartz image
        CGImageRelease(quartzImage);
        return image;
    }
    
    
    -(AVCaptureDeviceInput *)input{
        if (_input == nil) {
            _input = [[AVCaptureDeviceInput alloc] initWithDevice:self.device error:nil];
        }
        return _input;
    }
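    
    // The post declares the device property but never initializes it; without a
    // getter, self.device is nil and the input cannot be created. A minimal lazy
    // getter, assuming the default video camera (usually the back camera):
    -(AVCaptureDevice *)device{
        if (_device == nil) {
            _device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
        }
        return _device;
    }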
    
    
    -(AVCaptureVideoDataOutput *)videoDataOutput{
        if (_videoDataOutput == nil) {
            _videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
            // Deliver sample buffers on the main queue (a dedicated serial queue is also common)
            [_videoDataOutput setSampleBufferDelegate:self queue:dispatch_get_main_queue()];
            // Request 32BGRA pixel buffers; otherwise CGBitmapContextCreate fails
            // when converting the CMSampleBufferRef to a UIImage
            [_videoDataOutput setVideoSettings:@{(id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA)}];
        }
        return _videoDataOutput;
    }
    
    -(AVCaptureSession *)session{
        if (_session == nil) {
            _session = [[AVCaptureSession alloc] init];
            if ([_session canAddInput:self.input]) {
                [_session addInput:self.input];
            }
            if ([_session canAddOutput:self.videoDataOutput]) {
                [_session addOutput:self.videoDataOutput];
            }
        }
        return _session;
    }
    
    -(AVCaptureVideoPreviewLayer *)previewLayer{
        if (_previewLayer == nil) {
            _previewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.session];
            _previewLayer.frame = self.view.layer.bounds;
        }
        return _previewLayer;
    }
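    
    The setup above also assumes camera permission: on iOS 10 and later the app must declare NSCameraUsageDescription in Info.plist, and the session only delivers frames after the user grants access. A minimal authorization request (a sketch, not part of the original post):

    [AVCaptureDevice requestAccessForMediaType:AVMediaTypeVideo completionHandler:^(BOOL granted) {
        if (granted) {
            // Camera is available; safe to configure and start the session.
        }
    }];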
    

    Face detection itself uses the Core Image framework (#import <CoreImage/CoreImage.h>). A minimal example, where image is the UIImage obtained from the sample buffer above:

    // Detection accuracy: choose CIDetectorAccuracyHigh (better results, more
    // processing) or CIDetectorAccuracyLow (faster, less accurate). High
    // accuracy is chosen here.
    NSDictionary *opts = [NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh
                                                     forKey:CIDetectorAccuracy];
    // Wrap the captured frame in a CIImage
    CIImage *faceImage = [CIImage imageWithCGImage:image.CGImage];
    CIDetector *faceDetector = [CIDetector detectorOfType:CIDetectorTypeFace context:nil options:opts];
    // Run detection; CIDetectorImageOrientation (an EXIF-style value, 1-8) tells
    // the detector how the image is rotated. The result is an array of faces.
    NSArray *features = [faceDetector featuresInImage:faceImage options:[NSDictionary dictionaryWithObject:@(5) forKey:CIDetectorImageOrientation]];
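    
    Each element of features is a CIFaceFeature. A short sketch of reading the results (note that Core Image uses a bottom-left origin, so the bounds must be flipped before drawing over a UIKit view):

    for (CIFaceFeature *faceFeature in features) {
        // Bounding box of the face in image coordinates (bottom-left origin)
        NSLog(@"face bounds: %@", NSStringFromCGRect(faceFeature.bounds));
        if (faceFeature.hasLeftEyePosition) {
            NSLog(@"left eye at: %@", NSStringFromCGPoint(faceFeature.leftEyePosition));
        }
        if (faceFeature.hasMouthPosition) {
            NSLog(@"mouth at: %@", NSStringFromCGPoint(faceFeature.mouthPosition));
        }
    }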
    
