iOS: Extracting Video Face Landmarks with Vision

Author: 如意神王 | Published 2019-07-21 15:57

1. The AVFoundation video capture output delegate

- (void)captureOutput:(AVCaptureOutput *)output didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    // Only handle frames from the video connection (the session may also
    // deliver audio buffers through this delegate)
    if ([connection isEqual:self.videoConnection]) {
        [self detectVNDetectFaceLandmarksRequestWithCMSampleBufferRef:sampleBuffer];
    }
}
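
For reference, a minimal capture setup that feeds this delegate could look like the sketch below. The post only shows the callback, so the queue label and the captureSession property are assumptions; videoConnection and activeVideoInput do appear later in the post.

- (void)setupCaptureSession {
    AVCaptureSession *session = [[AVCaptureSession alloc] init];
    session.sessionPreset = AVCaptureSessionPresetHigh;

    NSError *error = nil;
    AVCaptureDevice *camera = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:camera error:&error];
    if (input && [session canAddInput:input]) {
        [session addInput:input];
        self.activeVideoInput = input; // used later for the front-camera mirroring check
    }

    AVCaptureVideoDataOutput *output = [[AVCaptureVideoDataOutput alloc] init];
    // Drop late frames rather than queueing them behind slow Vision requests
    output.alwaysDiscardsLateVideoFrames = YES;
    [output setSampleBufferDelegate:self
                              queue:dispatch_queue_create("video.output.queue", DISPATCH_QUEUE_SERIAL)];
    if ([session canAddOutput:output]) {
        [session addOutput:output];
    }

    self.videoConnection = [output connectionWithMediaType:AVMediaTypeVideo];
    self.captureSession = session; // assumed property that keeps the session alive
    [session startRunning];
}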

2. VNImageRequestHandler input types

  1. CVPixelBufferRef
  2. CGImage
  3. NSURL
  4. NSData

One initializer per input type; pixelBuffer, imageURL, imageData, and cgImage below are placeholders for real values:

    VNImageRequestHandler *handler0 = [[VNImageRequestHandler alloc] initWithCVPixelBuffer:pixelBuffer options:@{}];
    VNImageRequestHandler *handler1 = [[VNImageRequestHandler alloc] initWithURL:imageURL options:@{}];
    VNImageRequestHandler *handler2 = [[VNImageRequestHandler alloc] initWithData:imageData options:@{}];
    VNImageRequestHandler *handler3 = [[VNImageRequestHandler alloc] initWithCGImage:cgImage options:@{}];
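
On iOS 11 and later the handler can also be told the frame's orientation, which matters for raw camera buffers; a sketch, where kCGImagePropertyOrientationRight is an assumption that depends on your actual capture configuration:

    // Camera pixel buffers are usually not upright; pass an explicit
    // CGImagePropertyOrientation (a constant from ImageIO) so Vision
    // interprets the frame correctly.
    VNImageRequestHandler *orientedHandler = [[VNImageRequestHandler alloc] initWithCVPixelBuffer:pixelBuffer
                                                                                      orientation:kCGImagePropertyOrientationRight
                                                                                          options:@{}];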

3. Running a face landmark request on video frames

- (void)detectVNDetectFaceLandmarksRequestWithCMSampleBufferRef:(CMSampleBufferRef)sampleBuffer {

    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    VNDetectFaceLandmarksRequest *detectFaceRequest = [[VNDetectFaceLandmarksRequest alloc] init];
    VNImageRequestHandler *detectFaceRequestHandler = [[VNImageRequestHandler alloc] initWithCVPixelBuffer:pixelBuffer options:@{}];

    NSError *error = nil;
    if (![detectFaceRequestHandler performRequests:@[detectFaceRequest] error:&error]) {
        NSLog(@"performRequests failed: %@", error);
        return;
    }
    NSArray *results = detectFaceRequest.results;
    NSLog(@"results count == %lu", (unsigned long)results.count);

    [self faceLandmarks:results];
}
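
Alternatively, the request can be created with a completion handler instead of reading detectFaceRequest.results synchronously after performRequests:; a minimal sketch of the same flow:

    VNDetectFaceLandmarksRequest *request = [[VNDetectFaceLandmarksRequest alloc]
        initWithCompletionHandler:^(VNRequest *req, NSError *err) {
            if (err != nil) {
                NSLog(@"face landmarks request failed: %@", err);
                return;
            }
            [self faceLandmarks:req.results];
        }];
    NSError *error = nil;
    [detectFaceRequestHandler performRequests:@[request] error:&error];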

4. Processing the results (same principle as the previous article on image face feature extraction)

// Process the observations once the request has succeeded
- (void)faceLandmarks:(NSArray *)faces{
    dispatch_async(dispatch_get_main_queue(), ^{
        // Remove the dot/rectangle layers drawn for the previous frame,
        // then clear the array so it does not grow without bound
        for (CALayer *layer in self.landmarksLayers) {
            [layer removeFromSuperlayer];
        }
        [self.landmarksLayers removeAllObjects];
        // There may be more than one face
        [faces enumerateObjectsUsingBlock:^(VNFaceObservation *face, NSUInteger idx, BOOL * _Nonnull stop) {
            
            /*
             * face: a VNFaceObservation; it carries the landmarks,
             * the boundingBox (face rectangle), and more
             */

            // Landmarks for this single face
            VNFaceLandmarks2D *landmarks = face.landmarks;
            // Array of the landmark regions we want to draw
            NSMutableArray *face_landmarks = [NSMutableArray array];

            // landmarks exposes one property per region (left eye, right eye,
            // nose, nose crest, ...); add whichever regions you need.
            // (Each region may be nil if it was not detected; guard before
            // adding in production code.)
            [face_landmarks addObject:landmarks.faceContour];
            [face_landmarks addObject:landmarks.leftEye];
            [face_landmarks addObject:landmarks.rightEye];
            [face_landmarks addObject:landmarks.leftEyebrow];
            [face_landmarks addObject:landmarks.rightEyebrow];
            [face_landmarks addObject:landmarks.outerLips];
            [face_landmarks addObject:landmarks.innerLips];
            [face_landmarks addObject:landmarks.nose];
            [face_landmarks addObject:landmarks.noseCrest];
            [face_landmarks addObject:landmarks.medianLine];
            [face_landmarks addObject:landmarks.leftPupil];
            [face_landmarks addObject:landmarks.rightPupil];
            
            VNFaceLandmarkRegion2D *leftPupilLandmarks = landmarks.leftPupil;
            VNFaceLandmarkRegion2D *rightPupilLandmarks = landmarks.rightPupil;

            CGPoint leftPupil = CGPointZero;
            CGPoint rightPupil = CGPointZero;

            for (NSUInteger i = 0; i < leftPupilLandmarks.pointCount; i++) {
                // Read out each normalized point
                leftPupil = leftPupilLandmarks.normalizedPoints[i];
                NSLog(@"leftPupil point == %@", NSStringFromCGPoint(leftPupil));
            }

            for (NSUInteger i = 0; i < rightPupilLandmarks.pointCount; i++) {
                // Read out each normalized point
                rightPupil = rightPupilLandmarks.normalizedPoints[i];
                NSLog(@"rightPupil point == %@", NSStringFromCGPoint(rightPupil));
            }
            
            CGRect oldRect = face.boundingBox;

            // The front camera is mirrored left-to-right, so flip x
            AVCaptureDevicePosition position = self.activeVideoInput.device.position;
            if (position == AVCaptureDevicePositionFront) {
                CGFloat faceBoundingBoxX = 1.0 - face.boundingBox.origin.x - face.boundingBox.size.width;
                oldRect.origin.x = faceBoundingBoxX;
            }
            
            
            // boundingBox is normalized (0-1) with a bottom-left origin, so
            // scale it to the view size and flip y for UIKit's top-left origin
            CGFloat w = oldRect.size.width * self.bounds.size.width;
            CGFloat h = oldRect.size.height * self.bounds.size.height;
            CGFloat x = oldRect.origin.x * self.bounds.size.width;
            CGFloat y = self.bounds.size.height - (oldRect.origin.y * self.bounds.size.height) - h;
            
            // Draw the face bounding rectangle
            CALayer *testLayer = [CALayer layer];
            testLayer.borderWidth = 1;
            testLayer.cornerRadius = 3;
            testLayer.borderColor = [UIColor redColor].CGColor;
            testLayer.frame = CGRectMake(x, y, w, h);
            [self.layer addSublayer:testLayer];
            
            [self.landmarksLayers addObject:testLayer];
            
            
            NSLog(@"boundingBox == %@", NSStringFromCGRect(face.boundingBox));
            // Walk the collected landmark regions
            [face_landmarks enumerateObjectsUsingBlock:^(VNFaceLandmarkRegion2D *obj, NSUInteger idx, BOOL * _Nonnull stop) {
                // obj is one region (e.g. the left eye); iterate all of its points
                for (int i = 0; i < obj.pointCount; i++) {
                    // Points are normalized relative to the face bounding box
                    CGPoint point = obj.normalizedPoints[i];

                    // The front camera is mirrored left-to-right, so flip x
                    if (position == AVCaptureDevicePositionFront) {
                        point.x = 1.0 - point.x;
                    }

                    
                    // Flip y, then map into the face rectangle computed above
                    point.y = 1.0 - point.y;
                    CGFloat px = point.x * w + x;
                    CGFloat py = point.y * h + y;
                    CGPoint p = CGPointMake(px, py);
                    
                    
                    // Draw a 2pt red dot at the landmark position
                    CALayer *layer = [CALayer layer];
                    layer.backgroundColor = [UIColor redColor].CGColor;
                    layer.frame = CGRectMake(0, 0, 2.0f, 2.0f);
                    layer.position = p;
                    layer.cornerRadius = 1.0f;
                    
                    [self.layer addSublayer:layer];
                    [self.landmarksLayers addObject:layer];
                    
                }
            }];
        }];
    });
}
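
As an aside, VNFaceLandmarkRegion2D can do the normalized-to-pixel mapping itself via pointsInImageOfSize:, which maps the region's points (relative to the face bounding box) into full-image coordinates in one call. Note the result is in the image's coordinate space (bottom-left origin), so a flip is still needed before drawing with UIKit. A sketch, assuming it runs inside the face enumeration above and that the buffer size is an assumed 1080x1920:

    CGSize imageSize = CGSizeMake(1080, 1920); // assumption: match your pixel buffer
    const CGPoint *imagePoints = [landmarks.nose pointsInImageOfSize:imageSize];
    for (NSUInteger i = 0; i < landmarks.nose.pointCount; i++) {
        NSLog(@"nose point in image coords: %@", NSStringFromCGPoint(imagePoints[i]));
    }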

5. Required headers

#import <Vision/Vision.h>
#import <CoreML/CoreML.h>
#import <ImageIO/ImageIO.h>
#import <objc/runtime.h>

6. Result

[Screenshot: Vision视频人脸关键点绘制.png — face landmarks drawn over the live video preview]
