AVFoundation is the framework for working with and creating time-based audiovisual media. Below it is used to build a custom camera view controller that previews the front camera, reports face detection in real time, and hands the captured photo back to the caller.
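The implementation references a ReturnImageBlock property, so the corresponding YBCustomCameraVC.h presumably looks roughly like the following (a minimal sketch; the original header is not shown):

#import <UIKit/UIKit.h>

@interface YBCustomCameraVC : UIViewController
// Invoked with the captured, orientation-corrected photo just before the controller dismisses itself.
@property (nonatomic, copy) void (^ReturnImageBlock)(UIImage *image);
@end

A caller would then present the controller along these lines (also a sketch; presentingVC stands for any view controller):

YBCustomCameraVC *cameraVC = [[YBCustomCameraVC alloc] init];
cameraVC.ReturnImageBlock = ^(UIImage *image) {
    // Use the captured photo here.
    NSLog(@"captured image size: %@", NSStringFromCGSize(image.size));
};
cameraVC.modalPresentationStyle = UIModalPresentationFullScreen;
[presentingVC presentViewController:cameraVC animated:YES completion:nil];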
The code in the .m file is as follows:
#import "YBCustomCameraVC.h"
#define KScreenWidth [UIScreen mainScreen].bounds.size.width
#define KScreenHeight [UIScreen mainScreen].bounds.size.height
// Import the camera (capture) framework
#import <AVFoundation/AVFoundation.h>
// Photos framework (iOS 8+), needed if we want to write captured photos into the system photo library
#import <Photos/Photos.h>
// Third-party HUD used in shutterCamera (import path assumed; adjust to your project setup)
#import "SVProgressHUD.h"
@interface YBCustomCameraVC ()<UIAlertViewDelegate,AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureMetadataOutputObjectsDelegate>
{
UILabel * faceLabel;
}
// Capture device: typically the front camera, back camera, or microphone (audio input)
@property(nonatomic)AVCaptureDevice *device;
// AVCaptureDeviceInput represents an input device; it is initialized with an AVCaptureDevice
@property(nonatomic)AVCaptureDeviceInput *input;
// Metadata output; delivers face-detection results once capture is running
@property(nonatomic)AVCaptureMetadataOutput *output;
// Still image output for taking the photo
@property (nonatomic)AVCaptureStillImageOutput *ImageOutPut;
// Session: ties the inputs and outputs together and drives the capture device (camera)
@property(nonatomic)AVCaptureSession *session;
// Preview layer that renders the captured frames in real time
@property(nonatomic)AVCaptureVideoPreviewLayer *previewLayer;
// ------------- UI --------------
// Shutter button
@property (nonatomic)UIButton *photoButton;
// Focus indicator view
@property (nonatomic)UIView *focusView;
@property (nonatomic,strong) dispatch_queue_t sample;
@property (nonatomic,strong) dispatch_queue_t faceQueue;
@property (nonatomic,copy) NSArray *currentMetadata; // When a face is detected the system returns an array of metadata objects; we store that array here
@end
@implementation YBCustomCameraVC
- (void)viewDidLoad {
[super viewDidLoad];
self.view.backgroundColor = [UIColor clearColor];
if ( [self checkCameraPermission]) {
[self customCamera];
// Build the page UI
[self initSubViews];
// Focus at the center of the preview (focusAtPoint: expects a point in view coordinates)
[self focusAtPoint:CGPointMake(KScreenWidth/2.0, KScreenHeight/2.0)];
}
}
- (void)customCamera
{
_sample = dispatch_queue_create("sample", NULL);
_faceQueue = dispatch_queue_create("face", NULL);
// 1. Create the capture session, which ties the inputs and outputs together
self.session = [[AVCaptureSession alloc]init];
if ([self.session canSetSessionPreset:AVCaptureSessionPreset1280x720]){
[self.session setSessionPreset:AVCaptureSessionPreset1280x720];
}
// 2. Add the camera input (AVCaptureDeviceInput). AVMediaTypeVideo marks self.device as a video device; this screen captures faces, so we use the front camera
self.device = [self frontCamera];
// Initialize the input with the device
NSError *inputError = nil;
self.input = [[AVCaptureDeviceInput alloc]initWithDevice:self.device error:&inputError];
if ([self.session canAddInput:self.input]) {
[self.session addInput:self.input];
}
// 3. Add the video data output (raw frames)
AVCaptureVideoDataOutput *output = [[AVCaptureVideoDataOutput alloc] init];
[output setSampleBufferDelegate:self queue:_sample];
if ([self.session canAddOutput:output]) {
[self.session addOutput:output];
}
// 4. Add the face-detection (metadata) output
self.output = [[AVCaptureMetadataOutput alloc]init];
[self.output setMetadataObjectsDelegate:self queue:dispatch_get_main_queue()];
self.ImageOutPut = [[AVCaptureStillImageOutput alloc]init];
if ([self.session canAddOutput:self.output]) {
[self.session addOutput:self.output];
}
if ([self.session canAddOutput:self.ImageOutPut]) {
[self.session addOutput:self.ImageOutPut];
}
// Setting the metadata type to face MUST come after [self.session addOutput:self.output];
// an output's availableMetadataObjectTypes is empty until it is attached to a session, and requesting an unavailable type throws an exception
[self.output setMetadataObjectTypes:@[AVMetadataObjectTypeFace]];
// 5. Initialize the preview layer with the session; the session drives the input to collect data, while the layer renders the frames on screen
self.previewLayer = [[AVCaptureVideoPreviewLayer alloc]initWithSession:self.session];
self.previewLayer.frame = CGRectMake(0, 0, KScreenWidth, KScreenHeight);
self.previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[self.view.layer addSublayer:self.previewLayer];
// 6. Start the session (-startRunning blocks the calling thread; production code usually calls it on a background queue)
[self.session startRunning];
// Lock the device before modifying its configuration
if ([self.device lockForConfiguration:nil]) {
// Auto white balance
if ([self.device isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeAutoWhiteBalance]) {
[self.device setWhiteBalanceMode:AVCaptureWhiteBalanceModeAutoWhiteBalance];
}
// Unlock
[self.device unlockForConfiguration];
}
}
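// Note: the session is never stopped in this class; a production version would
// typically call [self.session stopRunning] in viewWillDisappear: or dealloc.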
#pragma mark - UI
- (void)initSubViews
{
// Face-outline overlay image
UIImageView *OutlineImg = [[UIImageView alloc]init];
OutlineImg.frame = CGRectMake(0, 0, KScreenWidth, KScreenHeight);
OutlineImg.image = [UIImage imageNamed:@"touxiang"];
[self.view addSubview:OutlineImg];
// Back button
UIButton *btn = [UIButton new];
btn.frame = CGRectMake(10, 20, 40, 40);
[btn setImage:[UIImage imageNamed:@"authentication_error"] forState:UIControlStateNormal];
[btn addTarget:self action:@selector(disMiss) forControlEvents:UIControlEventTouchUpInside];
[self.view addSubview:btn];
// Label showing whether a face is currently detected
faceLabel = [[UILabel alloc]init];
faceLabel.frame = CGRectMake((KScreenWidth - 200)/2.0, CGRectGetMinY(btn.frame), 200, 20);
faceLabel.center = CGPointMake(faceLabel.center.x, btn.center.y);
faceLabel.text = @"No face detected";
faceLabel.textColor = [UIColor whiteColor];
faceLabel.font = [UIFont systemFontOfSize:18];
faceLabel.textAlignment = NSTextAlignmentCenter;
[self.view addSubview:faceLabel];
UILabel *PromptLab = [[UILabel alloc]init];
PromptLab.frame = CGRectMake(0, CGRectGetMaxY(faceLabel.frame) + 20, KScreenWidth, 20);
PromptLab.text = @"Please look at the screen and hold the phone upright";
PromptLab.textColor = [UIColor whiteColor];
PromptLab.font = [UIFont systemFontOfSize:16];
PromptLab.textAlignment = NSTextAlignmentCenter;
[self.view addSubview:PromptLab];
// Shutter button
self.photoButton = [UIButton new];
self.photoButton.frame = CGRectMake(KScreenWidth/2.0-35, KScreenHeight-100, 70, 70);
[self.photoButton setImage:[UIImage imageNamed:@"authentication_photograph"] forState:UIControlStateNormal];
[self.photoButton addTarget:self action:@selector(shutterCamera) forControlEvents:UIControlEventTouchUpInside];
[self.view addSubview:self.photoButton];
// Top corner guide image
UIImageView * topAngleImageView = [[UIImageView alloc]init];
topAngleImageView.frame = CGRectMake(10, CGRectGetMinY(PromptLab.frame), KScreenWidth - 20, 20);
topAngleImageView.image = [UIImage imageNamed:@"authentication_topAngle"];
[self.view addSubview:topAngleImageView];
// Bottom corner guide image
UIImageView * bottomAngleImageView = [[UIImageView alloc]init];
bottomAngleImageView.frame = CGRectMake(10, CGRectGetMinY(self.photoButton.frame) - 40, KScreenWidth - 20, 20);
bottomAngleImageView.image = [UIImage imageNamed:@"authentication_bottomAngle"];
[self.view addSubview:bottomAngleImageView];
self.focusView = [[UIView alloc]initWithFrame:CGRectMake(0, 0, 80, 80)];
self.focusView.layer.borderWidth = 1.0;
self.focusView.layer.borderColor = [UIColor greenColor].CGColor;
[self.view addSubview:self.focusView];
self.focusView.hidden = YES;
UITapGestureRecognizer *tapGesture = [[UITapGestureRecognizer alloc]initWithTarget:self action:@selector(focusGesture:)];
[self.view addGestureRecognizer:tapGesture];
}
// Tap to focus
- (void)focusGesture:(UITapGestureRecognizer*)gesture{
CGPoint point = [gesture locationInView:gesture.view];
[self focusAtPoint:point];
}
- (void)focusAtPoint:(CGPoint)point{
CGSize size = self.view.bounds.size;
// focusPointOfInterest uses a normalized coordinate space from (0,0) at the top-left to
// (1,1) at the bottom-right, defined relative to the sensor's native (landscape) orientation.
// For a portrait preview, a tap therefore maps to (y/height, 1 - x/width).
CGPoint focusPoint = CGPointMake( point.y /size.height ,1 - point.x/size.width );
if ([self.device lockForConfiguration:nil]) {
if ([self.device isFocusModeSupported:AVCaptureFocusModeAutoFocus]) {
[self.device setFocusPointOfInterest:focusPoint];
[self.device setFocusMode:AVCaptureFocusModeAutoFocus];
}
if ([self.device isExposureModeSupported:AVCaptureExposureModeAutoExpose ]) {
[self.device setExposurePointOfInterest:focusPoint];
// Adjust exposure automatically
[self.device setExposureMode:AVCaptureExposureModeAutoExpose];
}
[self.device unlockForConfiguration];
_focusView.center = point;
_focusView.hidden = NO;
[UIView animateWithDuration:0.3 animations:^{
_focusView.transform = CGAffineTransformMakeScale(1.25, 1.25);
}completion:^(BOOL finished) {
[UIView animateWithDuration:0.5 animations:^{
_focusView.transform = CGAffineTransformIdentity;
} completion:^(BOOL finished) {
_focusView.hidden = YES;
}];
}];
}
}
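// A more robust variant of the mapping above (a sketch using the preview layer's
// built-in conversion, which also accounts for videoGravity and orientation):
// CGPoint focusPoint = [self.previewLayer captureDevicePointOfInterestForPoint:point];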
#pragma mark - AVCapture Output Delegates
- (void)captureOutput:(AVCaptureOutput *)output didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
NSMutableArray *bounds = [NSMutableArray arrayWithCapacity:0];
// For each frame, check whether self.currentMetadata holds face metadata; convert each
// AVMetadataFaceObject into this output's coordinate space with
// -transformedMetadataObjectForMetadataObject:connection:, whose bounds is the on-screen face
// rect, and collect the rects (unused here, but this is the natural hook for drawing face boxes)
for (AVMetadataFaceObject *faceobject in self.currentMetadata) {
AVMetadataObject *face = [output transformedMetadataObjectForMetadataObject:faceobject connection:connection];
[bounds addObject:[NSValue valueWithCGRect:face.bounds]];
}
}
- (void)captureOutput:(AVCaptureOutput *)output didOutputMetadataObjects:(NSArray<__kindof AVMetadataObject *> *)metadataObjects fromConnection:(AVCaptureConnection *)connection {
// This callback fires when face detection runs
_currentMetadata = metadataObjects;
// metadataObjects is non-empty while a face is detected and empty when detection fails
AVMetadataObject * metadata = metadataObjects.firstObject;
// AVMetadataObjectType is an NSString, so compare with isEqualToString: rather than ==
if (metadata && [metadata.type isEqualToString:AVMetadataObjectTypeFace]) {
//AVMetadataFaceObject *face = (AVMetadataFaceObject *)metadata;
//NSLog(@"roll angle: %f, yaw angle: %f, bounds: %@",face.rollAngle,face.yawAngle ,NSStringFromCGRect(face.bounds) );
faceLabel.text = @"Face detected";
}
else {
faceLabel.text = @"No face detected";
}
}
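// Because the metadata delegate was registered with dispatch_get_main_queue() in
// customCamera, it is safe to update faceLabel directly in this callback.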
#pragma mark - Taking the photo
- (void)shutterCamera
{
if ([faceLabel.text isEqualToString:@"No face detected"]) {
[SVProgressHUD showInfoWithStatus:@"No face detected; please aim the camera at a face"];
[SVProgressHUD dismissWithDelay:1.0];
return;
}
AVCaptureConnection * videoConnection = [self.ImageOutPut connectionWithMediaType:AVMediaTypeVideo];
if (videoConnection == nil) {
return;
}
// [videoConnection setVideoOrientation:AVCaptureVideoOrientationLandscapeRight];
[self.ImageOutPut captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler:^(CMSampleBufferRef imageDataSampleBuffer, NSError *error) {
if (imageDataSampleBuffer == nil) {
return;
}
NSData *imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageDataSampleBuffer];
UIImage *Image = [self fixOrientation:[UIImage imageWithData:imageData]];
NSLog(@"%ld",(long)Image.imageOrientation);
if (self.ReturnImageBlock) {
self.ReturnImageBlock(Image);
[self disMiss];
}
// [self saveImageWithImage:[UIImage imageWithData:imageData]];
}];
}
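// Note: AVCaptureStillImageOutput and jpegStillImageNSDataRepresentation: are
// deprecated as of iOS 10; AVCapturePhotoOutput with
// -capturePhotoWithSettings:delegate: is the modern replacement.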
///**
// * Save the captured image to the photo album
// */
//- (void)saveImageWithImage:(UIImage *)image {
// // Check the authorization status
// [PHPhotoLibrary requestAuthorization:^(PHAuthorizationStatus status) {
// if (status != PHAuthorizationStatusAuthorized) return;
//
// dispatch_async(dispatch_get_main_queue(), ^{
// NSError *error = nil;
//
// // Save the photo to the camera roll
// __block PHObjectPlaceholder *createdAsset = nil;
// [[PHPhotoLibrary sharedPhotoLibrary] performChangesAndWait:^{
// createdAsset = [PHAssetCreationRequest creationRequestForAssetFromImage:image].placeholderForCreatedAsset;
// } error:&error];
//
// if (error) {
// NSLog(@"保存失败:%@", error);
// return;
// }
// });
// }];
//}
- (AVCaptureDevice *)frontCamera {
NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
for (AVCaptureDevice *device in devices) {
if ([device position] == AVCaptureDevicePositionFront) {
return device;
}
}
return nil;
}
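// Note: +devicesWithMediaType: is deprecated as of iOS 10;
// +[AVCaptureDevice defaultDeviceWithDeviceType:mediaType:position:] with
// AVCaptureDeviceTypeBuiltInWideAngleCamera is the recommended replacement.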
- (void)disMiss
{
[self dismissViewControllerAnimated:YES completion:nil];
}
#pragma mark - Camera permission check
- (BOOL)checkCameraPermission
{
AVAuthorizationStatus authStatus = [AVCaptureDevice authorizationStatusForMediaType:AVMediaTypeVideo];
if (authStatus == AVAuthorizationStatusDenied) {
UIAlertView *alertView = [[UIAlertView alloc]initWithTitle:@"Please enable camera access" message:@"Settings - Privacy - Camera" delegate:self cancelButtonTitle:@"OK" otherButtonTitles:@"Cancel", nil];
alertView.tag = 100;
[alertView show];
return NO;
}
return YES;
}
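// Note: iOS 10+ also requires an NSCameraUsageDescription entry in Info.plist,
// otherwise the app is terminated the first time the camera is accessed. For the
// AVAuthorizationStatusNotDetermined case, +[AVCaptureDevice
// requestAccessForMediaType:completionHandler:] presents the system permission prompt.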
- (void)alertView:(UIAlertView *)alertView clickedButtonAtIndex:(NSInteger)buttonIndex{
if (buttonIndex == 0 && alertView.tag == 100) {
NSURL * url = [NSURL URLWithString:UIApplicationOpenSettingsURLString];
if([[UIApplication sharedApplication] canOpenURL:url]) {
[[UIApplication sharedApplication] openURL:url];
}
}
if (buttonIndex == 1 && alertView.tag == 100) {
[self disMiss];
}
}
- (UIImage *)fixOrientation:(UIImage *)aImage {
// No-op if the orientation is already correct
if (aImage.imageOrientation == UIImageOrientationUp)
return aImage;
// We need to calculate the proper transformation to make the image upright.
// We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
CGAffineTransform transform = CGAffineTransformIdentity;
switch (aImage.imageOrientation) {
case UIImageOrientationDown:
case UIImageOrientationDownMirrored:
transform = CGAffineTransformTranslate(transform, aImage.size.width, aImage.size.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
transform = CGAffineTransformTranslate(transform, aImage.size.width, 0);
transform = CGAffineTransformRotate(transform, M_PI_2);
break;
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
transform = CGAffineTransformTranslate(transform, 0, aImage.size.height);
transform = CGAffineTransformRotate(transform, -M_PI_2);
break;
default:
break;
}
switch (aImage.imageOrientation) {
case UIImageOrientationUpMirrored:
case UIImageOrientationDownMirrored:
transform = CGAffineTransformTranslate(transform, aImage.size.width, 0);
transform = CGAffineTransformScale(transform, -1, 1);
break;
case UIImageOrientationLeftMirrored:
case UIImageOrientationRightMirrored:
transform = CGAffineTransformTranslate(transform, aImage.size.height, 0);
transform = CGAffineTransformScale(transform, -1, 1);
break;
default:
break;
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
CGContextRef ctx = CGBitmapContextCreate(NULL, aImage.size.width, aImage.size.height,
CGImageGetBitsPerComponent(aImage.CGImage), 0,
CGImageGetColorSpace(aImage.CGImage),
CGImageGetBitmapInfo(aImage.CGImage));
CGContextConcatCTM(ctx, transform);
switch (aImage.imageOrientation) {
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
// Grr...
CGContextDrawImage(ctx, CGRectMake(0,0,aImage.size.height,aImage.size.width), aImage.CGImage);
break;
default:
CGContextDrawImage(ctx, CGRectMake(0,0,aImage.size.width,aImage.size.height), aImage.CGImage);
break;
}
// And now we just create a new UIImage from the drawing context
CGImageRef cgimg = CGBitmapContextCreateImage(ctx);
UIImage *img = [UIImage imageWithCGImage:cgimg];
CGContextRelease(ctx);
CGImageRelease(cgimg);
return img;
}
@end

To be continued...