Two points to note
Set the output image size when initializing the AVCaptureSession:
AVCaptureSession *session = [[AVCaptureSession alloc] init];
session.sessionPreset = AVCaptureSessionPreset640x480;
When configuring the AVCaptureVideoDataOutput, make sure to request YUV output rather than RGB:
AVCaptureVideoDataOutput * dataOutput = [[AVCaptureVideoDataOutput alloc] init];
[dataOutput setVideoSettings:@{(id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)}];
The CMSampleBufferRef can be obtained in the camera's delegate method:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    // sampleBuffer is the real-time frame data output by the camera
}
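For context, a minimal setup sketch that wires these pieces together might look like the following; the queue name is illustrative, and it assumes self conforms to AVCaptureVideoDataOutputSampleBufferDelegate (neither detail is from the original):

#import <AVFoundation/AVFoundation.h>

// Illustrative sketch: create the session, attach the camera input and the
// YUV video-data output, and start capturing.
- (void)setupCaptureSession
{
    AVCaptureSession *session = [[AVCaptureSession alloc] init];
    session.sessionPreset = AVCaptureSessionPreset640x480;

    AVCaptureDevice *camera = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    NSError *error = nil;
    AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:camera error:&error];
    if (input && [session canAddInput:input]) {
        [session addInput:input];
    }

    AVCaptureVideoDataOutput *dataOutput = [[AVCaptureVideoDataOutput alloc] init];
    [dataOutput setVideoSettings:@{(id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)}];
    // Assumes self implements the delegate method shown above.
    [dataOutput setSampleBufferDelegate:self
                                  queue:dispatch_queue_create("camera.frame.queue", DISPATCH_QUEUE_SERIAL)];
    if ([session canAddOutput:dataOutput]) {
        [session addOutput:dataOutput];
    }

    [session startRunning];
}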
// Rotate 90 degrees clockwise
// M and N are the width and height of the source image, respectively
void Matrix_Rotate_90(uint8_t *src, uint8_t *dst, int M, int N)
{
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < N; j++) {
            dst[i * N + j] = src[(N - 1 - j) * M + i];
        }
    }
}
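As a quick sanity check (not part of the original), a hypothetical 3-wide by 2-tall test image makes the index mapping concrete:

#include <stdint.h>
#include <stdio.h>

// Uses the Matrix_Rotate_90 function defined above.
// Source (3 wide, 2 tall):     Rotated 90° clockwise (2 wide, 3 tall):
//   1 2 3                        4 1
//   4 5 6                        5 2
//                                6 3
int main(void)
{
    uint8_t src[6] = {1, 2, 3, 4, 5, 6};
    uint8_t dst[6] = {0};
    Matrix_Rotate_90(src, dst, 3, 2);      // M = width = 3, N = height = 2
    for (int i = 0; i < 3; i++) {          // the rotated image has 3 rows of 2 pixels
        printf("%d %d\n", dst[i * 2], dst[i * 2 + 1]);
    }
    return 0;
}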
Converting to grayscale image data
CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer);
// NSLog(@"width %zu * height %zu", CVPixelBufferGetWidth(imageBufferRef), CVPixelBufferGetHeight(imageBufferRef));
CVPixelBufferLockBaseAddress(imageBufferRef, 0);
// Plane 0 of the bi-planar YUV buffer is the Y (luma) plane, which is already grayscale data
unsigned char *ptr_image = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0);
// 640 * 480 matches the AVCaptureSessionPreset640x480 set on the session above;
// one byte per pixel is all the grayscale plane needs
unsigned char *grayImg = (unsigned char *)malloc(sizeof(unsigned char) * 640 * 480);
// Frames come off the camera in landscape orientation, so rotate them first;
// the rotated grayscale image can then be handed to the face-recognition algorithm
Matrix_Rotate_90(ptr_image, grayImg, 640, 480);
CVPixelBufferUnlockBaseAddress(imageBufferRef, 0);
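Putting it together, a minimal sketch of the full delegate method might look like the one below. It additionally copies the Y plane row by row, because CVPixelBufferGetBytesPerRowOfPlane can be larger than the width due to row padding, and the processGrayImage: call stands in for whatever face-recognition entry point is actually used; both of these details are assumptions, not from the original.

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBufferRef, kCVPixelBufferLock_ReadOnly);

    size_t width  = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0);   // 640 with the preset above
    size_t height = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0);  // 480 with the preset above
    size_t bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 0);
    uint8_t *yPlane = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0);

    // Copy the Y (luma) plane into a tightly packed buffer, dropping any row padding.
    uint8_t *gray = malloc(width * height);
    for (size_t row = 0; row < height; row++) {
        memcpy(gray + row * width, yPlane + row * bytesPerRow, width);
    }
    CVPixelBufferUnlockBaseAddress(imageBufferRef, kCVPixelBufferLock_ReadOnly);

    // Rotate to portrait orientation for the face-recognition algorithm.
    uint8_t *rotated = malloc(width * height);
    Matrix_Rotate_90(gray, rotated, (int)width, (int)height);

    // After rotation the image is 480 wide by 640 tall (old height x old width).
    // [self processGrayImage:rotated width:height height:width];  // hypothetical downstream call

    free(gray);
    free(rotated);
}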