Turi Create on iOS (Part 2): Object Detection (I)
Direct link to the GitHub repo
In the previous part we exported a Core ML object-detection model. Now it is time to actually use that model.
From the model description we can see that the model has two output arrays: the first gives the class scores ("classes"/confidence) for each object we are tracking, and the second gives the position data (bounding box coordinates) for each detection.
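To double-check the output names and shapes on your own model, you can print the model description. This is just a quick sketch using the objectTracking class that Xcode generates for the model (the same class used in the code below):

#import <CoreML/CoreML.h>

// Print the model's output feature descriptions to confirm the two array outputs.
objectTracking *model = [[objectTracking alloc] init];
NSDictionary<NSString *, MLFeatureDescription *> *outputs = model.model.modelDescription.outputDescriptionsByName;
[outputs enumerateKeysAndObjectsUsingBlock:^(NSString *name, MLFeatureDescription *feature, BOOL *stop) {
    NSLog(@"output %@: %@", name, feature);
}];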
Straight to the code.
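One note before the code: each detection is packed into a small C struct and wrapped in an NSValue. The struct definition is not shown in this post; something along these lines is assumed:

// Assumed definition of the Prediction struct used below:
// best class index, its confidence, and the box in normalized coordinates.
typedef struct {
    NSInteger labelIndex;
    double confidence;
    CGRect boundingBox;
} Prediction;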
- (void)objectDetectionWithSampleBuffer:(CMSampleBufferRef)sampleBuffer {
    // Convert the sample buffer to a UIImage and correct its orientation
    // (fixOrientation is a UIImage category helper in the project).
    UIImage *newPhoto = [self imageFromSampleBuffer:sampleBuffer];
    newPhoto = [UIImage imageWithCGImage:newPhoto.CGImage scale:1.0 orientation:UIImageOrientationRight];
    newPhoto = [newPhoto fixOrientation];
    objectTracking *model = [[objectTracking alloc] init];
    CGFloat width = SCREENWIDTH;
    CGFloat height = SCREENHEIGHT;
    // Creator-defined metadata (e.g. the class labels); not used further in this method.
    NSDictionary *classesDic = model.model.modelDescription.metadata[MLModelCreatorDefinedKey];
    VNCoreMLModel *coreMLModel = [VNCoreMLModel modelForMLModel:model.model error:nil];
    VNCoreMLRequest *request = [[VNCoreMLRequest alloc] initWithModel:coreMLModel completionHandler:^(VNRequest * _Nonnull request, NSError * _Nullable error) {
        // Minimum confidence a detection must reach to be kept (defined elsewhere).
        CGFloat confidenceThreshold = mixNume;
        NSMutableArray *unorderedPredictions = [NSMutableArray array];
        // Two MLMultiArray observations come back: results[0] holds the box coordinates
        // (numBoxes x 4: center x, center y, width, height), results[1] holds the
        // per-class confidences (numBoxes x numClasses).
        VNCoreMLFeatureValueObservation *coordinatesObservation = request.results[0];
        VNCoreMLFeatureValueObservation *confidenceObservation = request.results[1];
        MLMultiArray *coordinates = coordinatesObservation.featureValue.multiArrayValue;
        MLMultiArray *confidence = confidenceObservation.featureValue.multiArrayValue;
        int numBoundingBoxes = confidence.shape[0].intValue;
        int numClasses = confidence.shape[1].intValue;
        // For every candidate box, find the class with the highest confidence.
        for (int b = 0; b < numBoundingBoxes; b++) {
            float maxConfidence = 0.0;
            int maxIndex = 0;
            for (int c = 0; c < numClasses; c++) {
                double conf = [confidence[b * numClasses + c] doubleValue];
                if (conf > maxConfidence) {
                    maxConfidence = conf;
                    maxIndex = c;
                }
            }
            // Keep the box only if its best class clears the threshold.
            if (maxConfidence > confidenceThreshold) {
                // Coordinates are center x, center y, width, height (normalized);
                // convert them to an origin-based CGRect.
                double x = [coordinates[b * 4] doubleValue];
                double y = [coordinates[b * 4 + 1] doubleValue];
                double w = [coordinates[b * 4 + 2] doubleValue];
                double h = [coordinates[b * 4 + 3] doubleValue];
                CGRect rect = CGRectMake(x - w / 2, y - h / 2, w, h);
                Prediction prediction = {maxIndex, maxConfidence, rect};
                NSValue *predictionValue = [NSValue valueWithBytes:&prediction objCType:@encode(Prediction)];
                [unorderedPredictions addObject:predictionValue];
            }
        }
        // Nothing detected: reset the exposure, clear the UI, and bail out.
        if (unorderedPredictions.count == 0) {
            dispatch_async(dispatch_get_main_queue(), ^{
                AVCaptureDevice *camera = videoCaptureDeviceInput.device;
                if ([camera lockForConfiguration:nil]) {
                    [camera setExposurePointOfInterest:CGPointMake(0.5, 0.5)];
                    [camera setExposureMode:AVCaptureExposureModeAutoExpose];
                    [camera unlockForConfiguration];
                }
                _infoLabel.text = @"";
                _boxView.frame = CGRectZero;
            });
            return;
        }
        // Sort the detections by confidence, highest first.
        NSMutableArray *predictions = [NSMutableArray array];
        NSArray *orderedPredictions = [unorderedPredictions sortedArrayUsingComparator:^NSComparisonResult(id _Nonnull obj1, id _Nonnull obj2) {
            Prediction prediction1;
            [(NSValue *)obj1 getValue:&prediction1];
            Prediction prediction2;
            [(NSValue *)obj2 getValue:&prediction2];
            if (prediction1.confidence > prediction2.confidence) {
                return NSOrderedAscending;
            } else {
                return NSOrderedDescending;
            }
        }];
        // keep[i] marks whether detection i survives non-maximum suppression.
        NSMutableArray *keep = [NSMutableArray array];
        for (int i = 0; i < orderedPredictions.count; i++) {
            [keep addObject:@YES];
        }
        // Non-maximum suppression: walk the detections from highest confidence down,
        // and drop any lower-confidence box that overlaps a kept box too much.
        for (int i = 0; i < orderedPredictions.count; i++) {
            if ([keep[i] boolValue]) {
                [predictions addObject:orderedPredictions[i]];
                Prediction prediction;
                [(NSValue *)orderedPredictions[i] getValue:&prediction];
                CGRect bboxI = prediction.boundingBox;
                for (int j = i + 1; j < orderedPredictions.count; j++) {
                    if ([keep[j] boolValue]) {
                        Prediction predictionJ;
                        [(NSValue *)orderedPredictions[j] getValue:&predictionJ];
                        CGRect bboxJ = predictionJ.boundingBox;
                        if ([self ioUwithA:bboxI b:bboxJ] > nms_threshold) {
                            [keep replaceObjectAtIndex:j withObject:@NO];
                        }
                    }
                }
            }
        }
        dispatch_async(dispatch_get_main_queue(), ^{
            // The first element is the highest-confidence detection after NMS.
            NSValue *bestPredictionValue = [predictions firstObject];
            // Log every surviving detection, scaled from normalized to screen coordinates.
            for (NSValue *predictionValue in predictions) {
                Prediction prediction;
                [predictionValue getValue:&prediction];
                CGRect rect = CGRectMake(prediction.boundingBox.origin.x * width, prediction.boundingBox.origin.y * height, CGRectGetWidth(prediction.boundingBox) * width, CGRectGetHeight(prediction.boundingBox) * height);
                NSLog(@"==> %f,%f,%f,%f", rect.origin.x, rect.origin.y, rect.size.width, rect.size.height);
            }
            // Draw the box for the best detection and show its confidence.
            Prediction bestPrediction;
            [bestPredictionValue getValue:&bestPrediction];
            self.boxView.frame = CGRectMake(bestPrediction.boundingBox.origin.x * width, bestPrediction.boundingBox.origin.y * height, CGRectGetWidth(bestPrediction.boundingBox) * width, CGRectGetHeight(bestPrediction.boundingBox) * height);
            if (bestPrediction.labelIndex == 0) {
                _infoLabel.text = [NSString stringWithFormat:@"Doraemon confidence: %f", bestPrediction.confidence];
            }
            // } else {
            //     _infoLabel.text = [NSString stringWithFormat:@"%ld pens, confidence: %f", predictions.count, bestPrediction.confidence];
            // }
            // After five detections, convert the on-screen box to a normalized rect
            // (flipping the y origin) and seed the Vision object tracker with it.
            if (detectionNum == 5) {
                detectionNum = 0;
                isDetection = NO;
                CGRect convertedRect = [captureVideoPreviewLayer metadataOutputRectOfInterestForRect:self.boxView.frame];
                convertedRect = CGRectMake(convertedRect.origin.x, 1 - convertedRect.origin.y, convertedRect.size.width, convertedRect.size.height);
                VNDetectedObjectObservation *newObservation = [VNDetectedObjectObservation observationWithBoundingBox:convertedRect];
                self.lasetObservation = newObservation;
            } else {
                detectionNum++;
            }
            [self changeFocusWithPoint:CGPointZero];
        });
    }];
    request.imageCropAndScaleOption = VNImageCropAndScaleOptionScaleFill;
    VNImageRequestHandler *requestHandler = [[VNImageRequestHandler alloc] initWithCGImage:newPhoto.CGImage options:@{}];
    NSError *error = nil;
    [requestHandler performRequests:@[request] error:&error];
    if (error) {
        NSLog(@"%@", error.localizedDescription);
    }
}
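The NMS loop above relies on an IoU helper, ioUwithA:b:, which is not included in this post. A minimal sketch of what that method could look like (assuming both rects are in the same normalized coordinate space):

- (double)ioUwithA:(CGRect)a b:(CGRect)b {
    // Intersection-over-union of two boxes; 0 when they do not overlap.
    CGRect intersection = CGRectIntersection(a, b);
    if (CGRectIsNull(intersection)) {
        return 0.0;
    }
    double intersectionArea = CGRectGetWidth(intersection) * CGRectGetHeight(intersection);
    double unionArea = CGRectGetWidth(a) * CGRectGetHeight(a)
                     + CGRectGetWidth(b) * CGRectGetHeight(b)
                     - intersectionArea;
    return unionArea > 0.0 ? intersectionArea / unionArea : 0.0;
}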
#pragma mark - Converting a CMSampleBuffer to a UIImage
// Create a UIImage from a sample buffer.
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    // Get the CVImageBuffer that backs the sample buffer.
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    // Lock the pixel buffer's base address before reading it.
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    // Base address, bytes per row, and dimensions of the pixel buffer.
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    if (width == 0 || height == 0) {
        // Unlock before bailing out so the buffer is not left locked.
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
        return nil;
    }
    // Create a device-dependent RGB color space.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // Create a bitmap graphics context backed by the pixel buffer's data
    // (BGRA layout, matching the capture output's pixel format).
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
                                                 bytesPerRow, colorSpace,
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    // Create a Quartz image from the pixel data in the bitmap context,
    // then crop it to the full buffer rect.
    CGImageRef quartzImage = CGBitmapContextCreateImage(context);
    CGImageRef cgImage = CGImageCreateWithImageInRect(quartzImage, CGRectMake(0, 0, width, height));
    // Unlock the pixel buffer.
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    // Release the context and color space.
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    // Wrap the Quartz image in a UIImage, then release the CGImage objects.
    UIImage *image = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    CGImageRelease(quartzImage);
    return image;
}
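For context, the detection method is driven from the camera's video data output callback. A rough sketch of that call site (the isDetection flag matches the one used above; everything else here is assumed):

// AVCaptureVideoDataOutputSampleBufferDelegate callback. It runs on the queue
// passed to -setSampleBufferDelegate:queue:, so detection stays off the main thread.
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    // Only run the Core ML detection while we are in detection mode.
    if (isDetection) {
        [self objectDetectionWithSampleBuffer:sampleBuffer];
    }
}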
That covers the core code.