Core Image face (avatar) detection
Detection.swift
import UIKit
import ImageIO
/// Detects faces in a `UIImage` using Core Image and overlays an
/// orange-bordered `UIView` on each detected face inside the given image view.
struct Detection {
    // CIContext creation is expensive; build it lazily once and reuse it.
    lazy var context: CIContext = {
        return CIContext(options: nil)
    }()

    /// Runs high-accuracy face detection on `originalImage` and adds a border
    /// view over each face found.
    /// - Parameter originalImage: The image to scan for faces.
    /// - Parameter imageView: The view displaying the image; assumed to use
    ///   aspect-fit scaling (the scale/offset math below relies on that —
    ///   NOTE(review): confirm the view's contentMode at the call site.
    mutating func headPortrait(originalImage: UIImage, imageView: UIImageView) {
        // CIImage(image:) is failable (e.g. for an image with no CGImage/CIImage
        // backing); bail out instead of force-unwrapping and crashing.
        guard let inputImage = CIImage(image: originalImage) else { return }
        let detector = CIDetector(ofType: CIDetectorTypeFace,
                                  context: context,
                                  options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
        // Use a safe conditional cast with an empty fallback instead of an
        // implicitly-unwrapped var plus `as!` force casts.
        let faceFeatures: [CIFaceFeature]
        if let orientation = inputImage.properties[kCGImagePropertyOrientation as String] {
            // Honor the EXIF orientation if the image carries one.
            faceFeatures = detector.featuresInImage(inputImage,
                options: [CIDetectorImageOrientation: orientation]) as? [CIFaceFeature] ?? []
        } else {
            faceFeatures = detector.featuresInImage(inputImage) as? [CIFaceFeature] ?? []
        }

        // 1. Flip the vertical axis: Core Image's origin is bottom-left while
        // UIKit's is top-left, so face rects must be mirrored into view space.
        let inputImageSize = inputImage.extent.size
        var transform = CGAffineTransformIdentity
        transform = CGAffineTransformScale(transform, 1, -1)
        transform = CGAffineTransformTranslate(transform, 0, -inputImageSize.height)

        // 2. Aspect-fit scale and centering offsets are loop-invariant;
        // compute them once instead of per face.
        let scale = min(imageView.bounds.size.width / inputImageSize.width,
                        imageView.bounds.size.height / inputImageSize.height)
        let offsetX = (imageView.bounds.size.width - inputImageSize.width * scale) / 2
        let offsetY = (imageView.bounds.size.height - inputImageSize.height * scale) / 2

        for faceFeature in faceFeatures {
            // Map the face rect: image coords -> flipped -> scaled -> centered.
            var faceViewBounds = CGRectApplyAffineTransform(faceFeature.bounds, transform)
            faceViewBounds = CGRectApplyAffineTransform(faceViewBounds, CGAffineTransformMakeScale(scale, scale))
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY

            // 3. Draw an orange border frame around the face.
            let faceView = UIView(frame: faceViewBounds)
            faceView.layer.borderColor = UIColor.orangeColor().CGColor
            faceView.layer.borderWidth = 1.5
            imageView.addSubview(faceView)
        }
    }
}
// Example usage. `originalImage` and `imageView` must be provided by the
// surrounding context (e.g. a view controller); they are not defined in
// this snippet. `var` (not `let`) is required because `headPortrait` is
// a mutating method (it touches the lazy `context` property).
var detection = Detection()
detection.headPortrait(originalImage, imageView: imageView)
Reference: "iOS 8: Core Image in Swift".