This article covers handling the user screenshot action on iOS, using Swift and the Photos framework introduced in iOS 8 (the snippets below assume import Photos).
Starting with iOS 7, the system posts the UIApplicationUserDidTakeScreenshotNotification notification whenever the user takes a screenshot. Two ways of obtaining the screenshot image are described below.
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
NotificationCenter.default.addObserver(self, selector: #selector(getScreenshotOperation), name: Notification.Name.UIApplicationUserDidTakeScreenshot, object: nil)
}
// On receiving the screenshot notification, grab the image and edit it
@objc func getScreenshotOperation(notification: Notification){
AFPrint(message: "用户截屏了")
// 方式一:模拟用户截屏操作获取图片
let takeScreenshotImage: UIImage = self.imageWithScreenshot()
editImage(getCurrentImage: takeScreenshotImage)
// Method 2: fetch the user's screenshot from the photo library (the delay gives the system time to save the screenshot into the album)
// self.perform(#selector(AFHomeViewController.getLastPhoto), with: nil, afterDelay: 3)
}
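Since the observer is registered in viewDidLoad, it is good practice to remove it when the controller goes away. A minimal sketch (on iOS 9 and later the system cleans this up automatically, so the deinit is optional):
deinit {
    // Stop listening for the screenshot notification (optional on iOS 9+)
    NotificationCenter.default.removeObserver(self)
}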
Method 1: obtain the image by simulating the user's screenshot
// MARK: - Simulate the screenshot behavior and capture the image
private func dataWithScreenshotInPNGFormat() -> Data{
let imageSize: CGSize
let orientation: UIInterfaceOrientation = UIApplication.shared.statusBarOrientation
if UIInterfaceOrientationIsPortrait(orientation){
imageSize = UIScreen.main.bounds.size
}else{
imageSize = CGSize(width: AFScreenWidth, height: AFScreenHeight)
}
UIGraphicsBeginImageContextWithOptions(imageSize, false, 0)
let context: CGContext = UIGraphicsGetCurrentContext()!
for window: UIWindow in UIApplication.shared.windows {
context.saveGState()
context.translateBy(x: window.center.x, y: window.center.y)
context.concatenate(window.transform)
context.translateBy(x: -window.bounds.size.width * window.layer.anchorPoint.x, y: -window.bounds.size.height * window.layer.anchorPoint.y)
if orientation == .landscapeLeft{
context.rotate(by: CGFloat(Double.pi / 2))
context.translateBy(x: 0, y: -imageSize.width)
}else if orientation == .landscapeRight{
context.rotate(by: -CGFloat(Double.pi / 2))
context.translateBy(x: -imageSize.height, y: 0)
}else if orientation == .portraitUpsideDown{
context.rotate(by: -CGFloat(Double.pi))
context.translateBy(x: -imageSize.width, y: -imageSize.height)
}
if window.responds(to: #selector(UIView.drawHierarchy(in:afterScreenUpdates:))){
window.drawHierarchy(in: window.bounds, afterScreenUpdates: true)
}else{
window.layer.render(in: context)
}
context.restoreGState()
}
let image: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return UIImagePNGRepresentation(image)!
}
// MARK: - Get the user's screenshot image
private func imageWithScreenshot() -> UIImage{
let imageData = self.dataWithScreenshotInPNGFormat()
return UIImage(data: imageData)!
}
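On iOS 10 and later, a simpler variant of Method 1 is possible with UIGraphicsImageRenderer. A minimal sketch that only renders the key window and skips the orientation handling above (not part of the original code):
// Render the key window into an image (iOS 10+)
private func simpleScreenshot() -> UIImage? {
    guard let window = UIApplication.shared.keyWindow else { return nil }
    let renderer = UIGraphicsImageRenderer(bounds: window.bounds)
    return renderer.image { _ in
        window.drawHierarchy(in: window.bounds, afterScreenUpdates: true)
    }
}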
Method 2: fetch the most recently added photo from the photo library
// MARK: - Fetch the last photo in the library, i.e. the user's latest screenshot
@objc private func getLastPhoto(){
let options: PHFetchOptions = PHFetchOptions()
options.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
let assetsFetchResults: PHFetchResult<PHAsset> = PHAsset.fetchAssets(with: options)
let phasset: PHAsset = assetsFetchResults.firstObject!
let imageManager: PHCachingImageManager = PHCachingImageManager()
imageManager.requestImage(for: phasset, targetSize: CGSize(width: AFScreenWidth, height: AFScreenHeight - AFTopHeight - AFTabbarHeight), contentMode: PHImageContentMode.aspectFit, options: nil) { (result, info) in
self.editImage(getCurrentImage: result!)
// Delete the original screenshot from the library
/*PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.deleteAssets([assetsFetchResults.firstObject, assetsFetchResults[1]] as NSFastEnumeration)
}, completionHandler: { (success, error) in
if success{
AFPrint(message: "删除成功")
}else{
AFPrint(message: "删除失败")
}
})*/
}
}
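Note that both reading from and writing to the photo library require user authorization, and the app's Info.plist needs an NSPhotoLibraryUsageDescription entry. A minimal sketch of a pre-flight check (the helper name is made up for illustration):
// Ask for photo library access before calling getLastPhoto or saving the edited image
private func requestPhotoLibraryAccess(_ completion: @escaping (Bool) -> Void) {
    switch PHPhotoLibrary.authorizationStatus() {
    case .authorized:
        completion(true)
    case .notDetermined:
        PHPhotoLibrary.requestAuthorization { status in
            DispatchQueue.main.async { completion(status == .authorized) }
        }
    default:
        // .denied or .restricted
        completion(false)
    }
}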
Editing the captured image
@objc private func editImage(getCurrentImage: UIImage){
var takeScreenshotImage: UIImage = getCurrentImage
// Display the image
let imageView: UIImageView = UIImageView(image: takeScreenshotImage)
imageView.frame = CGRect(x: 0, y: AFTopHeight, width: AFScreenWidth, height: AFScreenHeight - AFTopHeight - AFTabbarHeight)
self.view.addSubview(imageView)
// Add a watermark
let point: CGPoint = CGPoint(x: 20, y: AFScreenHeight - AFTopHeight - AFTabbarHeight-30)
let attributeString: NSDictionary = [NSAttributedStringKey.font : UIFont.systemFont(ofSize: 14), NSAttributedStringKey.foregroundColor : UIColor.red, NSAttributedStringKey.backgroundColor : UIColor.yellow]
takeScreenshotImage = self.addWaterTextWithImage(image: takeScreenshotImage, text: "Copyright belongs to xxx, violators will be prosecuted", textPoint: point, attributeString: attributeString)
imageView.image = takeScreenshotImage
// Save the edited image to the photo library
PHPhotoLibrary.shared().performChanges({
let req: PHAssetChangeRequest = PHAssetChangeRequest.creationRequestForAsset(from: takeScreenshotImage)
AFPrint(message: String(describing: req.creationDate))
}, completionHandler: { (success, error) in
AFPrint(message: "success " + "\(success)" + " error " + String(describing: error))
if success{
AFPrint(message: "保存成功")
}else{
AFPrint(message: "保存失败")
}
})
}
// MARK: - Add a text watermark to an image
private func addWaterTextWithImage(image: UIImage, text: NSString, textPoint: CGPoint, attributeString: NSDictionary) -> UIImage{
// Open an image context
UIGraphicsBeginImageContextWithOptions(image.size, false, 0)
// Draw the original image
image.draw(in: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
// Draw the watermark text
text.draw(at: textPoint, withAttributes: attributeString as? [NSAttributedStringKey : Any])
let currentImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return currentImage
}
Both approaches have shortcomings:
Method 1: if the user's screenshot contains something transient, such as a toast, it may already have disappeared by the time the code re-renders the screen, so the image captured in code can differ from what the user actually captured.
Method 2: the most recently added photo in the library is not guaranteed to be the user's screenshot.
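One way to reduce the risk of Method 2 on iOS 9 and later is to check whether the newest asset is actually tagged as a screenshot via its mediaSubtypes before using it. A minimal sketch of that check (an assumption, not part of the original code):
// Verify that the newest image asset really is a screenshot (iOS 9+)
let options = PHFetchOptions()
options.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
if let newest = PHAsset.fetchAssets(with: .image, options: options).firstObject,
    newest.mediaSubtypes.contains(.photoScreenshot) {
    // The system classified it as a screenshot; safe to hand to editImage
} else {
    // The latest photo is not a screenshot (or the screenshot has not been saved yet)
}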