Swift 4.2 Custom Camera

Author: JasonFive | Published 2018-11-28 21:01

    The custom camera is built on the AVFoundation framework. This is a simple demo that I will keep updating; the result looks like this:


    [Screenshot of the demo camera UI]

    The objects we will need:

        var device: AVCaptureDevice!                   /// The capture device, e.g. the camera
        var captureSession: AVCaptureSession!          /// The session that coordinates data flow from input to output; the bridge between the two
        var previewLayer: AVCaptureVideoPreviewLayer!  /// The preview layer that shows the captured image in real time
        var output: AVCaptureVideoDataOutput!          /// The video data output
        var beganTakePicture: Bool = false             /// Whether the camera should keep the next frame as a photo
    

    Create a session

            // sessionPreset controls the quality of the output stream
            captureSession = AVCaptureSession()
            //captureSession.sessionPreset = AVCaptureSession.Preset.photo
            if UIDevice.current.userInterfaceIdiom == .phone {
                captureSession.sessionPreset = AVCaptureSession.Preset.vga640x480
            } else {
                captureSession.sessionPreset = AVCaptureSession.Preset.photo
            }
            // Prefer a higher resolution if the device supports it
            if captureSession.canSetSessionPreset(.hd1280x720) {
                captureSession.sessionPreset = .hd1280x720
            }
            // Get the input device. builtInWideAngleCamera is the general-purpose camera, AVMediaType.video means video media,
            // and .back selects the rear camera; use .front if you want the front camera instead.
            if #available(iOS 10.0, *) {
                let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .back).devices
                device = availableDevices.first
            } else {
                let devices = AVCaptureDevice.devices(for: .video)
                guard devices.count > 0 else { return }   /// No camera available
                guard let device = devices.filter({ $0.position == .back }).first else { return }
                self.device = device
            }
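
    The same DiscoverySession API can also return the front camera, so switching cameras is mostly a matter of swapping the session's input. Here is a minimal sketch, assuming the captureSession and device properties above; the switchToFrontCamera() name is my own:

        @available(iOS 10.0, *)
        func switchToFrontCamera() {
            // Look up the front-facing wide-angle camera
            guard let frontDevice = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                                     mediaType: .video,
                                                                     position: .front).devices.first else { return }
            captureSession.beginConfiguration()
            // Remove the current inputs, then add the front camera as the new input
            captureSession.inputs.forEach { captureSession.removeInput($0) }
            if let input = try? AVCaptureDeviceInput(device: frontDevice),
                captureSession.canAddInput(input) {
                captureSession.addInput(input)
                device = frontDevice
            }
            captureSession.commitConfiguration()
        }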
    

    Configure the session

            captureSession.beginConfiguration()
            do {
                // Add the rear camera as the session's input
                let captureDeviceInput = try AVCaptureDeviceInput(device: device)
                if captureSession.canAddInput(captureDeviceInput) {
                    captureSession.addInput(captureDeviceInput)
                }
            } catch {
                print(error.localizedDescription)
            }
            // Set up the video preview layer, i.e. the live camera preview
            previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
            sessionView.layer.addSublayer(previewLayer)    /// added to sessionView
            previewLayer.frame = CGRect(x: 0, y: 0, width: JZScreenWidth, height: JZScreenHeigth - 80)
            previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill /// fill the preview frame while keeping the aspect ratio (edges may be cropped)

            // Set up the output
            output = AVCaptureVideoDataOutput()
            // Specify the pixel format
            output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32BGRA)]
            // Whether to discard new frames that arrive while an older frame is still being processed.
            // Defaults to true; setting it to false can significantly increase memory usage.
            output.alwaysDiscardsLateVideoFrames = true
            if captureSession.canAddOutput(output) {
                captureSession.addOutput(output)
            }
            // Changes made between beginConfiguration() and commitConfiguration() are applied atomically on commit
            captureSession.commitConfiguration()
            captureSession.startRunning()
            // Deliver the sample buffer delegate callbacks on a background queue
            let queue = DispatchQueue(label: "com.brianadvent.captureQueue")
            output.setSampleBufferDelegate(self, queue: queue)

            let captureConnection = output.connection(with: .video)
            if captureConnection?.isVideoOrientationSupported == true {
                /// Important: this keeps the captured photo from coming out rotated 90 degrees
                captureConnection?.videoOrientation = self.getCaptureVideoOrientation()
            }
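
    The preview layer is given a fixed frame here. A common alternative (my own addition, assuming this code lives in the view controller that owns sessionView) is to keep it in sync with its container whenever layout changes:

        override func viewDidLayoutSubviews() {
            super.viewDidLayoutSubviews()
            // Keep the preview layer matched to its container view
            previewLayer?.frame = sessionView.bounds
        }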
    

    Conform to AVCaptureVideoDataOutputSampleBufferDelegate
    and implement the delegate method

          func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
            if beganTakePicture == true {
                beganTakePicture = false
                /// Note: update the UI on the main thread
                DispatchQueue.main.async {
                    #if false
                    self.photoImageView.image = self.imageConvert(sampleBuffer: sampleBuffer)
                    self.captureSession.stopRunning()
                    self.view.bringSubviewToFront(self.photoView)
                    #else
                    self.captureSession.stopRunning()
                    let image = self.imageConvert(sampleBuffer: sampleBuffer)
                    // Scale the photo to the export size
                    let image1 = self.scaleToSize(image: image!, size: CGSize(width: JZScreenWidth, height: JZScreenHeigth - 40))
                    // Crop the region of interest at 2x scale
                    let resultImage = self.imageFromImage(imageFromImage: image1, inRect: CGRect(x: 0, y: 20 + (JZScreenHeigth - (JZScreenWidth * self.heightZoomPoint)), width: JZScreenWidth * 2, height: (JZScreenWidth * self.heightZoomPoint) * 2))
                    self.photoImageView.image = resultImage
                    self.view.bringSubviewToFront(self.photoView)
                    #endif
                }
            }
        }
    

    At this point the basic custom-camera capture flow is done.
    We still need a few helper methods.
    First, keep the captured photo from coming out rotated 90 degrees:

        /// Map the device orientation to a capture video orientation
        func getCaptureVideoOrientation() -> AVCaptureVideoOrientation {
            switch UIDevice.current.orientation {
            case .portrait, .faceUp, .faceDown:
                return .portrait
            case .portraitUpsideDown: // If we returned .portraitUpsideDown here, the video would end up upside down relative to how it was shot.
                return .portrait
            case .landscapeLeft:
                return .landscapeRight
            case .landscapeRight:
                return .landscapeLeft
            default:
                return .portrait
            }
        }
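
    One caveat not covered in the original post: per the UIDevice documentation, UIDevice.current.orientation only reports a meaningful value once orientation notifications are enabled, so it is worth turning them on before the session starts, for example:

        // e.g. in viewDidLoad of the camera view controller
        UIDevice.current.beginGeneratingDeviceOrientationNotifications()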
    

    Convert the CMSampleBuffer stream into a UIImage

        /// CMSampleBuffer => UIImage
        func imageConvert(sampleBuffer: CMSampleBuffer?) -> UIImage? {
            guard let sampleBuffer = sampleBuffer, CMSampleBufferIsValid(sampleBuffer),
                let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
            let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
            return UIImage(ciImage: ciImage)
        }
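
    A UIImage backed only by a CIImage has no cgImage, which can trip up CGImage-based cropping or saving later on. In this demo that is avoided because scaleToSize redraws the image first, but as a hedged variation (my own, not from the original post) you can render through a CIContext up front; the imageConvertRendered name is hypothetical:

        /// Sketch: return a CGImage-backed UIImage so that image.cgImage is always available
        func imageConvertRendered(sampleBuffer: CMSampleBuffer?) -> UIImage? {
            guard let sampleBuffer = sampleBuffer, CMSampleBufferIsValid(sampleBuffer),
                let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
            let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
            let context = CIContext()
            // Render the CIImage into a CGImage covering its full extent
            guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return nil }
            return UIImage(cgImage: cgImage)
        }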
    

    Toggling the torch (flash) on and off

            let device: AVCaptureDevice = AVCaptureDevice.default(for: .video)!
            guard device.hasTorch else { return }
            // Lock the hardware for configuration
            try! device.lockForConfiguration()
            if device.torchMode == .on {
                device.torchMode = .off
                sender.isSelected = false
            } else {
                device.torchMode = .on
                sender.isSelected = true
            }
            // Unlock the hardware when the change is done
            device.unlockForConfiguration()
    

    Checking camera permission

        // MARK: - Check camera permission
        func canUseCamera() -> Bool {
            let authStatus: AVAuthorizationStatus = AVCaptureDevice.authorizationStatus(for: .video)
            /*
             notDetermined: // The user has not yet made a choice for this app
             restricted:    // The app is not authorized to access the camera, and the user cannot change its status,
                               e.g. because of active restrictions such as parental controls
             denied:        // The user has explicitly denied this app access to the camera
             authorized:    // The user has authorized this app to access the camera
             */
            if authStatus == .denied {
                let alert = UIAlertController(title: "Please enable camera access", message: "Settings > Privacy > Camera", preferredStyle: .alert)
                alert.addAction(UIAlertAction(title: "Cancel", style: .cancel, handler: nil))
                alert.addAction(UIAlertAction(title: "OK", style: .default, handler: nil))
                present(alert, animated: true, completion: nil)
                return false
            }
            return true
        }
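
    For the .notDetermined case, the system prompt can be triggered explicitly with AVCaptureDevice.requestAccess(for:). A minimal sketch; the requestCameraAccessIfNeeded name and the setupSession placeholder are my own, not from the original post:

        /// Sketch: ask for camera access if the user has not decided yet
        func requestCameraAccessIfNeeded() {
            guard AVCaptureDevice.authorizationStatus(for: .video) == .notDetermined else { return }
            AVCaptureDevice.requestAccess(for: .video) { granted in
                DispatchQueue.main.async {
                    if granted {
                        // self.setupSession()   // hypothetical: run the session setup shown earlier
                    }
                }
            }
        }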
    

    Cropping the image

        // 1. First, scale the image to the target size
        func scaleToSize(image: UIImage!, size: CGSize) -> UIImage {
            // Open an image context with the given drawing size
            //UIGraphicsBeginImageContext(size);

            /*
             *  UIGraphicsBeginImageContextWithOptions(CGSize size, BOOL opaque, CGFloat scale)
             *  size:   the size of the bitmap that will be created
             *  opaque: true means the bitmap is opaque, false means it keeps an alpha channel
             *  scale:  the bitmap's scale factor; 0 means use the device's main screen scale
             *  The resulting bitmap corresponds to a UIImage object.
             *  The image is rendered at 2x here to avoid losing pixel detail.
             */
            UIGraphicsBeginImageContextWithOptions(size, false, 2.0)
            // Draw the image into the given rect
            image.draw(in: CGRect(x: 0, y: 0, width: size.width, height: size.height))
            // Grab the image from the current image context
            let img: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
            // Pop the image context
            UIGraphicsEndImageContext()
            // Return the resized image
            return img
        }
        // 2. Then crop the scaled image to the given rect
        func imageFromImage(imageFromImage: UIImage!, inRect: CGRect) -> UIImage {
            // Convert the UIImage to a CGImage
            let sourceImageRef: CGImage = imageFromImage.cgImage!
            // Crop to the given rectangle
            // CGImageRef newImageRef = CGImageCreateWithImageInRect(sourceImageRef, rect);
            let newImageRef: CGImage = sourceImageRef.cropping(to: inRect)!
            // Convert the CGImage back to a UIImage
            // UIImage *newImage = [UIImage imageWithCGImage:newImageRef];
            let img: UIImage = UIImage(cgImage: newImageRef)
            // Return the cropped image
            return img
        }
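
    If the cropped result should also end up in the user's photo library, UIImageWriteToSavedPhotosAlbum is the simplest route. This is an addition of mine, not part of the original demo, and it requires an NSPhotoLibraryAddUsageDescription entry in Info.plist:

        // Sketch: save resultImage after it has been produced in the delegate method above
        UIImageWriteToSavedPhotosAlbum(resultImage, nil, nil, nil)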
    

    Finally, the key step: the capture button's tap action

        // Take a photo: the next frame delivered to the delegate will be kept
        @IBAction func startPhotoBtn(_ sender: UIButton) {
            beganTakePicture = true
        }
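
    Since the delegate stops the session after a capture, a retake action just needs to restart it and bring the live preview back. A minimal sketch, assuming the same photoImageView/sessionView outlets; the retakeBtn action is my own addition:

        @IBAction func retakeBtn(_ sender: UIButton) {
            // Clear the previous shot and bring the live preview back to the front
            photoImageView.image = nil
            view.bringSubviewToFront(sessionView)
            captureSession.startRunning()
        }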
    

