Detecting faces in the camera feed and dynamically editing the image in Swift (CIDetector)

This is the Day 23 article of the Mynet Entertainment Advent Calendar.

VR may be all the rage these days, but lately I keep wishing everything in reality could just turn into my waifu.

While entertaining that fantasy, I tried building face replacement, since face detection looked easy to do in Swift.
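
Before the full camera code, here is the core idea in isolation: a single CIDetector pass over a still image. Everything else below is plumbing to feed camera frames into exactly this call once per frame. A minimal sketch in the same Swift 2 era syntax as the rest of this post (detectFaceRects is just an illustrative helper name, not part of the article's code):

import UIKit

//Return the bounding box of every face CIDetector finds in an image.
//Note: the rects come back in Core Image coordinates (origin at the bottom-left).
func detectFaceRects(image: UIImage) -> [CGRect] {
    guard let ciimage = CIImage(image: image) else { return [] }
    let detector = CIDetector(ofType: CIDetectorTypeFace, context: nil,
                              options: [CIDetectorAccuracy: CIDetectorAccuracyLow])
    return detector.featuresInImage(ciimage).map { $0.bounds }
}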

import UIKit
import AVFoundation

class TestViewController: UIViewController, UIGestureRecognizerDelegate, AVCaptureVideoDataOutputSampleBufferDelegate {
    let captureSession = AVCaptureSession()
    let videoDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    let audioDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeAudio)
    //let fileOutput = AVCaptureMovieFileOutput()
    var videoOutput = AVCaptureVideoDataOutput()
    var hideView = UIView()

    override func viewDidLoad() {
        super.viewDidLoad()

        //Register each input device (the audio input is not actually needed here)
        do {
            let videoInput = try AVCaptureDeviceInput(device: self.videoDevice) as AVCaptureDeviceInput
            self.captureSession.addInput(videoInput)
        } catch let error as NSError {
            print(error)
        }
        do {
            let audioInput = try AVCaptureDeviceInput(device: self.audioDevice) as AVCaptureInput
            self.captureSession.addInput(audioInput)
        } catch let error as NSError {
            print(error)
        }

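        //Capture frames as 32BGRA; the CGBitmapContext conversion below assumes this format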
        self.videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey : Int(kCVPixelFormatType_32BGRA)]

        //Register a delegate that gets called for every frame
        let queue: dispatch_queue_t = dispatch_queue_create("myqueue", DISPATCH_QUEUE_SERIAL)
        self.videoOutput.setSampleBufferDelegate(self, queue: queue)
        self.videoOutput.alwaysDiscardsLateVideoFrames = true

        self.captureSession.addOutput(self.videoOutput)

        let videoLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
        videoLayer.frame = self.view.bounds
        videoLayer.videoGravity = AVLayerVideoGravityResizeAspectFill

        self.view.layer.addSublayer(videoLayer)

        //Force the camera orientation to portrait
        for connection in self.videoOutput.connections {
            if let conn = connection as? AVCaptureConnection {
                if conn.supportsVideoOrientation {
                    conn.videoOrientation = AVCaptureVideoOrientation.Portrait
                }
            }
        }
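
        //Overlay view that will hold the face-hiding image views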
        hideView = UIView(frame: self.view.bounds)
        self.view.addSubview(hideView)
        self.captureSession.startRunning()    
    }
    //Convert a sample buffer into a UIImage
    func imageFromSampleBuffer(sampleBuffer: CMSampleBufferRef) -> UIImage {
        let imageBuffer: CVImageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer)!
        CVPixelBufferLockBaseAddress(imageBuffer, 0)
        //32BGRA is non-planar, so take the buffer's base address directly
        let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
        let width = CVPixelBufferGetWidth(imageBuffer)
        let height = CVPixelBufferGetHeight(imageBuffer)

        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = (CGBitmapInfo.ByteOrder32Little.rawValue | CGImageAlphaInfo.PremultipliedFirst.rawValue)
        let context = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo)
        let imageRef = CGBitmapContextCreateImage(context)

        CVPixelBufferUnlockBaseAddress(imageBuffer, 0)
        let resultImage: UIImage = UIImage(CGImage: imageRef!)
        return resultImage
    }
    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
    {
        //Run synchronously on the main queue (with async dispatch the queue backs up and the screen can't keep up)
        dispatch_sync(dispatch_get_main_queue(), {

            //Convert the buffer into a UIImage
            let image = self.imageFromSampleBuffer(sampleBuffer)
            let ciimage = CIImage(image: image)!

            //CIDetectorAccuracyHigh is more accurate (in practice, better at faces far from the camera) but slower
            let detector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyLow])
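            //(Note: allocating a CIDetector on every frame is wasteful; caching one in a
            // property and reusing it across frames would be cheaper.)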
            let faces = detector.featuresInImage(ciimage)

            //Clear the overlay views left over from the previous frame
            for subview in self.hideView.subviews {
                subview.removeFromSuperview()
            }

            //Process each detected face
            for case let feature as CIFaceFeature in faces {

                //Convert coordinates
                var faceRect: CGRect = feature.bounds
                let widthPer = self.view.bounds.width / image.size.width
                let heightPer = self.view.bounds.height / image.size.height

                //UIKit's origin is top-left while Core Image's is bottom-left, so flip the y-axis to match
                faceRect.origin.y = image.size.height - faceRect.origin.y - faceRect.size.height

                //Scale up to screen coordinates
                faceRect.origin.x *= widthPer
                faceRect.origin.y *= heightPer
                faceRect.size.width *= widthPer
                faceRect.size.height *= heightPer
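                //(Note: because the preview layer uses ResizeAspectFill, scaling x and y
                // independently like this only lines up exactly when the view and the
                // captured frame share the same aspect ratio.)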

                //Overlay an image that hides the face
                let hideImage = UIImageView(image:UIImage(named:"trump.jpg"))
                hideImage.frame = faceRect

                self.hideView.addSubview(hideImage)
            }
        })
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
}
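
One practical note: the session is started in viewDidLoad but never stopped. If this screen can be dismissed, stopping capture on the way out keeps the camera from running needlessly; a minimal addition along these lines (my suggestion, not part of the original article):

override func viewWillDisappear(animated: Bool) {
    super.viewWillDisappear(animated)
    //Stop capturing once the screen goes away
    self.captureSession.stopRunning()
}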

Top: the original Obama photo; bottom: the result after processing.

[Image: original photo]

[Image: face replaced by the overlay]

Impressions

・Even once the face is overwritten, if the person had a straw or a Pocky stick in their mouth it would look like it was stabbing into the overlay

・Frontal faces are detected surprisingly well even at Low accuracy, but profile faces seem to be a real struggle

・If you don't need pinpoint accuracy, this is easy enough that it should lend itself to plenty of applications

・With OpenCV instead of CIDetector you can apparently detect things other than faces, too (CIDetector itself also handles a few non-face types; see the sketch below)
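
For reference, those non-face CIDetector types are CIDetectorTypeRectangle, CIDetectorTypeQRCode, and (on newer SDKs) CIDetectorTypeText. A quick sketch of QR code detection in the same Swift 2 era syntax (decodeQRCodes is just an illustrative helper name):

//Scan a still image for QR codes and return the decoded payloads
func decodeQRCodes(image: UIImage) -> [String] {
    guard let ciimage = CIImage(image: image) else { return [] }
    let detector = CIDetector(ofType: CIDetectorTypeQRCode, context: nil,
                              options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
    var messages = [String]()
    for case let qr as CIQRCodeFeature in detector.featuresInImage(ciimage) {
        if let message = qr.messageString {
            messages.append(message)
        }
    }
    return messages
}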

Reference articles

swiftでAVCaptureVideoDataOutputを使ったカメラのテンプレート

Swiftで笑顔認識をやってみた

Next

Next up is ponkikki.
