Getting the Depth of a Cinematic Mode Video with the Cinematic Framework


The Cinematic Framework was added in iOS 17.
With the Cinematic Framework, videos shot in Cinematic mode become easy to work with programmatically.

As a simple experiment, I tried retrieving the video frames, the depth, and the focus information.

For more detail, see the Cinematic Framework session from WWDC23.

Implementation

Retrieving the AVAsset

First, fetch the AVAsset in the usual way.
originalFilenames prints four entries, as shown below, so the asset appears to have a somewhat special structure.

originalFilenames: ["IMG_XXXX.MOV", "Adjustments.plist", "IMG_XXXXX0.aae", "FullSizeRender.mov"]

Also, although I couldn't reliably reproduce it, there were cases where the video failed to load after it had been trimmed or otherwise edited in the Photos app.

let phAssets: PHFetchResult<PHAsset> = PHAsset.fetchAssets(with: .video, options: nil)
let phAsset = phAssets.lastObject!

let resources = PHAssetResource.assetResources(for: phAsset)
print("originalFilenames: \(resources.map({ $0.originalFilename }))")

let options = PHVideoRequestOptions()
options.isNetworkAccessAllowed = true
options.version = .original
options.deliveryMode = .highQualityFormat

let avAsset: AVAsset = await withCheckedContinuation { continuation in
    PHImageManager.default().requestAVAsset(forVideo: phAsset, options: options) { asset, _, _ in
        continuation.resume(returning: asset!)
    }
}
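
Incidentally, the fetch above just grabs the most recent video, which may or may not actually be a Cinematic mode one. If you want to be sure, one option is to check PHAsset.mediaSubtypes (available since iOS 15). A minimal sketch of that idea, which is my own addition rather than part of the original code:

// Pick the most recent video in the library that was shot in Cinematic mode.
let allVideos = PHAsset.fetchAssets(with: .video, options: nil)
var cinematicAsset: PHAsset?
allVideos.enumerateObjects { asset, _, _ in
    if asset.mediaSubtypes.contains(.videoCinematic) {
        cinematicAsset = asset // keeps overwriting, so we end up with the last match
    }
}
guard let phAsset = cinematicAsset else {
    print("No Cinematic mode videos found in the library")
    return
}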

Obtaining the CNScript

To obtain the focus information, we create a CNScript along with a tolerance.
We also create a CNAssetInfo, which is used both to get the track information and to compute the tolerance.

let cinematicScript: CNScript = try await CNScript(asset: avAsset)

let assetInfo: CNAssetInfo = try await CNAssetInfo(asset: avAsset)
let nominalFrameRate = try await assetInfo.frameTimingTrack.load(.nominalFrameRate)
let naturalTimeScale = try await assetInfo.frameTimingTrack.load(.naturalTimeScale)
let tolerance = CMTimeMakeWithSeconds(1.0 / Double(nominalFrameRate), preferredTimescale: naturalTimeScale)
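
Since the tolerance is one frame's duration, a lookup will match the script frame closest to the requested time. As a quick sanity check (a minimal sketch; the 1-second timestamp is arbitrary and not part of the original code), you can ask the script where the focus is at some point in the clip:

let probeTime = CMTime(seconds: 1.0, preferredTimescale: naturalTimeScale)
if let scriptFrame = cinematicScript.frame(at: probeTime, tolerance: tolerance) {
    // normalizedRect is expressed in normalized (0...1) coordinates
    print("focused area at 1s: \(scriptFrame.focusDetection.normalizedRect)")
}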

Creating the AVAssetReader and AVAssetReaderTrackOutputs

We create an AVAssetReader and the AVAssetReaderTrackOutputs.
Judging from CNRenderingSession.sourcePixelFormatTypes, Cinematic mode video appears to support five pixel formats: 420v, 422v, x420, x422, and hdis.
To keep things easy to work with, here we request the video frames as 32-bit ARGB (kCVPixelFormatType_32ARGB).
The depth is read as Float16 (half float).
Thanks to the Cinematic Framework, the video track and the depth track were easy to get hold of.
Strictly speaking the track contains disparity rather than depth itself, but since objects with larger disparity are closer to the camera, the value can still be used to judge whether something is in the foreground or the background (see the short note after the code below).

let assetReader: AVAssetReader = try AVAssetReader(asset: avAsset)

let videoOutputSettings: [String: Any] = [
    kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32ARGB,
    kCVPixelBufferIOSurfacePropertiesKey as String: [String: Any]()
]
let videoTrackOutput = AVAssetReaderTrackOutput(track: assetInfo.cinematicVideoTrack, outputSettings: videoOutputSettings)
videoTrackOutput.alwaysCopiesSampleData = false
assetReader.add(videoTrackOutput)

let disparityOutputSettings: [String: Any] = [
    kCVPixelBufferPixelFormatTypeKey as String: [kCVPixelFormatType_DisparityFloat16]
]
let disparityTrackOutput = AVAssetReaderTrackOutput(track: assetInfo.cinematicDisparityTrack, outputSettings: disparityOutputSettings)
disparityTrackOutput.alwaysCopiesSampleData = false
assetReader.add(disparityTrackOutput)
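
On the disparity vs. depth point above: disparity is roughly inversely proportional to distance, so if you just need a relative depth ordering you can invert it. A purely illustrative sketch (not part of the framework; without the camera baseline and focal length this gives a relative value, not meters):

// Larger disparity = closer to the camera, so a relative depth can be
// approximated as the reciprocal of the disparity value.
func relativeDepth(fromDisparity disparity: Float) -> Float? {
    guard disparity > 0 else { return nil }
    return 1.0 / disparity
}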

Starting the read

We start reading with the AVAssetReader and use a CADisplayLink to drive the per-frame loop at 30 fps, in sync with screen refresh.

let displayLink = CADisplayLink(target: self, selector: #selector(onDisplayLink(link:)))
displayLink.add(to: .main, forMode: .common)
displayLink.preferredFrameRateRange = .init(minimum: 30, maximum: 30, preferred: 30)
assetReader.startReading()
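
Note that copyNextSampleBuffer() starts returning nil once the reader has delivered every sample, so the loop below simply does nothing at the end of the clip while the display link keeps firing. If you want to clean up explicitly, a small addition of my own (not in the original code) at the top of the callback could be:

// At the start of onDisplayLink(link:): stop the 30 fps callbacks once reading is done.
if assetReader.status == .completed || assetReader.status == .failed {
    displayLink.invalidate()
    return
}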

Loop processing

Using the CNScript, we look up the focused area at the current time.
We grab a CMSampleBuffer from each AVAssetReaderTrackOutput and convert it CVPixelBuffer → CIImage to make it easier to manipulate.
The images came out rotated by 180 degrees, so we correct for that here as well.
The video frame is converted to a UIImage via CGImage as-is.
The depth frame is first converted from Float16 to RGBA with a CIKernel (described below), then converted to a UIImage via CGImage.

@objc func onDisplayLink(link: CADisplayLink) {
    guard let videoSampleBuffer = videoTrackOutput.copyNextSampleBuffer(),
          let disparitySampleBuffer = disparityTrackOutput.copyNextSampleBuffer() else { return }

    let frame = cinematicScript.frame(at: videoSampleBuffer.presentationTimeStamp, tolerance: tolerance)
    rect = frame?.focusDetection.normalizedRect

    let ciVideo = CIImage(cvPixelBuffer: videoSampleBuffer.imageBuffer!)
        .transformed(by: CGAffineTransform(rotationAngle: .pi))
    let cgVideo = context.createCGImage(ciVideo, from: ciVideo.extent)!
    sourceImage = UIImage(cgImage: cgVideo)

    let ciDisparity = CIImage(cvPixelBuffer: disparitySampleBuffer.imageBuffer!)
        .transformed(by: CGAffineTransform(rotationAngle: .pi))
    let ciGrayScaledDisparity = kernel.apply(extent: ciDisparity.extent, arguments: [ciDisparity])!
    let cgGrayScaledDisparity =  context.createCGImage(ciGrayScaledDisparity, from: ciGrayScaledDisparity.extent)!
    depthImage = UIImage(cgImage: cgGrayScaledDisparity)
}
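
If you want the numeric disparity at a particular pixel (for example the center of the focus rectangle) rather than just a grayscale visualization, you can read the Float16 values straight out of the pixel buffer. A minimal sketch, assuming the buffer really is kCVPixelFormatType_DisparityFloat16 and that you are running on an arm64 device where Swift's Float16 type is available:

// Returns the disparity at a normalized (0...1) point in the buffer.
func disparity(at point: CGPoint, in pixelBuffer: CVPixelBuffer) -> Float? {
    CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
    defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }

    guard let base = CVPixelBufferGetBaseAddress(pixelBuffer) else { return nil }
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)

    let x = min(max(Int(point.x * CGFloat(width)), 0), width - 1)
    let y = min(max(Int(point.y * CGFloat(height)), 0), height - 1)

    // Each pixel is a single 16-bit (half precision) float.
    let row = base.advanced(by: y * bytesPerRow)
    return Float(row.assumingMemoryBound(to: Float16.self)[x])
}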

To perform the depth → RGBA conversion above, we write a shader to use as a CIKernel.
Add a .metal file to the project, then in Build Settings set Other Metal Compiler Flags to -fcikernel and MTLLINKER_FLAGS to -cikernel.

#include <metal_stdlib>
using namespace metal;
#include <CoreImage/CoreImage.h>

extern "C" {
    namespace coreimage {
        float4 grayscale(coreimage::sample_h i, coreimage::destination dest) {
            return float4(i.r, i.r, i.r, 1);
        }
    }
}

We then create a CIColorKernel from the compiled shader.

let kernel: CIColorKernel = {
    let url = Bundle.main.url(forResource: "default", withExtension: "metallib")!
    let data = try! Data(contentsOf: url)
    return try! CIColorKernel(functionName: "grayscale", fromMetalLibraryData: data)
}()

Display

The resulting UIImages are shown in a simple SwiftUI view. Because the frames were rotated by 180°, the focus rectangle's center has to be mirrored as well, which is why the overlay uses (1 - rect.midX) and (1 - rect.midY) for the position.

var body: some View {
    VStack {
        Image(uiImage: sourceImage)
            .resizable()
            .aspectRatio(contentMode: .fit)
            .overlay {
                if let rect = vm.rect {
                    GeometryReader { geometry in
                        Rectangle()
                            .stroke(Color.red, lineWidth: 2)
                            .frame(width: geometry.size.width * rect.width, height: geometry.size.height * rect.height)
                            .position(x: geometry.size.width * (1 - rect.midX), y: geometry.size.height * (1 - rect.midY))
                    }
                }
            }
        Image(uiImage: depthImage)
            .resizable()
            .aspectRatio(contentMode: .fit)
    }
}

Result

The video, the depth, and the focus area were all displayed successfully.

Summary

Using the Cinematic Framework added in iOS 17, I retrieved and displayed the video frames, the depth, and the focus information of a video shot in Cinematic mode.
With the Cinematic Framework it was easy to identify the disparity track and pull the depth out through an AVAssetReaderTrackOutput.
The depth captured in Cinematic mode was more accurate than I expected; you can even roughly make out individual fingers.
The Cinematic Framework also offers editing and export features for Cinematic mode videos, so I would like to try those next.

Full code

import AVFoundation
import Cinematic
import SwiftUI
import Photos

struct ContentView: View {
    @StateObject var vm = ContentViewModel()
    var body: some View {
        VStack {
            if let image = vm.sourceImage, let depthImage = vm.depthImage {
                Image(uiImage: image)
                    .resizable()
                    .aspectRatio(contentMode: .fit)
                    .overlay { rect }
                Image(uiImage: depthImage)
                    .resizable()
                    .aspectRatio(contentMode: .fit)
            } else {
                Image(systemName: "globe")
                    .imageScale(.large)
                    .foregroundStyle(.tint)
                Text("Hello, world!")
            }
        }
        .task {
            await vm.task()
        }
    }

    @ViewBuilder
    var rect: some View {
        if let rect = vm.rect {
            GeometryReader { geometry in
                Rectangle()
                    .stroke(Color.red, lineWidth: 2)
                    .frame(width: geometry.size.width * rect.width, height: geometry.size.height * rect.height)
                    .position(x: geometry.size.width * (1 - rect.midX), y: geometry.size.height * (1 - rect.midY))
            }
        }
    }
}

@MainActor
class ContentViewModel: ObservableObject {
    @Published var sourceImage: UIImage?
    @Published var depthImage: UIImage?
    @Published var rect: CGRect?

    let context = CIContext()

    var avAsset: AVAsset!
    var cinematicScript: CNScript!
    var assetInfo: CNAssetInfo!
    var tolerance: CMTime!
    var assetReader: AVAssetReader!
    var videoTrackOutput: AVAssetReaderTrackOutput!
    var disparityTrackOutput: AVAssetReaderTrackOutput!

    var displayLink: CADisplayLink!

    let kernel: CIColorKernel = {
        let url = Bundle.main.url(forResource: "default", withExtension: "metallib")!
        let data = try! Data(contentsOf: url)
        return try! CIColorKernel(functionName: "grayscale", fromMetalLibraryData: data)
    }()

    func task() async {
        let phAssets: PHFetchResult<PHAsset> = PHAsset.fetchAssets(with: .video, options: nil)
        let phAsset = phAssets.lastObject!

        let resources = PHAssetResource.assetResources(for: phAsset)
        print("originalFilenames: \(resources.map({ $0.originalFilename }))")

        let options = PHVideoRequestOptions()
        options.isNetworkAccessAllowed = true
        options.version = .original
        options.deliveryMode = .highQualityFormat

        avAsset = await withCheckedContinuation { continuation in
            PHImageManager.default().requestAVAsset(forVideo: phAsset, options: options) { asset, _, _ in
                continuation.resume(returning: asset!)
            }
        }
        cinematicScript = try! await CNScript(asset: avAsset)

        assetInfo = try! await CNAssetInfo(asset: avAsset)
        let nominalFrameRate = try! await assetInfo.frameTimingTrack.load(.nominalFrameRate)
        let naturalTimeScale = try! await assetInfo.frameTimingTrack.load(.naturalTimeScale)
        tolerance = CMTimeMakeWithSeconds(1.0 / Double(nominalFrameRate), preferredTimescale: naturalTimeScale)

        assetReader = try! AVAssetReader(asset: avAsset)

        let videoOutputSettings: [String: Any] = [
            kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32ARGB,
            kCVPixelBufferIOSurfacePropertiesKey as String: [String: Any]()
        ]
        videoTrackOutput = AVAssetReaderTrackOutput(track: assetInfo.cinematicVideoTrack, outputSettings: videoOutputSettings)
        videoTrackOutput.alwaysCopiesSampleData = false
        assetReader.add(videoTrackOutput)

        let disparityOutputSettings: [String: Any] = [
            kCVPixelBufferPixelFormatTypeKey as String: [kCVPixelFormatType_DisparityFloat16]
        ]
        disparityTrackOutput = AVAssetReaderTrackOutput(track: assetInfo.cinematicDisparityTrack, outputSettings: disparityOutputSettings)
        disparityTrackOutput.alwaysCopiesSampleData = false
        assetReader.add(disparityTrackOutput)

        displayLink = CADisplayLink(target: self, selector: #selector(onDisplayLink(link:)))
        displayLink.add(to: .main, forMode: .common)
        displayLink.preferredFrameRateRange = .init(minimum: 30, maximum: 30, preferred: 30)
        assetReader.startReading()
    }

    @objc func onDisplayLink(link: CADisplayLink) {
        guard let videoSampleBuffer = videoTrackOutput.copyNextSampleBuffer(),
              let disparitySampleBuffer = disparityTrackOutput.copyNextSampleBuffer() else { return }

        let frame = cinematicScript.frame(at: videoSampleBuffer.presentationTimeStamp, tolerance: tolerance)
        rect = frame?.focusDetection.normalizedRect

        let ciVideo = CIImage(cvPixelBuffer: videoSampleBuffer.imageBuffer!)
            .transformed(by: CGAffineTransform(rotationAngle: .pi))
        let cgVideo = context.createCGImage(ciVideo, from: ciVideo.extent)!
        sourceImage = UIImage(cgImage: cgVideo)

        let ciDisparity = CIImage(cvPixelBuffer: disparitySampleBuffer.imageBuffer!)
            .transformed(by: CGAffineTransform(rotationAngle: .pi))
        let ciGrayScaledDisparity = kernel.apply(extent: ciDisparity.extent, arguments: [ciDisparity])!
        let cgGrayScaledDisparity =  context.createCGImage(ciGrayScaledDisparity, from: ciGrayScaledDisparity.extent)!
        depthImage = UIImage(cgImage: cgGrayScaledDisparity)
    }
}