Main implementation:
import Foundation
import AVFoundation
import AVKit
import Combine
import UIKit
/// Uses the AVFoundation and AVKit frameworks to merge and export video files.
/// An AVMutableComposition is used to build the combined video, which is then exported as a .mov file via AVAssetExportSession.
///
/// MergeExport conforms to ObservableObject, which means it can be used from SwiftUI so that views update when its data changes.
class MergeExport: ObservableObject {
// exportUrl is marked with the @Published property wrapper so that the URL produced by the merge-and-export operation is published to observers.
@Published var exportUrl: URL?
// previewUrl stores a preview URL; the videoURLS array holds the URLs of the videos to be merged.
var previewUrl: URL?
var videoURLS = [URL]()
// HDVideoSize is a constant representing the size of an HD video frame.
let HDVideoSize = CGSize(width: 1920.0, height: 1080.0)
// Generates a unique export URL whose file name is based on the current date.
var uniqueUrl: URL {
var directory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
let date = dateFormatter.string(from: Date())
directory.appendPathComponent("merge-\(date).mov")
return directory
}
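// Note: the file name above is derived from the date only, so a second export on the
// same day reuses the same URL; mergeAndExportVideo removes any existing file at that
// URL before exporting. If several exports per day must be kept, a finer-grained
// timestamp could be added, e.g. dateFormatter.timeStyle = .medium (an assumption,
// not part of the original code).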
// Merges the video files in videoURLS into a single AVPlayerItem for previewing.
/**
 The method first converts each URL in videoURLS into an AVAsset and creates an AVMutableComposition to combine the video and audio tracks. It then loops over the assets, inserting each asset's video and audio tracks into the composition so that they form one continuous time range. Finally, it returns an AVPlayerItem that wraps the composition as its asset.
 */
func previewMerge() -> AVPlayerItem {
let videoAssets = videoURLS.map {
AVAsset(url: $0)
}
let composition = AVMutableComposition()
if let videoTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
let audioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) {
var startTime = CMTime.zero
for asset in videoAssets {
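// Note: tracks(withMediaType:)[0] assumes every asset contains at least one video
// track and at least one audio track; an asset without an audio track would crash here.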
do {
try videoTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: asset.duration), of: asset.tracks(withMediaType: .video)[0], at: startTime)
try audioTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: asset.duration), of: asset.tracks(withMediaType: .audio)[0], at: startTime)
} catch {
print("Error creating track")
}
startTime = CMTimeAdd(startTime, asset.duration)
}
}
return AVPlayerItem(asset: composition)
}
// Merges the video files in videoURLS and exports the result.
func mergeAndExportVideo() {
try? FileManager.default.removeItem(at: uniqueUrl)
/**
 videoURLS.map { AVAsset(url: $0) } iterates over the videoURLS array with map.
 On each pass, $0 is the current element, i.e. a video URL, and AVAsset(url: $0)
 creates an AVAsset from that URL. The result, videoAssets, is an array of AVAsset objects.
 */
let videoAssets = videoURLS.map {
AVAsset(url: $0)
}
// Create an AVMutableComposition to combine multiple video and audio tracks.
let composition = AVMutableComposition()
// Create an AVMutableVideoCompositionInstruction; together with the AVMutableVideoComposition built below, it specifies the instructions and parameters for the video composition.
let mainInstruction = AVMutableVideoCompositionInstruction()
if let videoTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
let audioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) {
var startTime = CMTime.zero
for asset in videoAssets {
do {
try videoTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: asset.duration), of: asset.tracks(withMediaType: .video)[0], at: startTime)
try audioTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: asset.duration), of: asset.tracks(withMediaType: .audio)[0], at: startTime)
} catch {
print("Error creating track")
}
// Call videoCompositionInstructionFor(track:using:) to build an AVMutableVideoCompositionLayerInstruction that sets each video track's transform, opacity and related properties for the composition, and append it to mainInstruction. The time range, frame rate and render size are then configured on mainInstruction and the videoComposition below.
let instruction = videoCompositionInstructionFor(track: videoTrack, using: asset)
instruction.setOpacity(1.0, at: startTime)
if asset != videoAssets.last {
instruction.setOpacity(0.0, at: CMTimeAdd(startTime, asset.duration))
}
mainInstruction.layerInstructions.append(instruction)
startTime = CMTimeAdd(startTime, asset.duration)
}
let totalDuration = startTime
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: totalDuration)
// Build the AVMutableVideoComposition, then create an AVAssetExportSession configured with the output URL, .mov output type, network-use optimization and the video composition.
let videoComposition = AVMutableVideoComposition()
videoComposition.instructions = [mainInstruction]
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
videoComposition.renderSize = HDVideoSize
videoComposition.renderScale = 1.0
guard let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else { return }
exporter.outputURL = uniqueUrl
exporter.outputFileType = .mov
exporter.shouldOptimizeForNetworkUse = true
exporter.videoComposition = videoComposition
// Export the file asynchronously with exportAsynchronously; when the export completes, update exportUrl on the main thread.
exporter.exportAsynchronously {
DispatchQueue.main.async { [weak self] in
if let exportUrl = exporter.outputURL {
self?.exportUrl = exportUrl
}
}
}
}
}
// Builds an AVMutableVideoCompositionLayerInstruction and sets the video's transform and orientation.
func videoCompositionInstructionFor(track: AVCompositionTrack, using asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: .video)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFrom(transform: transform)
var scaleToFitRatio = HDVideoSize.width / assetTrack.naturalSize.width
if assetInfo.isPortrait {
scaleToFitRatio = HDVideoSize.height / assetTrack.naturalSize.width
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
let concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(CGAffineTransform(translationX: (assetTrack.naturalSize.width * scaleToFitRatio) * 0.60, y: 0))
instruction.setTransform(concat, at: CMTime.zero)
} else {
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
let concat = assetTrack.preferredTransform.concatenating(scaleFactor)
instruction.setTransform(concat, at: CMTime.zero)
}
return instruction
}
// Derives the video's orientation from its preferred transform.
func orientationFrom(transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
isPortrait = true
assetOrientation = .right
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
isPortrait = true
assetOrientation = .left
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
}
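For reference, a minimal SwiftUI view that drives this class might look like the sketch below. The view name MergeDemoView, the buttons and the VideoPlayer layout are illustrative assumptions and are not part of the original code; only MergeExport, previewMerge(), mergeAndExportVideo() and exportUrl come from the class above.

import SwiftUI
import AVKit

// A minimal usage sketch. It assumes videoURLS has already been filled with local
// file URLs; the view and property names here are illustrative only.
struct MergeDemoView: View {
    @StateObject private var merger = MergeExport()
    @State private var player = AVPlayer()

    var body: some View {
        VStack {
            VideoPlayer(player: player)
                .frame(height: 300)
            Button("Preview merged video") {
                // previewMerge() returns an AVPlayerItem backed by the composition.
                player.replaceCurrentItem(with: merger.previewMerge())
                player.play()
            }
            Button("Merge and export") {
                merger.mergeAndExportVideo()
            }
            // exportUrl is @Published, so this text appears once the export finishes.
            if let url = merger.exportUrl {
                Text("Exported to \(url.lastPathComponent)")
            }
        }
    }
}

Because previewMerge() and mergeAndExportVideo() read asset durations and tracks synchronously, a production app would typically call them off the main thread (or use the asynchronous asset-loading APIs) to avoid blocking the UI.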