What causes AVMutableComposition to drastically increase the size of a video? - iOS, Swift, AVFoundation

Arm*_*min 10 video ios avmutablecomposition swift swift3

Suppose we have two video assets (AVAsset objects); let's call them blank and main, where main is a video of arbitrary finite length, say 2-5 minutes, and blank is always a 4-second video. We want to merge the videos in the following order:

blank - main - blank
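For context, a minimal setup for the two assets might look like this (the file URLs here are hypothetical; the question does not show how blank and main are loaded):

    // Hypothetical setup: load the two clips from local files.
    let blank = AVAsset(url: URL(fileURLWithPath: "/path/to/blank.mp4")) // always 4 s
    let main  = AVAsset(url: URL(fileURLWithPath: "/path/to/main.mp4"))  // 2-5 minutes
    // Expected merged duration: 4 s + main.duration + 4 s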

    // Create an AVMutableComposition object. This object will hold our multiple AVMutableCompositionTracks.

    let mixComposition = AVMutableComposition()

    let assets = [blank, main, blank]
    var totalTime : CMTime = kCMTimeZero
    var atTimeM: CMTime = kCMTimeZero

    Utils.log([blank.duration, main.duration])

    // VIDEO TRACK
    let videoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))

    for (index,asset) in assets.enumerated() {

        do {

            if index == 0 {
                atTimeM = kCMTimeZero
            } else {
                atTimeM = totalTime // <-- Use the total time for all the videos seen so far.
            }

            try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, asset.duration), of: asset.tracks(withMediaType: AVMediaTypeVideo)[0], at: atTimeM)

        } catch let error as NSError {
            Utils.log("error: \(error)")
        }

        totalTime = CMTimeAdd(totalTime, asset.duration)
    }

    // AUDIO TRACK
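    // Only the main asset's audio is used; it starts after the first blank clip (at blank.duration).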
    let audioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
    do {
        try audioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, main.duration), of: main.tracks(withMediaType: AVMediaTypeAudio)[0], at: blank.duration)
    } catch _ {
        completionHandler(nil, ErrorType(rawValue: "Unable to add audio in composition."))
        return
    }

    let outputURL = mainVideoObject.getDirectoryURL()?.appendingPathComponent("video-with-blank.mp4")

    guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset1280x720) else {
        completionHandler(nil, ErrorType(rawValue: "Unable to create export session."))
        return
    }

    let mainInstruction = AVMutableVideoCompositionInstruction()

    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeAdd(blank.duration, CMTimeAdd(main.duration, blank.duration)))

    // Fixing orientation
    let firstLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
    let firstAssetTrack = blank.tracks(withMediaType: AVMediaTypeVideo)[0]
    firstLayerInstruction.setTransform(firstAssetTrack.preferredTransform, at: kCMTimeZero)
    firstLayerInstruction.setOpacity(0.0, at: blank.duration)

    let secondLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
    let secondAssetTrack = main.tracks(withMediaType: AVMediaTypeVideo)[0]
    var isSecondAssetPortrait = false
    let secondTransform = secondAssetTrack.preferredTransform
    if (secondTransform.a == 0 && secondTransform.b == 1.0 && secondTransform.c == -1.0 && secondTransform.d == 0) {
        isSecondAssetPortrait = true
    }
    if (secondTransform.a == 0 && secondTransform.b == -1.0 && secondTransform.c == 1.0 && secondTransform.d == 0) {
        isSecondAssetPortrait = true
    }
    secondLayerInstruction.setTransform(secondAssetTrack.preferredTransform, at: blank.duration)
    secondLayerInstruction.setOpacity(0.0, at: CMTimeAdd(blank.duration, main.duration))

    let thirdLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
    let thirdAssetTrack = blank.tracks(withMediaType: AVMediaTypeVideo)[0]
    thirdLayerInstruction.setTransform(thirdAssetTrack.preferredTransform, at: CMTimeAdd(blank.duration, main.duration))

    mainInstruction.layerInstructions = [firstLayerInstruction, secondLayerInstruction, thirdLayerInstruction]

    var naturalSize = CGSize()
    if(isSecondAssetPortrait) {
        naturalSize = CGSize(width: secondAssetTrack.naturalSize.height, height: secondAssetTrack.naturalSize.width)
    } else {
        naturalSize = secondAssetTrack.naturalSize
    }

    let renderWidth = naturalSize.width
    let renderHeight = naturalSize.height

    let mainCompositionInst = AVMutableVideoComposition()
    mainCompositionInst.instructions = [mainInstruction]
    mainCompositionInst.frameDuration = CMTimeMake(1, 30)
    mainCompositionInst.renderSize = CGSize(width: renderWidth, height: renderHeight)

    exporter.outputURL = outputURL
    exporter.outputFileType = AVFileTypeMPEG4
    exporter.videoComposition = mainCompositionInst
    //exporter.shouldOptimizeForNetworkUse = true

    exporter.exportAsynchronously {
        if exporter.status == .completed {
            completionHandler(AVAsset(url: outputURL!), nil)
        } else {
            completionHandler(nil, ErrorType(rawValue: "Unable to export video."))
            if let error = exporter.error {
                Utils.log("Unable to export video. \(error)")
            }
        }
    }

Assuming the original 720p recording takes about 200 MB of space, adding a 4-second blank video at the beginning and end of the main video should not drastically change its size, and the processing should complete very quickly.
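To make that expectation concrete: 200 MB over roughly three minutes is about 9 Mbit/s, so eight extra seconds of blank footage should add only on the order of 9 MB. A minimal sketch for checking the source bitrate (assuming main is the loaded AVAsset from above):

    // Sanity-check the source bitrate; estimatedDataRate is in bits per second.
    let mainVideoTrack = main.tracks(withMediaType: AVMediaTypeVideo)[0]
    let bitsPerSecond = Double(mainVideoTrack.estimatedDataRate)
    let approxMegabytes = bitsPerSecond * CMTimeGetSeconds(main.duration) / 8 / 1_000_000
    print("source: \(bitsPerSecond / 1_000_000) Mbit/s, ~\(approxMegabytes) MB")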

However, the result is a video 2 to 2.5 times the size of the original (so 400-500 MB), and the processing takes far too long.

Please advise,

Thanks

小智 3

Here I have prepared a custom class to which you can pass your video names; add those videos to the bundle. After running the app, it will generate a new video file according to your requirements and place it in the app's Documents directory.


I prepared this demo using Swift 4.

//
//  ViewController.swift
//  SOVideoMergingDemo
//
//  Created by iOS Test User on 03/01/18.
//  Copyright © 2018 Test User. Ltd. All rights reserved.
//

import UIKit
import AVFoundation
import Photos

class ViewController : UIViewController {

    //--------------------------------------------------
    //MARK: - Properties
    //--------------------------------------------------

    var arrVideoAsset : [AVAsset] = []
    let video1 = "1"
    let video2 = "2"
    let outPutVideo = "MergedVideo.mp4"

    //--------------------------------------------------
    //MARK: - Custom Methods
    //--------------------------------------------------

    func getVideoURL(forVideo : String) -> URL {
        let videoPath = Bundle.main.path(forResource: forVideo, ofType: "mp4")
        return URL(fileURLWithPath: videoPath!)
    }

    //--------------------------------------------------

    func mergeVideos(arrVideoAsset : [AVAsset]) {

        let mixComposition = AVMutableComposition()

        // Create one video track per asset to insert into the composition.
        let firstTrack  = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
        let secondTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
        let thirdTrack  = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)

        // Cumulative start times of the second and third segments.
        let secondStart = arrVideoAsset[0].duration
        let thirdStart  = CMTimeAdd(arrVideoAsset[0].duration, arrVideoAsset[1].duration)

        do {
            try firstTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, arrVideoAsset[0].duration), of: arrVideoAsset[0].tracks(withMediaType: .video)[0], at: kCMTimeZero)
        } catch _ {
            print("Failed to load first track")
        }

        do {
            try secondTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, arrVideoAsset[1].duration), of: arrVideoAsset[1].tracks(withMediaType: .video)[0], at: secondStart)
        } catch _ {
            print("Failed to load second track")
        }

        do {
            try thirdTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, arrVideoAsset[2].duration), of: arrVideoAsset[2].tracks(withMediaType: .video)[0], at: thirdStart)
        } catch _ {
            print("Failed to load third track")
        }

        // This instruction covers the full merged timeline.
        let compositionInstruction = AVMutableVideoCompositionInstruction()
        compositionInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeAdd(thirdStart, arrVideoAsset[2].duration))

        // Layer instructions for the videos: hide each segment once the next one starts.
        let firstInstruction = videoCompositionInstructionForTrack(firstTrack!, asset: arrVideoAsset[0])
        firstInstruction.setOpacity(0.0, at: secondStart)
        let secondInstruction = videoCompositionInstructionForTrack(secondTrack!, asset: arrVideoAsset[1])
        secondInstruction.setOpacity(0.0, at: thirdStart)
        let thirdInstruction = videoCompositionInstructionForTrack(thirdTrack!, asset: arrVideoAsset[2])

        compositionInstruction.layerInstructions = [firstInstruction, secondInstruction, thirdInstruction]

        // By changing this width and height you affect the size of the merged
        // video. Calculate them carefully, as per your needs.
        let height = min((firstTrack?.naturalSize.height)!, (secondTrack?.naturalSize.height)!)
        let width  = min((firstTrack?.naturalSize.width)!, (secondTrack?.naturalSize.width)!)

        let mainComposition = AVMutableVideoComposition()
        mainComposition.instructions = [compositionInstruction]
        mainComposition.frameDuration = CMTimeMake(1, 30)
        mainComposition.renderSize = CGSize(width: width, height: height)

        let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
        exporter?.outputURL = URL(fileURLWithPath: getDocumentDirectoryPath() + "/" + outPutVideo)
        exporter?.outputFileType = AVFileType.mp4
        exporter?.shouldOptimizeForNetworkUse = true
        exporter?.videoComposition = mainComposition
        print(self.getDocumentDirectoryPath())

        exporter?.exportAsynchronously(completionHandler: {
            DispatchQueue.main.async {
                if exporter?.status == AVAssetExportSessionStatus.completed {
                    // The exporter has already written the merged file to outputURL.
                    print("Merged video saved at \(String(describing: exporter?.outputURL))")

                    // Uncomment this if you want to save the video to the Photos library.
//                    PHPhotoLibrary.shared().performChanges({
//                        PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: (exporter?.outputURL)!)
//                    }, completionHandler: { (success, error) in
//                        if success {
//                            let fetchOptions = PHFetchOptions()
//                            fetchOptions.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
//                            _ = PHAsset.fetchAssets(with: .video, options: fetchOptions).firstObject
//                        } else {
//                            print("Error in saving file to the Photos library -> \(String(describing: error?.localizedDescription))")
//                        }
//                    })
                } else {
                    print("Error -> \(String(describing: exporter?.error?.localizedDescription))")
                }
            }
        })
    }

    //--------------------------------------------------

    // Builds the layer instruction for a particular video track,
    // applying the source track's preferred transform (orientation).
    func videoCompositionInstructionForTrack(_ track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
        let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
        let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
        instruction.setTransform(assetTrack.preferredTransform, at: kCMTimeZero)
        return instruction
    }

    //--------------------------------------------------

    func getDocumentDirectoryPath() -> String {
        let arrPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
        return arrPaths[0]
    }

    //--------------------------------------------------
    //MARK: - View Life Cycle Methods
    //--------------------------------------------------

    override func viewDidLoad() {
        super.viewDidLoad()

        // Prepare the video assets (1.mp4 and 2.mp4 must be in the app bundle).
        arrVideoAsset.append(AVAsset(url: getVideoURL(forVideo: video1)))
        arrVideoAsset.append(AVAsset(url: getVideoURL(forVideo: video2)))
        arrVideoAsset.append(AVAsset(url: getVideoURL(forVideo: video1)))

        // Merge these videos.
        mergeVideos(arrVideoAsset: arrVideoAsset)
    }
}
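Note that the export above uses AVAssetExportPresetHighestQuality, which re-encodes the composition at the preset's own target bitrate and can therefore produce files much larger than the inputs, the same symptom the question describes. A minimal sketch (using the mixComposition from the code above) for listing the presets compatible with the composition so a cheaper one can be chosen:

    // List the export presets usable with this composition; a lower preset
    // (e.g. AVAssetExportPresetMediumQuality) yields noticeably smaller files.
    let compatiblePresets = AVAssetExportSession.exportPresets(compatibleWith: mixComposition)
    print(compatiblePresets)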