AVFoundation - Adding a blurred background to a video


I am working on a video editing application in Swift. In my case the output video currently looks like this: [screenshots]

I am trying to fill the black portions with a blur effect, like this: [screenshots]

I have searched but could not find any working solution. Any help would be much appreciated.


Swift 4 - Adding a blurred background to a video

1. Single video support ❤️
2. Multiple video merging support ❤️
3. Any canvas in any ratio ❤️
4. Save the final video to the camera roll ❤️
5. Handles all video orientations ❤️

I may be late with this answer, but I still could not find any solution for this anywhere, so sharing my work:

Download the sample code here.

Steps to add a blurred background to videos:

  1. Merge all videos without audio.
     a) A render area size is required.
     b) The scale and position of each video inside this area must be calculated, for aspectFill behaviour.
  2. Add a blur effect to the merged video.
  3. Place the videos one by one at the center of the blurred video.
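
A rough sketch of how the three calls below chain together, assuming you already have the source URLs and a canvas size (the names urls and canvas are illustrative, not from the sample project):

let videos = urls.map { AVURLAsset(url: $0) }   // source clips (hypothetical input)
let canvas = CGSize(width: 1280, height: 720)   // render area, any ratio you like

mergeVideos(videos, inArea: canvas) { error, mergedURL in
    guard error == nil, let mergedURL = mergedURL else { return }
    self.addBlurEffect(toVideo: AVURLAsset(url: mergedURL)) { error, blurredURL in
        guard error == nil, let blurredURL = blurredURL else { return }
        self.addAllVideosAtCenterOfBlur(videos: videos, blurVideo: AVURLAsset(url: blurredURL)) { error, finalURL in
            // finalURL is the finished video with the blurred background.
            // Each step must export to a distinct file URL, otherwise a later
            // export would delete its own input.
        }
    }
}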

Merging the videos

func mergeVideos(_ videos: Array<AVURLAsset>, inArea area:CGSize, completion: @escaping (_ error: Error?, _ url:URL?) -> Swift.Void) {

    // Create an AVMutableComposition object. This will hold our multiple AVMutableCompositionTracks.
    let mixComposition = AVMutableComposition()

    var instructionLayers : Array<AVMutableVideoCompositionLayerInstruction> = []

    for asset in videos {

        // Here we are creating the AVMutableCompositionTrack. See how we are adding a new track to our AVMutableComposition.
        let track = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)

        // Set the length of the track to the length of the asset and insert the asset into our newly created track at the current end of the composition, so the clips play back to back.
        if let videoTrack = asset.tracks(withMediaType: AVMediaType.video).first {


            /// Time after which this video's layer should be hidden (i.e. when this clip ends)
            let opacityStartTime: CMTime = CMTimeMakeWithSeconds(0, asset.duration.timescale)
            let opacityEndTime: CMTime = CMTimeAdd(mixComposition.duration, asset.duration)
            let hideAfter: CMTime = CMTimeAdd(opacityStartTime, opacityEndTime)


            let timeRange = CMTimeRangeMake(kCMTimeZero, asset.duration)
            try? track?.insertTimeRange(timeRange, of: videoTrack, at: mixComposition.duration)


            /// Layer instruction
            let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track!)
            layerInstruction.setOpacity(0.0, at: hideAfter)

            /// Scale and position for aspectFill within the given area
            let properties = scaleAndPositionInAspectFillMode(forTrack: videoTrack, inArea: area)


            /// Checking for orientation
            let videoOrientation: UIImageOrientation = self.getVideoOrientation(forTrack: videoTrack)
            let assetSize = self.assetSize(forTrack: videoTrack)

            if (videoOrientation == .down) {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi/2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = 0
                if properties.position.y == 0 {
                    xtranslation = -(assetSize.width - ((area.width/area.height) * assetSize.height))/2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((area.height/area.width) * assetSize.width))/2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - Concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if (videoOrientation == .left) {

                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((area.width/area.height) * assetSize.height))/2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((area.height/area.width) * assetSize.width))/2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - Concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if (videoOrientation == .right) {
                /// No need to rotate
                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                let translationTransform = CGAffineTransform(translationX: properties.position.x, y: properties.position.y)

                let finalTransform  = scaleTransform.concatenating(translationTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: CGFloat(Double.pi/2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = 0
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((area.width/area.height) * assetSize.height))/2.0
                }
                else {
                    ytranslation = -(assetSize.height - ((area.height/area.width) * assetSize.width))/2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - Concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }

            instructionLayers.append(layerInstruction)
        }
    }


    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
    mainInstruction.layerInstructions = instructionLayers

    let mainCompositionInst = AVMutableVideoComposition()
    mainCompositionInst.instructions = [mainInstruction]
    mainCompositionInst.frameDuration = CMTimeMake(1, 30)
    mainCompositionInst.renderSize = area

    // self.videoOutputURL is assumed to supply a fresh, unique file URL per export
    let url = self.videoOutputURL
    try? FileManager.default.removeItem(at: url)

    let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = url
    exporter?.outputFileType = .mp4
    exporter?.videoComposition = mainCompositionInst
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.exportAsynchronously(completionHandler: {
        if let anError = exporter?.error {
            completion(anError, nil)
        }
        else if exporter?.status == AVAssetExportSessionStatus.completed {
            completion(nil, url)
        }
    })
}
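The helper methods used above - scaleAndPositionInAspectFillMode, scaleAndPositionInAspectFitMode, getVideoOrientation and assetSize - ship with the downloadable sample project. A minimal sketch of what they might look like, as methods inside the same class (my reconstruction, not necessarily the author's exact code):

/// Natural size of the track corrected for its preferred transform
/// (portrait clips report a landscape naturalSize).
func assetSize(forTrack track: AVAssetTrack) -> CGSize {
    let size = track.naturalSize.applying(track.preferredTransform)
    return CGSize(width: abs(size.width), height: abs(size.height))
}

/// Guess the capture orientation from the preferred transform's translation.
/// Mapping chosen so that .right means "no rotation needed", matching the
/// branches above; the sample project may use a different convention.
func getVideoOrientation(forTrack track: AVAssetTrack) -> UIImageOrientation {
    let size = track.naturalSize
    let t = track.preferredTransform
    if size.width == t.tx && size.height == t.ty { return .left }
    else if t.tx == 0 && t.ty == 0 { return .right }
    else if t.tx == 0 && t.ty == size.width { return .down }
    return .up
}

/// Uniform scale + centered position so the track fills the area (aspectFill).
func scaleAndPositionInAspectFillMode(forTrack track: AVAssetTrack, inArea area: CGSize) -> (scale: CGSize, position: CGPoint) {
    let trackSize = assetSize(forTrack: track)
    let factor = max(area.width / trackSize.width, area.height / trackSize.height)
    let scaled = CGSize(width: trackSize.width * factor, height: trackSize.height * factor)
    let position = CGPoint(x: (area.width - scaled.width) / 2.0, y: (area.height - scaled.height) / 2.0)
    return (CGSize(width: factor, height: factor), position)
}

/// Same idea with min, so the track fits inside the area (aspectFit).
func scaleAndPositionInAspectFitMode(forTrack track: AVAssetTrack, inArea area: CGSize) -> (scale: CGSize, position: CGPoint) {
    let trackSize = assetSize(forTrack: track)
    let factor = min(area.width / trackSize.width, area.height / trackSize.height)
    let scaled = CGSize(width: trackSize.width * factor, height: trackSize.height * factor)
    let position = CGPoint(x: (area.width - scaled.width) / 2.0, y: (area.height - scaled.height) / 2.0)
    return (CGSize(width: factor, height: factor), position)
}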

Adding the blur effect

func addBlurEffect(toVideo asset:AVURLAsset, completion: @escaping (_ error: Error?, _ url:URL?) -> Swift.Void) {

        let filter = CIFilter(name: "CIGaussianBlur")
        let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
            // Clamp to avoid blurring transparent pixels at the image edges
            let source: CIImage? = request.sourceImage.clampedToExtent()
            filter?.setValue(source, forKey: kCIInputImageKey)

            filter?.setValue(10.0, forKey: kCIInputRadiusKey)

            // Crop the blurred output to the bounds of the original image
            let output: CIImage? = filter?.outputImage?.cropped(to: request.sourceImage.extent)

            // Provide the filter output to the composition
            if let anOutput = output {
                request.finish(with: anOutput, context: nil)
            }
        })

        // self.videoOutputURL is assumed to supply a fresh, unique file URL per export
        let url = self.videoOutputURL
        // Remove any previous file at that path
        try? FileManager.default.removeItem(at: url)

        let exporter = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetHighestQuality)

        // Assign the video composition that applies the blur filter to every frame
        exporter?.videoComposition = composition
        exporter?.outputFileType = .mp4
        exporter?.outputURL = url
        exporter?.exportAsynchronously(completionHandler: {
            if let anError = exporter?.error {
                completion(anError, nil)
            }
            else if exporter?.status == AVAssetExportSessionStatus.completed {
                completion(nil, url)
            }
        })
}
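Incidentally, the same kind of AVVideoComposition built with applyingCIFiltersWithHandler can also be attached to an AVPlayerItem if you want to preview the blur without exporting - a minimal sketch, assuming an existing asset (iOS 11+ for applyingFilter(_:parameters:)):

let item = AVPlayerItem(asset: asset)
item.videoComposition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
    // Clamp, blur, and crop back to the original extent, as in the export path
    let blurred = request.sourceImage
        .clampedToExtent()
        .applyingFilter("CIGaussianBlur", parameters: [kCIInputRadiusKey: 10.0])
        .cropped(to: request.sourceImage.extent)
    request.finish(with: blurred, context: nil)
})
let player = AVPlayer(playerItem: item)   // play in an AVPlayerLayer to preview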

Placing the videos one by one at the center of the blurred video
This will give you your final video URL.

func addAllVideosAtCenterOfBlur(videos: Array<AVURLAsset>, blurVideo: AVURLAsset, completion: @escaping (_ error: Error?, _ url:URL?) -> Swift.Void) {


    // Create an AVMutableComposition object. This will hold our multiple AVMutableCompositionTracks.
    let mixComposition = AVMutableComposition()

    var instructionLayers : Array<AVMutableVideoCompositionLayerInstruction> = []


    // Add blur video first
    let blurVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
    // Blur layer instruction
    if let videoTrack = blurVideo.tracks(withMediaType: AVMediaType.video).first {
        let timeRange = CMTimeRangeMake(kCMTimeZero, blurVideo.duration)
        try? blurVideoTrack?.insertTimeRange(timeRange, of: videoTrack, at: kCMTimeZero)
    }

    /// Add other videos at center of the blur video
    var startAt = kCMTimeZero
    for asset in videos {

        /// Time Range of asset
        let timeRange = CMTimeRangeMake(kCMTimeZero, asset.duration)

        // Here we are creating the AVMutableCompositionTrack. See how we are adding a new track to our AVMutableComposition.
        let track = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)

        // Set the length of the track to the length of the asset and insert the asset into our newly created track at startAt, so the clips play back to back.
        if let videoTrack = asset.tracks(withMediaType: AVMediaType.video).first {

            /// Time after which this video's layer should be hidden (i.e. when this clip ends)
            let opacityStartTime: CMTime = CMTimeMakeWithSeconds(0, asset.duration.timescale)
            let opacityEndTime: CMTime = CMTimeAdd(startAt, asset.duration)
            let hideAfter: CMTime = CMTimeAdd(opacityStartTime, opacityEndTime)

            /// Adding video track
            try? track?.insertTimeRange(timeRange, of: videoTrack, at: startAt)

            /// Layer instruction
            let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track!)
            layerInstruction.setOpacity(0.0, at: hideAfter)

            /// Scale and position for aspectFit within the canvas
            /// (size is assumed to be an instance property holding the canvas size,
            /// the same value passed as area to mergeVideos)
            let properties = scaleAndPositionInAspectFitMode(forTrack: videoTrack, inArea: size)

            /// Checking for orientation
            let videoOrientation: UIImageOrientation = self.getVideoOrientation(forTrack: videoTrack)
            let assetSize = self.assetSize(forTrack: videoTrack)

            if (videoOrientation == .down) {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi/2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = 0
                if properties.position.y == 0 {
                    xtranslation = -(assetSize.width - ((size.width/size.height) * assetSize.height))/2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((size.height/size.width) * assetSize.width))/2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - Concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if (videoOrientation == .left) {

                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((size.width/size.height) * assetSize.height))/2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((size.height/size.width) * assetSize.width))/2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - Concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if (videoOrientation == .right) {
                /// No need to rotate
                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                let translationTransform = CGAffineTransform(translationX: properties.position.x, y: properties.position.y)

                let finalTransform  = scaleTransform.concatenating(translationTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: CGFloat(Double.pi/2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = 0
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((size.width/size.height) * assetSize.height))/2.0
                }
                else {
                    ytranslation = -(assetSize.height - ((size.height/size.width) * assetSize.width))/2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - Concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }

            instructionLayers.append(layerInstruction)
        }

        /// Adding audio
        if let audioTrack = asset.tracks(withMediaType: AVMediaType.audio).first {
            let aTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)
            try? aTrack?.insertTimeRange(timeRange, of: audioTrack, at: startAt)
        }

        // Increase the startAt time
        startAt = CMTimeAdd(startAt, asset.duration)
    }


    /// Blur layer instruction - appended last so the blurred track renders behind
    /// the centered clips (layer instructions are layered top to bottom)
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: blurVideoTrack!)
    instructionLayers.append(layerInstruction)

    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, blurVideo.duration)
    mainInstruction.layerInstructions = instructionLayers

    let mainCompositionInst = AVMutableVideoComposition()
    mainCompositionInst.instructions = [mainInstruction]
    mainCompositionInst.frameDuration = CMTimeMake(1, 30)
    mainCompositionInst.renderSize = size

    // self.videoOutputURL is assumed to supply a fresh, unique file URL per export
    let url = self.videoOutputURL
    try? FileManager.default.removeItem(at: url)

    let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = url
    exporter?.outputFileType = .mp4
    exporter?.videoComposition = mainCompositionInst
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.exportAsynchronously(completionHandler: {
        if let anError = exporter?.error {
            completion(anError, nil)
        }
        else if exporter?.status == AVAssetExportSessionStatus.completed {
            completion(nil, url)
        }
    })
}
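For feature 4 from the list above - saving the final video to the camera roll - the sample presumably uses the Photos framework; a minimal sketch (requires a photo library usage description such as NSPhotoLibraryUsageDescription in Info.plist):

import Photos

func saveToCameraRoll(_ url: URL, completion: @escaping (Bool, Error?) -> Void) {
    PHPhotoLibrary.shared().performChanges({
        // Create a new asset in the user's photo library from the exported file
        PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: url)
    }, completionHandler: completion)
}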

For the helper methods used in the code above, please download the attached sample code.
If there is a shorter way to do this, I would also love to hear it, because I have to export the video three times to achieve this.