Tags: video, avfoundation, ios, avasset, swift
I'm trying to merge stickers onto a video. The merge itself now runs for both landscape and portrait videos. The problem is that for landscape I can't keep the sticker's scale and origin consistent with the video's natural size, and for portrait the result is a complete mess. If anyone wants to look at the code and test it, see the GitHub link.
Here are the two main methods that matter.

The first one gets the video's resolution and, based on it, sizes the video container view that also holds the images:
private func setupVideoPlayer() {
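    // Pin the container view to the aspect-fit size of the video so the
    // sticker overlay coordinates line up with the visible video exactly.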
    let currentFrameSize = currentVideoFrameSize()
    
    videoHeightConstraint.constant = currentFrameSize.height
    videoWidthConstraint.constant = currentFrameSize.width
    
    videoPlayer = AVPlayer(playerItem: playerItem)
    
    let playerLayer = AVPlayerLayer(player: videoPlayer)
    playerLayer.frame = CGRect(x: 0, y: 0, width: currentFrameSize.width, height: currentFrameSize.height)
    playerLayer.videoGravity = .resizeAspect
    
    videoContentView.layer.addSublayer(playerLayer)
    videoContentView.bringSubviewToFront(stickersContentView)
    videoPlayer?.play()
}
private func currentVideoFrameSize() -> CGSize {
    guard let asset = playerItem?.asset as? AVURLAsset, let track = asset.tracks(withMediaType: AVMediaType.video).first else { return .zero }
    let trackSize      = track.naturalSize
    let videoViewSize  = videoContentView.superview!.bounds.size
    let trackRatio     = trackSize.width / trackSize.height
    let videoViewRatio = videoViewSize.width / videoViewSize.height
    
    var newSize: CGSize
    if videoViewRatio > trackRatio {
        newSize = CGSize(width: trackSize.width * videoViewSize.height / trackSize.height, height: videoViewSize.height)
    } else {
        newSize = CGSize(width: videoViewSize.width, height: trackSize.height * videoViewSize.width / trackSize.width)
    }
    
    let assetInfo = VideoManager.shared.orientationFromTransform(transform: track.preferredTransform)
    if assetInfo.isPortrait {
        // naturalSize is reported before preferredTransform is applied, so swap
        swap(&newSize.width, &newSize.height)
    }
    
    return newSize
}
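Both listings rely on an `orientationFromTransform` helper (called through `VideoManager.shared` above) that isn't shown. As a reference, here is a minimal sketch of the common pattern for such a helper, which inspects the track's `preferredTransform`; this is an assumption about what the project does, not its actual code:

func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
    var assetOrientation = UIImage.Orientation.up
    var isPortrait = false
    if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
        assetOrientation = .right   // 90° rotation: recorded in portrait
        isPortrait = true
    } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
        assetOrientation = .left    // 270° rotation: portrait, device flipped
        isPortrait = true
    } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
        assetOrientation = .up      // identity: landscape
    } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
        assetOrientation = .down    // 180° rotation: landscape, device flipped
    }
    return (assetOrientation, isPortrait)
}

The `isPortrait` flag from this tuple is what both methods use to decide whether to swap width and height.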
For the merging:
func makeVideoFrom(video: VideoData, images: [VideoOverlayImage], completion:@escaping Completion) -> Void {
    var outputSize: CGSize = .zero
    var insertTime: CMTime = .zero
    var arrayLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
    var arrayLayerImages: [CALayer] = []
                    
    // Init composition
    let mixComposition = AVMutableComposition()
    
    // Get video track
    guard let videoTrack = video.asset.tracks(withMediaType: AVMediaType.video).first else { return }
    
    let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
    
    var videoSize = videoTrack.naturalSize
    if assetInfo.isPortrait {
        // Swap to upright dimensions; naturalSize is pre-rotation
        videoSize = CGSize(width: videoTrack.naturalSize.height, height: videoTrack.naturalSize.width)
    }
    if videoSize.height > outputSize.height {
        outputSize = videoSize
    }
    if outputSize.width == 0 || outputSize.height == 0 {
        outputSize = defaultSize
    }
    
    // Get audio track, if the asset has one
    let audioTrack = video.asset.tracks(withMediaType: AVMediaType.audio).first
    
    // Init video & audio composition track
    let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
                                                               preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
    
    let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio,
                                                               preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
    
    do {
        let startTime = CMTime.zero
        let duration = video.asset.duration
        
        // Add video track to video composition at specific time
        try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
                                                  of: videoTrack,
                                                  at: insertTime)
        
        // Add audio track to audio composition at specific time
        if let audioTrack = audioTrack {
            try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
                                                      of: audioTrack,
                                                      at: insertTime)
        }
        
        // Add instruction for video track
        let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!,
                                                                   asset: video.asset,
                                                                   standardSize: outputSize,
                                                                   atTime: insertTime)
        
        // Hide video track before changing to new track
        let endTime = CMTimeAdd(insertTime, duration)
        let timeScale = video.asset.duration.timescale
        let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
        
        layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
        
        arrayLayerInstructions.append(layerInstruction)
        
        // Increase the insert time
        insertTime = CMTimeAdd(insertTime, duration)
    } catch {
        print("Load track error")
    }
    
    // Merge
    for image in images {
        let animatedImageLayer = CALayer()
                    
        // Uniform scale from the on-screen preview frame (video.frame) to the
        // export render size; the preview width/height are swapped for portrait
        // because naturalSize is reported before the rotation is applied.
        let aspectWidth  = assetInfo.isPortrait ? outputSize.width/video.frame.height : outputSize.width/video.frame.width
        let aspectHeight = assetInfo.isPortrait ? outputSize.height/video.frame.width : outputSize.height/video.frame.height
        let aspectRatio = min(aspectWidth, aspectHeight)
        let scaledWidth  = image.frame.width * aspectRatio
        let scaledHeight = image.frame.height * aspectRatio
        
        let cx = (image.frame.minX * aspectRatio) + (scaledWidth / 2)
        let cy = (image.frame.minY * aspectRatio) + (scaledHeight / 2)
        var iFrame = image.frame
        iFrame.size.width = scaledWidth
        iFrame.size.height = scaledHeight // was scaledWidth: a typo that distorted non-square stickers
        animatedImageLayer.frame = iFrame
        animatedImageLayer.position = CGPoint(x: assetInfo.isPortrait ? cy : cx, y: assetInfo.isPortrait ? cx : cy)
        
        if let animatedURL = URL(string: image.url), let animation = animatedImage(with: animatedURL) {
            animatedImageLayer.add(animation, forKey: "contents")
        }
        
        arrayLayerImages.append(animatedImageLayer)
    }
    
    // Init Video layer
    let videoLayer = CALayer()
    videoLayer.frame = CGRect(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
    
    let parentLayer = CALayer()
    parentLayer.frame = CGRect(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
    
    parentLayer.addSublayer(videoLayer)
    
    // Add Image layers
    arrayLayerImages.forEach { parentLayer.addSublayer($0) }
    
    // Main video composition instruction
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: insertTime)
    mainInstruction.layerInstructions = arrayLayerInstructions
            
    // Main video composition
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.renderSize = outputSize
    mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    
    // Export to file
    let path = NSTemporaryDirectory().appending("stickers_video_merge.mov")
    let exportURL = URL(fileURLWithPath: path)
    
    // Remove file if existed
    FileManager.default.removeItemIfExisted(exportURL)
    
    let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = exportURL
    exporter?.outputFileType = .mov
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.videoComposition = mainComposition
    
    // Do export
    exporter?.exportAsynchronously {
        DispatchQueue.main.async {
            self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
        }
    }
}
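The listing also calls `videoCompositionInstructionForTrack(track:asset:standardSize:atTime:)`, which isn't shown. A minimal sketch of what such a helper typically does, assuming the common scale-to-fit approach; the project's real implementation may differ:

func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset, standardSize: CGSize, atTime: CMTime) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    guard let assetTrack = asset.tracks(withMediaType: .video).first else { return instruction }

    let transform = assetTrack.preferredTransform
    let assetInfo = orientationFromTransform(transform: transform)

    // The upright width of a portrait asset is its natural *height*
    let naturalWidth = assetInfo.isPortrait ? assetTrack.naturalSize.height : assetTrack.naturalSize.width
    let scaleToFitRatio = standardSize.width / naturalWidth
    let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)

    // Apply the rotation first, then scale the upright video to the render size
    instruction.setTransform(transform.concatenating(scaleFactor), at: atTime)
    return instruction
}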
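`removeItemIfExisted(_:)` is likewise a small custom `FileManager` extension; presumably something along these lines:

extension FileManager {
    /// Deletes the file at `url` if it exists, ignoring the error when it doesn't.
    func removeItemIfExisted(_ url: URL) {
        guard fileExists(atPath: url.path) else { return }
        try? removeItem(at: url)
    }
}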
Most of the problem:

You can also check the develop branch, which doesn't need to keep any ratio but still has problems with the image frame and portrait videos. Anyone can run it on a device and see the actual problem.