I want to record a series of clips that play back seamlessly, either concatenated in a video player or via ffmpeg -f concat.

In either case, right now I get a very noticeable audio hiccup at every segment join point.

My current strategy is to maintain two AssetWriter instances. At each cut-off point I start a new writer, wait until it is ready, and then start giving it samples. When the video and audio samples are complete at that point in time, I close the old writer.

How do I modify this to get continuous clip recording? What is the root cause of the problem?
import Foundation
import UIKit
import AVFoundation

class StreamController: UIViewController, AVCaptureAudioDataOutputSampleBufferDelegate, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var previewView: UIView!

    var closingVideoInput: AVAssetWriterInput?
    var closingAudioInput: AVAssetWriterInput?
    var closingAssetWriter: AVAssetWriter?

    var currentVideoInput: AVAssetWriterInput?
    var currentAudioInput: AVAssetWriterInput?
    var currentAssetWriter: AVAssetWriter?

    var nextVideoInput: AVAssetWriterInput?
    var nextAudioInput: AVAssetWriterInput?
    var nextAssetWriter: AVAssetWriter?

    var previewLayer: AVCaptureVideoPreviewLayer?
    var videoHelper: VideoHelper?
    var startTime: NSTimeInterval = 0

    override func viewDidLoad() {
        super.viewDidLoad()
        startTime = NSDate().timeIntervalSince1970
        createSegmentWriter()
        videoHelper = VideoHelper()
        videoHelper!.delegate = self
        videoHelper!.startSession()
        NSTimer.scheduledTimerWithTimeInterval(5, target: self, selector: "createSegmentWriter", userInfo: nil, repeats: true)
    }
    func createSegmentWriter() {
        print("Creating segment writer at t=\(NSDate().timeIntervalSince1970 - self.startTime)")
        nextAssetWriter = try! AVAssetWriter(URL: NSURL(fileURLWithPath: OutputFileNameHelper.instance.pathForOutput()), fileType: AVFileTypeMPEG4)
        nextAssetWriter!.shouldOptimizeForNetworkUse = true

        let videoSettings: [String:AnyObject] = [AVVideoCodecKey: AVVideoCodecH264, AVVideoWidthKey: 960, AVVideoHeightKey: 540]
        nextVideoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
        nextVideoInput!.expectsMediaDataInRealTime = true
        nextAssetWriter?.addInput(nextVideoInput!)

        let audioSettings: [String:AnyObject] = [
            AVFormatIDKey: NSNumber(unsignedInt: kAudioFormatMPEG4AAC),
            AVSampleRateKey: 44100.0,
            AVNumberOfChannelsKey: 2,
        ]
        nextAudioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
        nextAudioInput!.expectsMediaDataInRealTime = true
        nextAssetWriter?.addInput(nextAudioInput!)

        nextAssetWriter!.startWriting()
    }
    override func viewDidAppear(animated: Bool) {
        super.viewDidAppear(animated)
        previewLayer = AVCaptureVideoPreviewLayer(session: videoHelper!.captureSession)
        previewLayer!.frame = self.previewView.bounds
        previewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
        // Only force the orientation if the connection actually supports it.
        if previewLayer?.connection?.supportsVideoOrientation == true {
            previewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
        }
        self.previewView.layer.addSublayer(previewLayer!)
    }
    func closeWriter() {
        if videoFinished && audioFinished {
            let outputFile = closingAssetWriter?.outputURL.pathComponents?.last
            closingAssetWriter?.finishWritingWithCompletionHandler() {
                let delta = NSDate().timeIntervalSince1970 - self.startTime
                print("segment \(outputFile) finished at t=\(delta)")
            }
            self.closingAudioInput = nil
            self.closingVideoInput = nil
            self.closingAssetWriter = nil
            audioFinished = false
            videoFinished = false
        }
    }

    func closingVideoFinished() {
        if closingVideoInput != nil {
            videoFinished = true
            closeWriter()
        }
    }

    func closingAudioFinished() {
        if closingAudioInput != nil {
            audioFinished = true
            closeWriter()
        }
    }

    var closingTime: CMTime = kCMTimeZero
    var audioFinished = false
    var videoFinished = false
    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBufferRef, fromConnection connection: AVCaptureConnection!) {
        let sampleTime: CMTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)

        // Once the next writer has started (its status is no longer .Unknown), rotate the
        // writers: the current writer becomes the closing writer, the next becomes current.
        if let nextWriter = nextAssetWriter {
            if nextWriter.status != .Unknown {
                print("Switching asset writers at t=\(NSDate().timeIntervalSince1970 - self.startTime)")
                closingAssetWriter = currentAssetWriter
                closingVideoInput = currentVideoInput
                closingAudioInput = currentAudioInput
                currentAssetWriter = nextAssetWriter
                currentVideoInput = nextVideoInput
                currentAudioInput = nextAudioInput
                nextAssetWriter = nil
                nextVideoInput = nil
                nextAudioInput = nil

                // Samples timestamped before this point belong to the closing writer;
                // the new writer's timeline starts here.
                closingTime = sampleTime
                currentAssetWriter!.startSessionAtSourceTime(sampleTime)
            }
        }

        if currentAssetWriter != nil {
            if let _ = captureOutput as? AVCaptureVideoDataOutput {
                if (CMTimeCompare(sampleTime, closingTime) < 0) {
                    if closingVideoInput?.readyForMoreMediaData == true {
                        closingVideoInput?.appendSampleBuffer(sampleBuffer)
                    }
                } else {
                    closingVideoFinished()
                    if currentVideoInput?.readyForMoreMediaData == true {
                        currentVideoInput?.appendSampleBuffer(sampleBuffer)
                    }
                }
            } else if let _ = captureOutput as? AVCaptureAudioDataOutput {
                if (CMTimeCompare(sampleTime, closingTime) < 0) {
                    // Audio before the cut point still belongs to the closing segment.
                    if closingAudioInput?.readyForMoreMediaData == true {
                        closingAudioInput?.appendSampleBuffer(sampleBuffer)
                    }
                } else {
                    closingAudioFinished()
                    if currentAudioInput?.readyForMoreMediaData == true {
                        currentAudioInput?.appendSampleBuffer(sampleBuffer)
                    }
                }
            }
        }
    }
    override func shouldAutorotate() -> Bool {
        return true
    }

    override func supportedInterfaceOrientations() -> UIInterfaceOrientationMask {
        return [UIInterfaceOrientationMask.LandscapeRight]
    }
}
I think the root cause is that the video and audio CMSampleBuffers cover different time intervals. You need to split and join the audio CMSampleBuffers so that they slot seamlessly into your AVAssetWriter's timeline, which should probably be driven by the video presentation timestamps.
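To illustrate the mismatch (this example is not from the original post): an audio buffer from AVCaptureAudioDataOutput covers a span of time rather than an instant. For example, a 1024-frame buffer at 44.1 kHz covers roughly 23 ms, so a cut time taken from a video frame's presentation timestamp will usually land somewhere inside an audio buffer rather than exactly on a buffer boundary. A quick way to see this is to log the interval each buffer covers from the capture callback:

import CoreMedia

// Illustrative helper: print the time interval a sample buffer covers.
// Video buffers from the camera may report an invalid duration, so this is
// mainly informative for the audio buffers.
func logBufferSpan(buffer: CMSampleBuffer, label: String) {
    let start = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(buffer))
    let duration = CMTimeGetSeconds(CMSampleBufferGetDuration(buffer))
    print("\(label): [\(start), \(start + duration)), \(CMSampleBufferGetNumSamples(buffer)) samples")
}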
Why does the audio have to change and not the video? That seems asymmetric, but I guess it is because the audio has the higher sample rate.

p.s. Actually, creating the new split sample buffers looks intimidating: CMSampleBufferCreate takes a lot of arguments. CMSampleBufferCopySampleBufferForRange is probably easier and more efficient to use.
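As a rough sketch of that approach (the helper below is hypothetical and not from the original post; it assumes uncompressed audio buffers whose samples are evenly spaced, as delivered by AVCaptureAudioDataOutput), an audio buffer that straddles the cut time could be split with CMSampleBufferCopySampleBufferForRange:

import CoreMedia

// Hypothetical sketch: split an audio sample buffer at cutTime, so the head can be
// appended to the closing writer's input and the tail to the new writer's input.
// Error handling of the returned OSStatus values is omitted.
func splitAudioBuffer(buffer: CMSampleBuffer, atTime cutTime: CMTime) -> (head: CMSampleBuffer?, tail: CMSampleBuffer?) {
    let start = CMSampleBufferGetPresentationTimeStamp(buffer)
    let duration = CMTimeGetSeconds(CMSampleBufferGetDuration(buffer))
    let sampleCount = CMSampleBufferGetNumSamples(buffer)
    if duration <= 0 || sampleCount == 0 {
        return (buffer, nil)
    }

    // Number of samples that fall before the cut point, assuming evenly spaced samples.
    let elapsed = CMTimeGetSeconds(CMTimeSubtract(cutTime, start))
    let headCount = Int(Double(sampleCount) * (elapsed / duration))
    if headCount <= 0 {
        return (nil, buffer)        // the whole buffer lies after the cut
    }
    if headCount >= sampleCount {
        return (buffer, nil)        // the whole buffer lies before the cut
    }

    var head: CMSampleBuffer? = nil
    var tail: CMSampleBuffer? = nil
    CMSampleBufferCopySampleBufferForRange(kCFAllocatorDefault, buffer, CFRangeMake(0, headCount), &head)
    CMSampleBufferCopySampleBufferForRange(kCFAllocatorDefault, buffer, CFRangeMake(headCount, sampleCount - headCount), &tail)
    return (head, tail)
}

In captureOutput, an audio buffer whose range straddles closingTime could then be split, with the head appended to closingAudioInput and the tail to currentAudioInput before closingAudioFinished() is called, so that neither segment is missing the samples around the cut.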