Tags: buffer, uiimage, avassetwriter, avmutablecomposition, swift
I am trying to combine a single video with a single image. This is NOT an attempt to combine many images into a single video.
I am using AVMutableComposition to combine the tracks. My app is able to combine videos and images (well, as of right now, combining videos works fine!). I use AVAssetWriter to turn a single image into a video (I believe this is where my problem is, but I'm not 100% sure). I then save that to the app's documents directory. From there, I access it inside my merger and combine the real video with the image that has now become a video.
The flow:
User selects an image ->
Convert the image into a video with AVAssetWriter ->
Merge it with the video I have preset ->
Result: one video made from the image the user selected and the preset video (a rough sketch of this wiring follows).
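Purely to illustrate that flow, here is a minimal sketch of the wiring, using the MyConverter class and merge() function shown below. Note that selectedImage stands in for whatever UIImage the user picked, and self.asset is the property merge() reads the image-video from; both are assumed names, not verified code.

let converter = MyConverter(image: selectedImage)
// Grab the URL up front: the outputURL getter below deletes whatever file is at the path.
let movieURL = converter.outputURL
converter.build {
    // The image has been written out as a short video; load it and merge.
    self.asset = AVAsset(URL: movieURL)
    self.merge()
}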
My problem: my code produces blank space where the image should be inside the video. That is, my ImageConverter file does convert the image into a video, but I only see the LAST frame as the image, while every other frame is transparent, as if the picture weren't there. So if I convert the image into a 5-second video (at, say, 30 frames/sec), I see blank space for (30*5)-1 frames and then, at the very end, the picture finally appears in the last frame. I am looking for guidance on how to make a single image into a video, or on how to combine a video and an image WITHOUT converting the image into a video. Thanks!
The merge file is here:
func merge() {
    if let firstAsset = controller.firstAsset, secondAsset = self.asset {
        // 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
        let mixComposition = AVMutableComposition()
        let firstTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo,
            preferredTrackID: Int32(kCMPersistentTrackID_Invalid))

        // 2 - Insert the first 8 seconds of the first asset
        do {
            try firstTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, CMTime(seconds: 8, preferredTimescale: 600)),
                ofTrack: firstAsset.tracksWithMediaType(AVMediaTypeVideo)[0],
                atTime: kCMTimeZero)
        } catch _ {
            print("Failed to load first track")
        }

        // Then append the image-video
        do {
            // HERE THE TIME IS 0.666667, BUT SHOULD BE 0
            print(CMTimeGetSeconds(secondAsset.duration), CMTimeGetSeconds(firstTrack.timeRange.duration))
            try firstTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, secondAsset.duration),
                ofTrack: secondAsset.tracksWithMediaType(AVMediaTypeVideo)[0],
                atTime: firstTrack.timeRange.duration)
        } catch _ {
            print("Failed to load second track")
        }

        // Finally append the remainder of the first asset (everything after the 8-second mark)
        do {
            let eightSeconds = CMTime(seconds: 8, preferredTimescale: 600)
            try firstTrack.insertTimeRange(CMTimeRangeMake(eightSeconds, CMTimeSubtract(firstAsset.duration, eightSeconds)),
                ofTrack: firstAsset.tracksWithMediaType(AVMediaTypeVideo)[0],
                atTime: firstTrack.timeRange.duration)
        } catch _ {
            print("Failed to load the remainder of the first track")
        }

        // 3 - Audio track
        if let loadedAudioAsset = controller.audioAsset {
            let audioTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeAudio,
                preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
            do {
                try audioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstAsset.duration),
                    ofTrack: loadedAudioAsset.tracksWithMediaType(AVMediaTypeAudio)[0],
                    atTime: kCMTimeZero)
            } catch _ {
                print("Failed to load Audio track")
            }
        }

        // 4 - Get path
        let documentDirectory = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0]
        let savePath = (documentDirectory as NSString).stringByAppendingPathComponent("mergeVideo.mov")
        let url = NSURL(fileURLWithPath: savePath)
        _ = try? NSFileManager().removeItemAtURL(url)

        // 5 - Create Exporter
        print("exporting")
        guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
        exporter.outputURL = url
        exporter.outputFileType = AVFileTypeQuickTimeMovie
        exporter.shouldOptimizeForNetworkUse = false
        // exporter.videoComposition = mainComposition // mainComposition (an AVMutableVideoComposition) is built elsewhere and not shown here

        // 6 - Perform the Export
        controller.currentlyEditing = true
        exporter.exportAsynchronouslyWithCompletionHandler() {
            dispatch_async(dispatch_get_main_queue()) {
                print("done")
                self.controller.currentlyEditing = false
                self.controller.merged = true
                self.button.blurView.superview?.hidden = true
                self.controller.player.replaceCurrentItemWithPlayerItem(AVPlayerItem(URL: url))
                self.controller.firstAsset = AVAsset(URL: url)
            }
        }
    }
}

func exportDidFinish(session: AVAssetExportSession) {
    if session.status == AVAssetExportSessionStatus.Failed {
        print(session.error)
    }
    if session.status == AVAssetExportSessionStatus.Completed {
        print("succeeded")
    }
}
The image converter is here:
class MyConverter: NSObject {

    var image: UIImage!

    convenience init(image: UIImage) {
        self.init()
        self.image = image
    }

    var outputURL: NSURL {
        let documentDirectory = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0]
        let savePath = (documentDirectory as NSString).stringByAppendingPathComponent("mergeVideo-pic.mov")
        return getURL(savePath)
    }

    func getURL(path: String) -> NSURL {
        let movieDestinationUrl = NSURL(fileURLWithPath: path)
        _ = try? NSFileManager().removeItemAtURL(movieDestinationUrl)
        let url = NSURL(fileURLWithPath: path)
        return url
    }

    func build(completion: () -> Void) {
        guard let videoWriter = try? AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie) else {
            fatalError("AVAssetWriter error")
        }

        let outputSettings = [AVVideoCodecKey : AVVideoCodecH264,
                              AVVideoWidthKey : NSNumber(float: Float(image.size.width)),
                              AVVideoHeightKey : NSNumber(float: Float(image.size.height))]

        guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
            fatalError("Negative : Can't apply the Output settings...")
        }

        let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
        let sourcePixelBufferAttributesDictionary = [
            kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
            kCVPixelBufferWidthKey as String : NSNumber(float: Float(image.size.width)),
            kCVPixelBufferHeightKey as String : NSNumber(float: Float(image.size.height))]
        let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
            sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)

        if videoWriter.canAddInput(videoWriterInput) {
            videoWriter.addInput(videoWriterInput)
        }
        if videoWriter.startWriting() {
            videoWriter.startSessionAtSourceTime(kCMTimeZero)
            assert(pixelBufferAdaptor.pixelBufferPool != nil)
        }

        let media_queue = dispatch_queue_create("mediaInputQueue", nil)
        videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
            var appendSucceeded = true
            // Time HERE IS ZERO, but in the merge file it is 0.66667
            let presentationTime = CMTimeMake(0, 600)
            var pixelBuffer: CVPixelBuffer? = nil
            let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
            if let pixelBuffer = pixelBuffer where status == 0 {
                let managedPixelBuffer = pixelBuffer
                CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)
                let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
                let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
                let context = CGBitmapContextCreate(data, Int(self.image.size.width), Int(self.image.size.height), 8,
                    CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace,
                    CGImageAlphaInfo.PremultipliedFirst.rawValue)
                CGContextClearRect(context, CGRectMake(0, 0, CGFloat(self.image.size.width), CGFloat(self.image.size.height)))
                CGContextDrawImage(context, CGRectMake(0, 0, self.image.size.width, self.image.size.height), self.image.CGImage)
                CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)
                appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
            } else {
                print("Failed to allocate pixel buffer")
                appendSucceeded = false
            }
            if !appendSucceeded {
                print("append failed")
            }
            videoWriterInput.markAsFinished()
            videoWriter.finishWritingWithCompletionHandler { () -> Void in
                print("FINISHED!!!!!")
                completion()
            }
        })
    }
}
Note: I found that if I print(presentationTime) INSIDE the image converter, it prints 0, but when I then print the duration in the merge, I get 0.666667.
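As a debugging aid, one way to see where that 0.666667 comes from is to load the written file back and print what AVFoundation reports for it before merging. A minimal sketch, assuming outputURL is the MyConverter property shown above:

let imageVideo = AVAsset(URL: outputURL)
print("asset duration:", CMTimeGetSeconds(imageVideo.duration))
if let track = imageVideo.tracksWithMediaType(AVMediaTypeVideo).first {
    print("track range:", CMTimeGetSeconds(track.timeRange.start), CMTimeGetSeconds(track.timeRange.duration))
}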
Note: no answer yet, but I will keep bumping this question until I find the answer or someone helps me out! Thanks!
Yes, so I actually just dealt with this problem myself. The issue is indeed in how you create the video from the picture. What you need to do is append the pixel buffer at time zero, and then append it AGAIN at the end; otherwise you end up with a video that is empty until the very last frame, exactly like what you are running into.
The code below is my best attempt at updating your code. At the very end I will post my own solution, which is in Objective-C, in case it helps anyone else.
func build(completion: () -> Void) {
    guard let videoWriter = try? AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie) else {
        fatalError("AVAssetWriter error")
    }

    // This might not be a problem for you, but width HAS to be divisible by 16 or the
    // movie will come out distorted... don't ask me why. So this is a safeguard.
    let pixelsToRemove = fmod(Double(image.size.width), 16)
    let pixelsToAdd = 16 - pixelsToRemove
    let size = CGSizeMake(image.size.width + CGFloat(pixelsToAdd), image.size.height)

    let outputSettings = [AVVideoCodecKey : AVVideoCodecH264,
                          AVVideoWidthKey : NSNumber(float: Float(size.width)),
                          AVVideoHeightKey : NSNumber(float: Float(size.height))]

    guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
        fatalError("Negative : Can't apply the Output settings...")
    }

    let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
    let sourcePixelBufferAttributesDictionary = [
        kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
        kCVPixelBufferWidthKey as String : NSNumber(float: Float(size.width)),
        kCVPixelBufferHeightKey as String : NSNumber(float: Float(size.height))]
    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
        sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)

    if videoWriter.canAddInput(videoWriterInput) {
        videoWriter.addInput(videoWriterInput)
    }
    if videoWriter.startWriting() {
        videoWriter.startSessionAtSourceTime(kCMTimeZero)
        assert(pixelBufferAdaptor.pixelBufferPool != nil)
    }

    // For simplicity, I'm going to remove the media queue you created and instead explicitly
    // wait until I can append, since I am only writing one pixel buffer at two different times.
    var pixelBuffer: CVPixelBuffer? = nil
    let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)

    guard let managedPixelBuffer = pixelBuffer where status == 0 else {
        print("Failed to allocate pixel buffer")
        return
    }

    // Draw the image into the pixel buffer once; we will append this same buffer twice.
    CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)
    let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
    let context = CGBitmapContextCreate(data, Int(size.width), Int(size.height), 8,
        CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace,
        CGImageAlphaInfo.PremultipliedFirst.rawValue)
    CGContextClearRect(context, CGRectMake(0, 0, size.width, size.height))
    CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), self.image.CGImage)
    CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)

    // Here is where the magic happens: we have our pixel buffer and it's time to start writing.
    // FIRST - add it at time zero.
    var appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(managedPixelBuffer, withPresentationTime: kCMTimeZero)
    if !appendSucceeded {
        // Something went wrong; up to you to handle. Return so the rest of the code is not executed.
        print("append at time zero failed")
        return
    }

    // SECOND - wait until the writer is ready for more data with an empty while.
    while !videoWriterInput.readyForMoreMediaData {}

    // THIRD - make a CMTime with the desired length of your picture-video.
    // I am going to arbitrarily make it 5 seconds here.
    let frameTime: CMTime = CMTimeMake(5, 1) // 5 seconds

    // FOURTH - add the same exact pixel buffer at the very end of the video you are creating.
    appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(managedPixelBuffer, withPresentationTime: frameTime)
    if !appendSucceeded {
        // Something went wrong; up to you to handle. Return so the rest of the code is not executed.
        print("append at frameTime failed")
        return
    }

    videoWriterInput.markAsFinished()
    videoWriter.endSessionAtSourceTime(frameTime)
    videoWriter.finishWritingWithCompletionHandler { () -> Void in
        if videoWriter.status != .Completed {
            // Error writing the video... handle appropriately.
        } else {
            print("FINISHED!!!!!")
            completion()
        }
    }
}
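One note on a design choice in that code: the empty while loop spinning on readyForMoreMediaData replaces the requestMediaDataWhenReadyOnQueue callback from the original converter. With only two appends the spin finishes almost immediately, but if you ever write many frames you will want the queue-driven callback back, so the writer can pull frames at its own pace instead of a thread burning CPU.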
Note: I had to make a few edits to make this self-contained, so this method returns a string containing the path the video will be written to. The path is returned before the video finishes writing, so it is possible to access it before it is ready if you are not careful.
- (NSString *)makeMovieFromImageData:(NSData *)imageData {
    NSError *error;
    UIImage *image = [UIImage imageWithData:imageData];

    // width has to be divisible by 16 or the movie comes out distorted... don't ask me why
    double pixelsToRemove = fmod(image.size.width, 16);
    double pixelsToAdd = 16 - pixelsToRemove;
    CGSize size = CGSizeMake(image.size.width + pixelsToAdd, image.size.height);

    BOOL hasFoundValidPath = NO;
    NSURL *tempFileURL;
    NSString *tempPath;
    while (!hasFoundValidPath) {
        NSString *guid = [[NSUUID new] UUIDString];
        NSString *outputFile = [NSString stringWithFormat:@"picture_%@.mp4", guid];
        NSString *outputDirectory = NSTemporaryDirectory();
        tempPath = [outputDirectory stringByAppendingPathComponent:outputFile];
        // Will fail if destination already has a file
        if ([[NSFileManager defaultManager] fileExistsAtPath:tempPath]) {
            continue;
        } else {
            hasFoundValidPath = YES;
        }
        tempFileURL = [NSURL fileURLWithPath:tempPath];
    }

    // Start writing
    AVAssetWriter *videoWriter = [[AVAssetWriter alloc] initWithURL:tempFileURL
                                                           fileType:AVFileTypeQuickTimeMovie
                                                              error:&error];
    if (error) {
        // handle error
    }
    NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                                   AVVideoCodecH264, AVVideoCodecKey,
                                   [NSNumber numberWithInt:(int)size.width], AVVideoWidthKey,
                                   [NSNumber numberWithInt:(int)size.height], AVVideoHeightKey,
                                   nil];
    AVAssetWriterInput *writerInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo
                                                                         outputSettings:videoSettings];
    NSDictionary *bufferAttributes = [NSDictionary dictionaryWithObjectsAndKeys:
                                      [NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey,
                                      nil];
    AVAssetWriterInputPixelBufferAdaptor *adaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writerInput
                                                                                                                     sourcePixelBufferAttributes:bufferAttributes];
    if ([videoWriter canAddInput:writerInput]) {
        [videoWriter addInput:writerInput];
    } else {
        // handle error
    }

    [videoWriter startWriting];
    [videoWriter startSessionAtSourceTime:kCMTimeZero];

    CGImageRef img = [image CGImage];

    // Now I am going to create the pixel buffer
    NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
                             [NSNumber numberWithBool:YES], kCVPixelBufferCGImageCompatibilityKey,
                             [NSNumber numberWithBool:YES], kCVPixelBufferCGBitmapContextCompatibilityKey,
                             nil];
    CVPixelBufferRef buffer = NULL;
    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, size.width, size.height,
                                          kCVPixelFormatType_32ARGB,
                                          (__bridge CFDictionaryRef)options,
                                          &buffer);
    if (!(status == kCVReturnSuccess && buffer != NULL)) {
        NSLog(@"There be some issue. We didn't get a buffer from the image");
    }

    CVPixelBufferLockBaseAddress(buffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(buffer);
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pxdata, size.width, size.height, 8,
                                                 4 * size.width, rgbColorSpace,
                                                 (CGBitmapInfo)kCGImageAlphaPremultipliedFirst);
    CGContextSetRGBFillColor(context, 0, 0, 0, 0);
    CGContextConcatCTM(context, CGAffineTransformIdentity);
    CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), img);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);
    CVPixelBufferUnlockBaseAddress(buffer, 0);

    // At this point we have our buffer, so we are going to start by adding it at time zero
    [adaptor appendPixelBuffer:buffer withPresentationTime:kCMTimeZero];
    while (!writerInput.readyForMoreMediaData) {} // wait until ready
    CMTime frameTime = CMTimeMake(5, 1); // 5 second frame
    [adaptor appendPixelBuffer:buffer withPresentationTime:frameTime];
    CFRelease(buffer);

    [writerInput markAsFinished];
    [videoWriter endSessionAtSourceTime:frameTime];
    [videoWriter finishWritingWithCompletionHandler:^{
        if (videoWriter.status != AVAssetWriterStatusCompleted) {
            // Error
        }
    }]; // end videoWriter finishWriting block

    // NOTE: the path is actually being returned before the videoWriter finishes writing,
    // so be careful not to access the file until it's ready
    return tempPath;
}
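If you need the file to be ready before this method hands back the path, one option (a sketch of my own, not part of the answer above, written in Swift 2 style to match the rest of the post) is to block a background thread on a semaphore until the completion handler fires:

// Sketch only: block until the writer finishes. Assumes videoWriter and frameTime
// as in the code above. Never do this on the main thread.
let semaphore = dispatch_semaphore_create(0)
videoWriter.finishWritingWithCompletionHandler {
    dispatch_semaphore_signal(semaphore)
}
dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER)
// By this point the file is fully written, or videoWriter.status == .Failed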