and*_*per 6 audio video android mediamuxer
我需要将视频文件和音频文件合并为一个视频文件,以便:
正如我所读过的,这种合并操作的技术术语称为“混合”。
例如,假设我们有一个10秒的输入视频和一个4秒的音频文件,输出视频将是10秒(始终与输入视频相同),并且音频将播放2.5次(前2个覆盖前8秒,然后剩下4秒中的2秒)。
我找到了一种解决方案,可以将视频和音频多路复用(在此处),但遇到了多个问题:
我无法弄清楚如何在需要时循环编写音频内容。无论我尝试什么,它总是给我一个错误
输入文件必须具有特定的文件格式。否则,它可能会引发异常,或者(在极少数情况下)更糟:创建具有黑色内容的视频文件。甚至更多:有时例如可以使用“ .mkv”文件,有时则无法接受(并且两者都可以在视频播放器应用上播放)。
当前代码处理缓冲区,而不是实际持续时间。这意味着在很多情况下,即使我不这样做,我也可能会停止混合音频,并且即使视频足够长,与原始视频相比,输出视频文件的音频内容也会更短。
我尝试通过以下方法使音频的MediaExtractor每次到达末尾时都开始播放:
if (audioBufferInfo.size < 0) {
Log.d("AppLog", "reached end of audio, looping...")
audioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC)
audioBufferInfo.size = audioExtractor.readSampleData(audioBuf, 0)
}
Run Code Online (Sandbox Code Playgroud)为了检查文件的类型,我尝试使用MediaMetadataRetriever然后检查mime-type。我认为受支持的文档可以在文档(此处)中作为标有“编码器” 的文档获得。对此不确定。我也不知道那是哪一种MIME类型。
我还尝试重新初始化与音频有关的所有内容,但也没有用。
这是我当前的多路复用本身的代码(可在此处找到完整的示例项目):
object VideoAndAudioMuxer {
// based on: /sf/answers/2211403981/
@WorkerThread
fun joinVideoAndAudio(videoFile: File, audioFile: File, outputFile: File): Boolean {
try {
// val videoMediaMetadataRetriever = MediaMetadataRetriever()
// videoMediaMetadataRetriever.setDataSource(videoFile.absolutePath)
// val videoDurationInMs =
// videoMediaMetadataRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_DURATION).toLong()
// val videoMimeType =
// videoMediaMetadataRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_MIMETYPE)
// val audioMediaMetadataRetriever = MediaMetadataRetriever()
// audioMediaMetadataRetriever.setDataSource(audioFile.absolutePath)
// val audioDurationInMs =
// audioMediaMetadataRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_DURATION).toLong()
// val audioMimeType =
// audioMediaMetadataRetriever.extractMetadata(MediaMetadataRetriever.METADATA_KEY_MIMETYPE)
// Log.d(
// "AppLog",
// "videoDuration:$videoDurationInMs audioDuration:$audioDurationInMs videoMimeType:$videoMimeType audioMimeType:$audioMimeType"
// )
// videoMediaMetadataRetriever.release()
// audioMediaMetadataRetriever.release()
outputFile.delete()
outputFile.createNewFile()
val muxer = MediaMuxer(outputFile.absolutePath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4)
val sampleSize = 256 * 1024
//video
val videoExtractor = MediaExtractor()
videoExtractor.setDataSource(videoFile.absolutePath)
videoExtractor.selectTrack(0)
videoExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC)
val videoFormat = videoExtractor.getTrackFormat(0)
val videoTrack = muxer.addTrack(videoFormat)
val videoBuf = ByteBuffer.allocate(sampleSize)
val videoBufferInfo = MediaCodec.BufferInfo()
// Log.d("AppLog", "Video Format $videoFormat")
//audio
val audioExtractor = MediaExtractor()
audioExtractor.setDataSource(audioFile.absolutePath)
audioExtractor.selectTrack(0)
audioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC)
val audioFormat = audioExtractor.getTrackFormat(0)
val audioTrack = muxer.addTrack(audioFormat)
val audioBuf = ByteBuffer.allocate(sampleSize)
val audioBufferInfo = MediaCodec.BufferInfo()
// Log.d("AppLog", "Audio Format $audioFormat")
//
muxer.start()
// Log.d("AppLog", "muxing video&audio...")
// val minimalDurationInMs = Math.min(videoDurationInMs, audioDurationInMs)
while (true) {
videoBufferInfo.size = videoExtractor.readSampleData(videoBuf, 0)
audioBufferInfo.size = audioExtractor.readSampleData(audioBuf, 0)
if (audioBufferInfo.size < 0) {
// Log.d("AppLog", "reached end of audio, looping...")
//TODO somehow start from beginning of the audio again, for looping till the video ends
// audioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC)
// audioBufferInfo.size = audioExtractor.readSampleData(audioBuf, 0)
}
if (videoBufferInfo.size < 0 || audioBufferInfo.size < 0) {
// Log.d("AppLog", "reached end of video")
videoBufferInfo.size = 0
audioBufferInfo.size = 0
break
} else {
// val donePercentage = videoExtractor.sampleTime / minimalDurationInMs / 10L
// Log.d("AppLog", "$donePercentage")
// video muxing
videoBufferInfo.presentationTimeUs = videoExtractor.sampleTime
videoBufferInfo.flags = videoExtractor.sampleFlags
muxer.writeSampleData(videoTrack, videoBuf, videoBufferInfo)
videoExtractor.advance()
// audio muxing
audioBufferInfo.presentationTimeUs = audioExtractor.sampleTime
audioBufferInfo.flags = audioExtractor.sampleFlags
muxer.writeSampleData(audioTrack, audioBuf, audioBufferInfo)
audioExtractor.advance()
}
}
muxer.stop()
muxer.release()
// Log.d("AppLog", "success")
return true
} catch (e: Exception) {
e.printStackTrace()
// Log.d("AppLog", "Error " + e.message)
}
return false
}
}
Run Code Online (Sandbox Code Playgroud)
我如何混音视频和音频文件,以便在音频比视频短(持续时间)的情况下音频会循环播放?
如何做到在视频结束时准确地剪切音频(视频和音频中都没有剩余)?
如何在调用此函数之前检查当前设备是否可以处理给定的输入文件并实际将其混合?有没有一种方法可以在运行时检查这种操作所支持的方法,而不是依赖将来可能会更改的文档列表?
我也有同样的场景。
1:当audioBufferInfo.size< 0时,寻道开始。但记住,你需要积累presentationTimeUs。
2:获取视频时长,当音频循环到时长(presentationTimeUs也用)时,进行剪切。
3:音频文件需要是MediaFormat.MIMETYPE_AUDIO_AMR_NB或MediaFormat.MIMETYPE_AUDIO_AMR_WB或MediaFormat.MIMETYPE_AUDIO_AAC。在我的测试机器上,它运行良好。
这是代码:
private fun muxing(musicName: String) {
val saveFile = File(DirUtils.getPublicMediaPath(), "$saveName.mp4")
if (saveFile.exists()) {
saveFile.delete()
PhotoHelper.sendMediaScannerBroadcast(saveFile)
}
try {
// get the video file duration in microseconds
val duration = getVideoDuration(mSaveFile!!.absolutePath)
saveFile.createNewFile()
val videoExtractor = MediaExtractor()
videoExtractor.setDataSource(mSaveFile!!.absolutePath)
val audioExtractor = MediaExtractor()
val afdd = MucangConfig.getContext().assets.openFd(musicName)
audioExtractor.setDataSource(afdd.fileDescriptor, afdd.startOffset, afdd.length)
val muxer = MediaMuxer(saveFile.absolutePath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4)
videoExtractor.selectTrack(0)
val videoFormat = videoExtractor.getTrackFormat(0)
val videoTrack = muxer.addTrack(videoFormat)
audioExtractor.selectTrack(0)
val audioFormat = audioExtractor.getTrackFormat(0)
val audioTrack = muxer.addTrack(audioFormat)
var sawEOS = false
val offset = 100
val sampleSize = 1000 * 1024
val videoBuf = ByteBuffer.allocate(sampleSize)
val audioBuf = ByteBuffer.allocate(sampleSize)
val videoBufferInfo = MediaCodec.BufferInfo()
val audioBufferInfo = MediaCodec.BufferInfo()
videoExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC)
audioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC)
muxer.start()
val frameRate = videoFormat.getInteger(MediaFormat.KEY_FRAME_RATE)
val videoSampleTime = 1000 * 1000 / frameRate
while (!sawEOS) {
videoBufferInfo.offset = offset
videoBufferInfo.size = videoExtractor.readSampleData(videoBuf, offset)
if (videoBufferInfo.size < 0) {
sawEOS = true
videoBufferInfo.size = 0
} else {
videoBufferInfo.presentationTimeUs += videoSampleTime
videoBufferInfo.flags = videoExtractor.sampleFlags
muxer.writeSampleData(videoTrack, videoBuf, videoBufferInfo)
videoExtractor.advance()
}
}
var sawEOS2 = false
var sampleTime = 0L
while (!sawEOS2) {
audioBufferInfo.offset = offset
audioBufferInfo.size = audioExtractor.readSampleData(audioBuf, offset)
if (audioBufferInfo.presentationTimeUs >= duration) {
sawEOS2 = true
audioBufferInfo.size = 0
} else {
if (audioBufferInfo.size < 0) {
sampleTime = audioBufferInfo.presentationTimeUs
audioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC)
continue
}
}
audioBufferInfo.presentationTimeUs = audioExtractor.sampleTime + sampleTime
audioBufferInfo.flags = audioExtractor.sampleFlags
muxer.writeSampleData(audioTrack, audioBuf, audioBufferInfo)
audioExtractor.advance()
}
muxer.stop()
muxer.release()
videoExtractor.release()
audioExtractor.release()
afdd.close()
} catch (e: Exception) {
LogUtils.e(TAG, "Mixer Error:" + e.message)
}
}
Run Code Online (Sandbox Code Playgroud)
| 归档时间: |
|
| 查看次数: |
320 次 |
| 最近记录: |