所以我有VideoEncoder.h这样的代码
/*
FFmpeg simple Encoder
*/
#ifndef __VIDEO_ENCODER_H__
#define __VIDEO_ENCODER_H__
#include "ffmpegInclude.h"
#include <Windows.h>
#include <string>
class VideoEncoder
{
private:
// output file name
std::string outputFilename;
// output format.
AVOutputFormat *pOutFormat;
// format context
AVFormatContext *pFormatContext;
// video stream context
AVStream * pVideoStream;
// audio streams context
AVStream * pAudioStream;
// convert context context
struct SwsContext *pImgConvertCtx;
// encode buffer and size
uint8_t * pVideoEncodeBuffer;
int nSizeVideoEncodeBuffer;
// audio buffer and size
uint8_t * pAudioEncodeBuffer;
int nSizeAudioEncodeBuffer;
// count of sample
int audioInputSampleSize;
// current picture
AVFrame *pCurrentPicture;
// audio buffer
char* audioBuffer;
int nAudioBufferSize;
int nAudioBufferSizeCurrent;
public:
VideoEncoder()
{
pOutFormat = NULL;
pFormatContext = NULL;
pVideoStream = NULL;
pImgConvertCtx = NULL;
pCurrentPicture = NULL;
pVideoEncodeBuffer = NULL;
nSizeVideoEncodeBuffer = 0;
pAudioEncodeBuffer = NULL;
nSizeAudioEncodeBuffer = 0;
nAudioBufferSize = 1024 * 1024 * 4;
audioBuffer = new char[nAudioBufferSize];
nAudioBufferSizeCurrent = 0;
}
virtual ~VideoEncoder()
{
Finish();
}
// init output file
bool InitFile(std::string& inputFile, std::string& container);
// Add video and audio data
bool AddFrame(AVFrame* frame, const char* soundBuffer, int soundBufferSize);
// end of output
bool Finish();
private:
// Add video stream
AVStream *AddVideoStream(AVFormatContext *pContext, CodecID codec_id);
// Open Video Stream
bool OpenVideo(AVFormatContext *oc, AVStream *pStream);
// Allocate memory
AVFrame * CreateFFmpegPicture(int pix_fmt, int nWidth, int nHeight);
// Close video stream
void CloseVideo(AVFormatContext *pContext, AVStream *pStream);
// Add audio stream
AVStream * AddAudioStream(AVFormatContext *pContext, CodecID codec_id);
// Open audio stream
bool OpenAudio(AVFormatContext *pContext, AVStream *pStream);
// close audio stream
void CloseAudio(AVFormatContext *pContext, AVStream *pStream);
// Add video frame
bool AddVideoFrame(AVFrame * frame, AVCodecContext *pVideoCodec);
// Add audio samples
bool AddAudioSample(AVFormatContext *pFormatContext,
AVStream *pStream, const char* soundBuffer, int soundBufferSize);
// Free resourses.
void Free();
bool NeedConvert();
};
#endif // __VIDEO_ENCODER_H__
Run Code Online (Sandbox Code Playgroud)
所以我在这里看到InitFile,AddFrame和Finish.
在VideoEncoder.cpp中,我看到了这一点
#include "VideoEncoder.h"

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "ffmpegInclude.h"
#include "Settings.h"
#define MAX_AUDIO_PACKET_SIZE (128 * 1024)
// Create the output file: pick the container (from the "container" argument
// or guessed from the file name), add the video and audio streams, open both
// codecs, open the file and write the container header.
// Returns false (and frees everything) on any failure.
bool VideoEncoder::InitFile(std::string& inputFile, std::string& container)
{
  bool res = false;
  const char * filename = inputFile.c_str();
  outputFilename = inputFile;

  // Initialize libavcodec/libavformat (safe to call repeatedly).
  av_register_all();

  if (container == std::string("auto"))
  {
    // Guess the container from the file name extension.
    pOutFormat = guess_format(NULL, filename, NULL);
  }
  else
  {
    // Use the explicitly requested container.
    pOutFormat = guess_format(container.c_str(), NULL, NULL);
  }

  if (pOutFormat)
  {
    pFormatContext = avformat_alloc_context();
    if (pFormatContext)
    {
      pFormatContext->oformat = pOutFormat;
      // BUG FIX: the original memcpy(min(strlen, sizeof)) never copied the
      // terminating NUL; copy at most sizeof-1 bytes and terminate.
      strncpy(pFormatContext->filename, filename,
              sizeof(pFormatContext->filename) - 1);
      pFormatContext->filename[sizeof(pFormatContext->filename) - 1] = '\0';

      // Add video and audio streams.
      pVideoStream = AddVideoStream(pFormatContext, pOutFormat->video_codec);
      pAudioStream = AddAudioStream(pFormatContext, pOutFormat->audio_codec);

      // Set the output parameters (must be done even if no parameters).
      if (av_set_parameters(pFormatContext, NULL) >= 0)
      {
        dump_format(pFormatContext, 0, filename, 1);

        // Open the codecs. BUG FIX: the original unconditionally overwrote
        // the video-open result with the audio-open result, and called
        // OpenAudio() even when pAudioStream was NULL (which dereferences
        // pStream->codec).
        res = true;
        if (pVideoStream)
        {
          res = OpenVideo(pFormatContext, pVideoStream);
        }
        if (res && pAudioStream)
        {
          res = OpenAudio(pFormatContext, pAudioStream);
        }

        if (res && !(pOutFormat->flags & AVFMT_NOFILE))
        {
          if (url_fopen(&pFormatContext->pb, filename, URL_WRONLY) < 0)
          {
            res = false;
            printf("Cannot open file\n");
          }
        }
        if (res)
        {
          av_write_header(pFormatContext);
        }
      }
    }
  }

  if (!res)
  {
    Free();
    printf("Cannot init file\n");
  }
  return res;
}
// Encode and mux one RGB24 video frame (if "frame" is non-NULL and holds
// data) and/or append a chunk of PCM audio. Returns false if either the
// video or the audio path fails.
bool VideoEncoder::AddFrame(AVFrame* frame, const char* soundBuffer, int soundBufferSize)
{
  bool res = true;

  if (pVideoStream && frame && frame->data[0])
  {
    AVCodecContext *pVideoCodec = pVideoStream->codec;

    if (NeedConvert())
    {
      // Lazily create the RGB24 -> codec-pixel-format scaler.
      if (!pImgConvertCtx)
      {
        pImgConvertCtx = sws_getContext(pVideoCodec->width, pVideoCodec->height,
          PIX_FMT_RGB24,
          pVideoCodec->width, pVideoCodec->height,
          pVideoCodec->pix_fmt,
          SWS_BICUBLIN, NULL, NULL, NULL);
      }

      // Temporary picture that receives the converted pixels.
      pCurrentPicture = CreateFFmpegPicture(pVideoCodec->pix_fmt,
        pVideoCodec->width, pVideoCodec->height);
      // BUG FIX: the original never checked these and would dereference a
      // NULL picture (or encode an empty one) on allocation failure.
      if (pCurrentPicture && pImgConvertCtx)
      {
        sws_scale(pImgConvertCtx, frame->data, frame->linesize,
          0, pVideoCodec->height, pCurrentPicture->data, pCurrentPicture->linesize);
        res = AddVideoFrame(pCurrentPicture, pVideoStream->codec);
      }
      else
      {
        res = false;
      }

      // Free the temporary picture.
      if (pCurrentPicture)
      {
        av_free(pCurrentPicture->data[0]);
        av_free(pCurrentPicture);
        pCurrentPicture = NULL;
      }
    }
    else
    {
      // BUG FIX: when no conversion was needed the original still encoded
      // a freshly allocated, never-filled picture, discarding the caller's
      // pixels. Encode the input frame directly instead.
      res = AddVideoFrame(frame, pVideoStream->codec);
    }
  }

  // Add sound. BUG FIX: do not let a successful audio write mask an
  // earlier video failure (the original overwrote res here).
  if (soundBuffer && soundBufferSize > 0)
  {
    bool audioRes = AddAudioSample(pFormatContext, pAudioStream, soundBuffer, soundBufferSize);
    res = res && audioRes;
  }

  return res;
}
// Finalize the output: flush the container trailer, release every FFmpeg
// resource and drop the PCM accumulation buffer. Always returns true.
bool VideoEncoder::Finish()
{
  // The trailer must be written before the context is torn down.
  if (pFormatContext)
  {
    av_write_trailer(pFormatContext);
    Free();
  }

  // delete[] on NULL is a no-op, so no guard is needed.
  delete[] audioBuffer;
  audioBuffer = NULL;

  return true;
}
// Release every FFmpeg resource owned by this encoder: close both codecs,
// free the streams, close the output file and free the format context.
// Safe to call more than once.
void VideoEncoder::Free()
{
  if (pFormatContext)
  {
    // Close the video stream.
    if (pVideoStream)
    {
      CloseVideo(pFormatContext, pVideoStream);
    }
    // Close the audio stream.
    if (pAudioStream)
    {
      CloseAudio(pFormatContext, pAudioStream);
    }

    // Free the streams owned by the context.
    for(size_t i = 0; i < pFormatContext->nb_streams; i++)
    {
      av_freep(&pFormatContext->streams[i]->codec);
      av_freep(&pFormatContext->streams[i]);
    }
    // BUG FIX: the stream storage was just freed above but the member
    // pointers were left dangling; clear them so a later call cannot
    // touch freed memory.
    pVideoStream = NULL;
    pAudioStream = NULL;

    if (!(pFormatContext->flags & AVFMT_NOFILE) && pFormatContext->pb)
    {
      url_fclose(pFormatContext->pb);
    }

    // Free the format context itself.
    av_free(pFormatContext);
    pFormatContext = NULL;
  }
}
// Allocate an AVFrame plus the raw pixel buffer backing it, sized for the
// given pixel format and dimensions. Returns NULL on allocation failure;
// the caller owns both the frame and picture->data[0].
AVFrame * VideoEncoder::CreateFFmpegPicture(int pix_fmt, int nWidth, int nHeight)
{
  // First the frame structure itself.
  AVFrame *pFrame = avcodec_alloc_frame();
  if (!pFrame)
  {
    printf("Cannot create frame\n");
    return NULL;
  }

  // Then the pixel buffer, sized from format + dimensions.
  const int nBytes = avpicture_get_size(pix_fmt, nWidth, nHeight);
  uint8_t *pBuffer = (uint8_t *) av_malloc(nBytes);
  if (!pBuffer)
  {
    av_free(pFrame);
    printf("Cannot allocate buffer\n");
    return NULL;
  }

  // Attach the buffer to the frame's data/linesize arrays.
  avpicture_fill((AVPicture *)pFrame, pBuffer, pix_fmt, nWidth, nHeight);
  return pFrame;
}
// Find and open the video encoder for pStream, and allocate the shared
// video encode output buffer (not needed for raw-picture containers).
// Returns false if the codec is missing, fails to open, or the buffer
// cannot be allocated.
bool VideoEncoder::OpenVideo(AVFormatContext *oc, AVStream *pStream)
{
  AVCodecContext *pContext = pStream->codec;

  // Find the video encoder.
  AVCodec *pCodec = avcodec_find_encoder(pContext->codec_id);
  if (!pCodec)
  {
    // BUG FIX: message previously read "Cannot found video codec".
    printf("Cannot find video codec\n");
    return false;
  }

  // Open the codec.
  if (avcodec_open(pContext, pCodec) < 0)
  {
    printf("Cannot open video codec\n");
    return false;
  }

  pVideoEncodeBuffer = NULL;
  if (!(pFormatContext->oformat->flags & AVFMT_RAWPICTURE))
  {
    /* allocate output buffer */
    nSizeVideoEncodeBuffer = 10000000;
    pVideoEncodeBuffer = (uint8_t *)av_malloc(nSizeVideoEncodeBuffer);
    // BUG FIX: a failed allocation previously went unnoticed and would
    // crash later inside avcodec_encode_video().
    if (!pVideoEncodeBuffer)
    {
      nSizeVideoEncodeBuffer = 0;
      printf("Cannot allocate video encode buffer\n");
      return false;
    }
  }
  return true;
}
// Close the video codec and release both the in-flight temporary picture
// (if any) and the shared video encode output buffer.
void VideoEncoder::CloseVideo(AVFormatContext *pContext, AVStream *pStream)
{
  avcodec_close(pStream->codec);

  // A conversion may have been interrupted mid-frame; release its picture.
  if (pCurrentPicture != NULL)
  {
    if (pCurrentPicture->data)
    {
      av_free(pCurrentPicture->data[0]);
      pCurrentPicture->data[0] = NULL;
    }
    av_free(pCurrentPicture);
    pCurrentPicture = NULL;
  }

  // Drop the encode output buffer.
  if (pVideoEncodeBuffer != NULL)
  {
    av_free(pVideoEncodeBuffer);
    pVideoEncodeBuffer = NULL;
  }
  nSizeVideoEncodeBuffer = 0;
}
// Report whether incoming RGB24 frames must be converted before encoding,
// i.e. whether the video codec consumes a different pixel format.
// Returns false when no video stream (or codec context) exists.
bool VideoEncoder::NeedConvert()
{
  if (!pVideoStream || !pVideoStream->codec)
  {
    return false;
  }
  return pVideoStream->codec->pix_fmt != PIX_FMT_RGB24;
}
// Create the video stream on pContext and configure its codec context:
// 2 Mbit/s, W_VIDEO x H_VIDEO, 25 fps, YUV420P, GOP of 12.
// Returns NULL if the stream cannot be created.
AVStream *VideoEncoder::AddVideoStream(AVFormatContext *pContext, CodecID codec_id)
{
  AVStream *pStream = av_new_stream(pContext, 0);
  if (!pStream)
  {
    printf("Cannot add new vidoe stream\n");
    return NULL;
  }

  AVCodecContext *pCodec = pStream->codec;
  pCodec->codec_id = (CodecID)codec_id;
  pCodec->codec_type = CODEC_TYPE_VIDEO;
  pCodec->frame_number = 0;

  // Sample parameters.
  pCodec->bit_rate = 2000000;
  // Resolution must be a multiple of two.
  pCodec->width = W_VIDEO;
  pCodec->height = H_VIDEO;
  /* time base: this is the fundamental unit of time (in seconds) in terms
     of which frame timestamps are represented. For fixed-fps content the
     timebase is 1/framerate and timestamp increments are identically 1. */
  pCodec->time_base.num = 1;
  pCodec->time_base.den = 25;
  pCodec->gop_size = 12; // emit one intra frame every twelve frames at most
  pCodec->pix_fmt = PIX_FMT_YUV420P;

  switch (pCodec->codec_id)
  {
  case CODEC_ID_MPEG2VIDEO:
    // Just for testing, we also add B frames.
    pCodec->max_b_frames = 2;
    break;
  case CODEC_ID_MPEG1VIDEO:
    /* Needed to avoid using macroblocks in which some coeffs overflow.
       This does not happen with normal video, it just happens here as
       the motion of the chroma plane does not match the luma plane. */
    pCodec->mb_decision = 2;
    break;
  default:
    break;
  }

  // Some formats want stream headers to be separate.
  if (pContext->oformat->flags & AVFMT_GLOBALHEADER)
  {
    pCodec->flags |= CODEC_FLAG_GLOBAL_HEADER;
  }
  return pStream;
}
// Create the audio stream on pContext and configure its codec context:
// mono, 44.1 kHz, signed 16-bit samples at 128 kbit/s. Also (lazily)
// allocates the shared audio encode output buffer.
// Returns NULL if the stream cannot be created.
AVStream * VideoEncoder::AddAudioStream(AVFormatContext *pContext, CodecID codec_id)
{
  // Try to create the stream.
  AVStream *pStream = av_new_stream(pContext, 1);
  if (!pStream)
  {
    printf("Cannot add new audio stream\n");
    return NULL;
  }

  // Configure the codec context.
  AVCodecContext *pCodecCxt = pStream->codec;
  pCodecCxt->codec_id = codec_id;
  pCodecCxt->codec_type = CODEC_TYPE_AUDIO;
  pCodecCxt->bit_rate = 128000;
  pCodecCxt->sample_rate = 44100;
  pCodecCxt->channels = 1;
  pCodecCxt->sample_fmt = SAMPLE_FMT_S16;

  // The encode buffer is shared across calls; allocate only once.
  nSizeAudioEncodeBuffer = 4 * MAX_AUDIO_PACKET_SIZE;
  if (pAudioEncodeBuffer == NULL)
  {
    pAudioEncodeBuffer = (uint8_t *) av_malloc(nSizeAudioEncodeBuffer);
  }

  // Some formats want stream headers to be separate.
  if (pContext->oformat->flags & AVFMT_GLOBALHEADER)
  {
    pCodecCxt->flags |= CODEC_FLAG_GLOBAL_HEADER;
  }
  return pStream;
}
// Find and open the audio encoder for pStream and derive the input frame
// size in samples (audioInputSampleSize). Returns false if the codec is
// missing or fails to open.
bool VideoEncoder::OpenAudio(AVFormatContext *pContext, AVStream *pStream)
{
  AVCodecContext *pCodecCxt = pStream->codec;

  // Find the audio encoder.
  AVCodec *pCodec = avcodec_find_encoder(pCodecCxt->codec_id);
  if (!pCodec)
  {
    // BUG FIX: this failure previously printed the same "Cannot open
    // audio codec" as the open failure below, hiding which step failed.
    printf("Cannot find audio codec\n");
    return false;
  }

  // Open it.
  if (avcodec_open(pCodecCxt, pCodec) < 0)
  {
    printf("Cannot open audio codec\n");
    return false;
  }

  if (pCodecCxt->frame_size <= 1)
  {
    // Ugly hack for PCM codecs (will be removed ASAP with new PCM
    // support) to compute the input frame size in samples.
    audioInputSampleSize = nSizeAudioEncodeBuffer / pCodecCxt->channels;
    switch (pStream->codec->codec_id)
    {
    case CODEC_ID_PCM_S16LE:
    case CODEC_ID_PCM_S16BE:
    case CODEC_ID_PCM_U16LE:
    case CODEC_ID_PCM_U16BE:
      // 16-bit formats: two bytes per sample.
      audioInputSampleSize >>= 1;
      break;
    default:
      break;
    }
    pCodecCxt->frame_size = audioInputSampleSize;
  }
  else
  {
    // Non-PCM codecs dictate their own frame size.
    audioInputSampleSize = pCodecCxt->frame_size;
  }
  return true;
}
// Close the audio codec and release the shared audio encode output buffer.
void VideoEncoder::CloseAudio(AVFormatContext *pContext, AVStream *pStream)
{
  avcodec_close(pStream->codec);

  if (pAudioEncodeBuffer != NULL)
  {
    av_free(pAudioEncodeBuffer);
    pAudioEncodeBuffer = NULL;
  }
  nSizeAudioEncodeBuffer = 0;
}
// Encode one picture with pVideoCodec and write the resulting packet to
// the muxer. For raw-picture containers the AVPicture is written directly.
// Returns true only when the packet was successfully written.
bool VideoEncoder::AddVideoFrame(AVFrame * pOutputFrame, AVCodecContext *pVideoCodec)
{
  bool res = false;

  if (pFormatContext->oformat->flags & AVFMT_RAWPICTURE)
  {
    // Raw video case: the muxer stores the picture structure as-is.
    AVPacket pkt;
    av_init_packet(&pkt);

    pkt.flags |= PKT_FLAG_KEY;
    pkt.stream_index = pVideoStream->index;
    pkt.data = (uint8_t *) pOutputFrame;
    pkt.size = sizeof(AVPicture);

    // BUG FIX: the original discarded the write result and forced
    // res = true, reporting success even when the write failed.
    res = (av_interleaved_write_frame(pFormatContext, &pkt) == 0);
  }
  else
  {
    // Encode the picture into the shared output buffer.
    int nOutputSize = avcodec_encode_video(pVideoCodec, pVideoEncodeBuffer,
      nSizeVideoEncodeBuffer, pOutputFrame);
    if (nOutputSize > 0)
    {
      AVPacket pkt;
      av_init_packet(&pkt);

      // Rescale the codec timestamp into the stream time base.
      if (pVideoCodec->coded_frame->pts != AV_NOPTS_VALUE)
      {
        pkt.pts = av_rescale_q(pVideoCodec->coded_frame->pts,
          pVideoCodec->time_base, pVideoStream->time_base);
      }
      if (pVideoCodec->coded_frame->key_frame)
      {
        pkt.flags |= PKT_FLAG_KEY;
      }
      pkt.stream_index = pVideoStream->index;
      pkt.data = pVideoEncodeBuffer;
      pkt.size = nOutputSize;

      // Write frame.
      res = (av_interleaved_write_frame(pFormatContext, &pkt) == 0);
    }
    else
    {
      // nOutputSize == 0 means the frame was buffered; <0 is an error.
      // NOTE(review): both are treated as failure here, matching the
      // original behavior -- confirm buffered frames are acceptable to drop.
      res = false;
    }
  }
  return res;
}
// Append raw PCM (s16) to the accumulation buffer, then encode and mux as
// many complete codec frames as are available; any remainder is kept for
// the next call. Returns false on overflow, encode error or write error.
bool VideoEncoder::AddAudioSample(AVFormatContext *pFormatContext, AVStream *pStream,
  const char* soundBuffer, int soundBufferSize)
{
  AVCodecContext *pCodecCxt = pStream->codec;
  bool res = true;

  // BUG FIX: the original memcpy had no bounds check and could write past
  // the 4 MB accumulation buffer if the caller outpaced the encoder.
  if (nAudioBufferSizeCurrent + soundBufferSize > nAudioBufferSize)
  {
    printf("Audio buffer overflow\n");
    return false;
  }

  // Append the new samples after whatever is still pending.
  memcpy(audioBuffer + nAudioBufferSizeCurrent, soundBuffer, soundBufferSize);
  nAudioBufferSizeCurrent += soundBufferSize;

  BYTE * pSoundBuffer = (BYTE *)audioBuffer;
  int nCurrentSize = nAudioBufferSizeCurrent;

  // Bytes per codec frame (s16 => 2 bytes per sample).
  DWORD packSizeInSize = 2 * audioInputSampleSize;

  while (nCurrentSize >= (int) packSizeInSize)
  {
    AVPacket pkt;
    av_init_packet(&pkt);

    int nEncoded = avcodec_encode_audio(pCodecCxt, pAudioEncodeBuffer,
      nSizeAudioEncodeBuffer, (const short *)pSoundBuffer);
    // BUG FIX: a negative return (encode error) was previously stored in
    // pkt.size unchecked and handed to the muxer.
    if (nEncoded < 0)
    {
      res = false;
      break;
    }
    pkt.size = nEncoded;

    if (pCodecCxt->coded_frame && pCodecCxt->coded_frame->pts != AV_NOPTS_VALUE)
    {
      pkt.pts = av_rescale_q(pCodecCxt->coded_frame->pts, pCodecCxt->time_base, pStream->time_base);
    }
    pkt.flags |= PKT_FLAG_KEY;
    pkt.stream_index = pStream->index;
    pkt.data = pAudioEncodeBuffer;

    // Write the compressed frame in the media file.
    if (av_interleaved_write_frame(pFormatContext, &pkt) != 0)
    {
      res = false;
      break;
    }

    nCurrentSize -= packSizeInSize;
    pSoundBuffer += packSizeInSize;
  }

  // Save the excess for the next call. BUG FIX: source and destination can
  // overlap, so memmove is required instead of memcpy (undefined behavior).
  memmove(audioBuffer, audioBuffer + nAudioBufferSizeCurrent - nCurrentSize, nCurrentSize);
  nAudioBufferSizeCurrent = nCurrentSize;

  return res;
}
Run Code Online (Sandbox Code Playgroud)
(代码示例来自俄罗斯本文中的示例)
这些 VideoEncoder:: 前缀到底是什么意思?为什么要在 .h 文件中声明它们,而不是把整个类都写在 .cpp 文件中?
为什么在代码中仅使用#include"VideoEncoder.h"来声明VE类以及为什么刚才在VE.h中声明的函数能够完美地工作,即使不参考VE.cpp文件?
为何 .cpp 文件中有这么多前面带有 VideoEncoder:: 的函数?根本原因是:.cpp 文件实现了 .h 文件中声明的接口。在 C++ 中通常的做法是在头文件中声明类的功能,然后在单独的 .cpp 文件中实现它。
一个好处是,每当您决定更改类的实现细节(保持接口不变)时,您将只需重新编译.cpp文件.其他所有东西都不需要重新编译.开发人员还尝试最大限度地利用这种技术尝试在.h文件中尽可能少地声明.阅读有关pImpl模式的更多信息(阅读完好的c ++简介后).基本思想是.h文件中的信息越少,重新编译包含此头文件的文件所需的时间就越少.因此,您只需要输入.h文件函数,这些函数对于类的客户来说是绝对必要的,以便了解该类的所有内容.
至于为什么.h文件中声明的函数在没有引用.cpp文件的情况下工作得很好 - 这就是c ++程序编译的方式.人们说得对,你需要选择一本关于c ++的书并仔细研究它.无论如何,c ++程序的概念是程序由编译单元组成 - 可以独立编译的单个.cpp文件.您只需要#include您的个人.cpp文件正在使用的功能.编译器在此阶段对此感到满意.然而,还有另一个阶段 - 联系.在此阶段,链接器会检查是否存在可用的实际实现.它通过搜索.obj文件(由编译器生成)并评估是否存在与某个类函数相对应的条目来实现.如果该条目不可用,您将看到链接器错误(请注意,编译器不会报告任何错误,因为该函数已在某处声明,但未实现).