Audio Parsing
Audio decoding is the process of converting compressed audio data into PCM (Pulse-Code Modulation) data that can be played back.
The basic steps of FFmpeg audio decoding are as follows (a minimal sketch follows the list):
- Initialize the FFmpeg decoders (can be omitted since version 4.0):
Call av_register_all() to register the muxers/demuxers and avcodec_register_all() to register all codecs.
- Open the input audio stream:
Use avformat_open_input() to open and read the audio file.
Use avformat_find_stream_info() to obtain the stream information.
- Find the audio stream:
Look up the index of the stream whose type is AVMEDIA_TYPE_AUDIO.
Use av_find_best_stream() to find the first audio stream and record its index.
- Open the corresponding decoder:
Find the decoder for the audio stream with avcodec_find_decoder().
Use avcodec_open2() to open the decoder.
- Read audio packets for decoding:
Iterate over the audio data and read audio packets (AVPacket).
Use av_read_frame() to read them.
Check whether each packet belongs to the desired audio stream.
- Send the audio packet to the decoder:
Use avcodec_send_packet() to submit the packet to the decoder.
- Read decoded audio frames from the decoder:
Use avcodec_receive_frame() to obtain the decoded frames (AVFrame).
Keep fetching frames from the decoder until it returns EAGAIN or an error.
- Convert the audio format (optional):
If needed, convert the audio data to a different sample format or sample rate with libswresample (or the older, now-deprecated libavresample).
- Post-processing (optional):
Apply any necessary post-processing to the decoded audio, such as volume adjustment or mixing.
- Cleanup and resource release:
Close the decoder.
Close the audio file.
Free every AVFrame and AVPacket that was used.
Free the codec context and other resources.
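A minimal sketch of these steps using the send/receive API is shown below. The input file name ("input.mp3") and the bare-bones error handling are illustrative assumptions, not code from this project.

/* Minimal audio-decoding sketch following the steps above.
 * "input.mp3" is an illustrative file name; errors simply return early. */
extern "C" {
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
}

int decodeAudioFile()
{
    AVFormatContext *fmtCtx = NULL;
    if (avformat_open_input(&fmtCtx, "input.mp3", NULL, NULL) < 0)
        return -1;
    if (avformat_find_stream_info(fmtCtx, NULL) < 0)
        return -1;

    /* find the first audio stream and its decoder */
    int audioIndex = av_find_best_stream(fmtCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if (audioIndex < 0)
        return -1;
    const AVCodec *codec = avcodec_find_decoder(fmtCtx->streams[audioIndex]->codecpar->codec_id);

    AVCodecContext *codecCtx = avcodec_alloc_context3(codec);
    avcodec_parameters_to_context(codecCtx, fmtCtx->streams[audioIndex]->codecpar);
    if (avcodec_open2(codecCtx, codec, NULL) < 0)
        return -1;

    AVPacket *packet = av_packet_alloc();
    AVFrame  *frame  = av_frame_alloc();

    /* read packets, send them to the decoder, and drain all available frames */
    while (av_read_frame(fmtCtx, packet) >= 0) {
        if (packet->stream_index == audioIndex
                && avcodec_send_packet(codecCtx, packet) == 0) {
            while (avcodec_receive_frame(codecCtx, frame) == 0) {
                /* frame->data[] now holds decoded PCM samples;
                 * resample here with libswresample if another format is needed */
            }
        }
        av_packet_unref(packet);
    }

    /* cleanup */
    av_frame_free(&frame);
    av_packet_free(&packet);
    avcodec_free_context(&codecCtx);
    avformat_close_input(&fmtCtx);
    return 0;
}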
Video Parsing
The purpose of the video decoding flow is to convert the compressed video data stream into decoded raw video frames (usually in YUV or RGB format).
The basic steps of FFmpeg video decoding are as follows (a minimal sketch follows the list):
- Initialize the FFmpeg decoders (can be omitted since version 4.0):
Call av_register_all() to register the muxers/demuxers and avcodec_register_all() to register all codecs.
- Open the input video stream:
Use avformat_open_input() to open and read the video file.
Use avformat_find_stream_info() to obtain the stream information.
- Find the video stream:
Look up the index of the stream whose type is AVMEDIA_TYPE_VIDEO.
Use av_find_best_stream() to find the first video stream and record its index.
- Open the corresponding decoder:
Find the decoder for the video stream with avcodec_find_decoder().
Use avcodec_open2() to open the decoder.
- Read video packets for decoding:
Read video data (AVPacket) from the media file with av_read_frame().
Process only the packets whose stream index matches the video stream index recorded earlier.
- Send the data to the decoder:
Use avcodec_send_packet() to submit the packet to the decoder.
- Read decoded video frames from the decoder:
Use avcodec_receive_frame() to obtain decoded video frames (AVFrame) from the decoder.
Repeat this in a loop to obtain all decoded frames.
- Video frame processing (optional):
Convert the decoded frames to the required format or otherwise process them; libswscale can be used for format conversion and resizing.
- Frame-rate control (optional):
Use the video's PTS (Presentation Time Stamp) to pace the frames so that the video plays back at the correct rate.
- Cleanup and resource release:
Free the allocated AVCodecContext and AVFormatContext.
Free every AVFrame and AVPacket that was used.
Close the video stream and the network components (if they were initialized).
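The video path differs from the audio path mainly in the optional pixel-format conversion. The sketch below decodes packets from an already-opened AVFormatContext/AVCodecContext and converts each frame to RGB32 with libswscale; the function name and variable names are illustrative assumptions, not code from this project.

/* Minimal video-decoding sketch following the steps above: decode packets for one
 * video stream and convert every frame to RGB32 with libswscale. */
extern "C" {
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
}

void decodeVideoStream(AVFormatContext *fmtCtx, AVCodecContext *codecCtx, int videoIndex)
{
    AVPacket *packet   = av_packet_alloc();
    AVFrame  *frame    = av_frame_alloc();
    AVFrame  *frameRGB = av_frame_alloc();

    /* destination buffer and scaler for the YUV -> RGB32 conversion */
    int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB32, codecCtx->width, codecCtx->height, 1);
    uint8_t *buffer = (uint8_t *)av_malloc(numBytes);
    av_image_fill_arrays(frameRGB->data, frameRGB->linesize, buffer,
                         AV_PIX_FMT_RGB32, codecCtx->width, codecCtx->height, 1);

    SwsContext *swsCtx = sws_getContext(codecCtx->width, codecCtx->height, codecCtx->pix_fmt,
                                        codecCtx->width, codecCtx->height, AV_PIX_FMT_RGB32,
                                        SWS_BICUBIC, NULL, NULL, NULL);

    while (av_read_frame(fmtCtx, packet) >= 0) {
        /* only packets that belong to the chosen video stream are decoded */
        if (packet->stream_index == videoIndex
                && avcodec_send_packet(codecCtx, packet) == 0) {
            while (avcodec_receive_frame(codecCtx, frame) == 0) {
                sws_scale(swsCtx, frame->data, frame->linesize, 0, codecCtx->height,
                          frameRGB->data, frameRGB->linesize);
                /* frameRGB now holds an RGB32 image; frame->pts (in the stream
                 * time base) can be used to pace the display */
            }
        }
        av_packet_unref(packet);
    }

    sws_freeContext(swsCtx);
    av_free(buffer);
    av_frame_free(&frameRGB);
    av_frame_free(&frame);
    av_packet_free(&packet);
}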
Video Stream Parsing Code
decoder.h
#ifndef DECODER_H
#define DECODER_H

#include <QThread>
#include <QImage>

extern "C"
{
//#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/pixfmt.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"
#include "libavutil/imgutils.h"
}

#include "audiodecoder.h"

class Decoder : public QThread
{
    Q_OBJECT

public:
    enum PlayState {
        STOP,
        PAUSE,
        PLAYING,
        FINISH
    };

    explicit Decoder();
    ~Decoder();

    double getCurrentTime();
    void seekProgress(qint64 pos);
    int getVolume();
    void setVolume(int volume);

private:
    void run();
    void clearData();
    void setPlayState(Decoder::PlayState state);
    void displayVideo(QImage image);
    static int videoThread(void *arg);
    double synchronize(AVFrame *frame, double pts);
    bool isRealtime(AVFormatContext *pFormatCtx);
    int initFilter();

    int fileType;
    int videoIndex;
    int audioIndex;
    int subtitleIndex;

    QString currentFile;
    QString currentType;

    qint64 timeTotal;

    AVPacket seekPacket;
    qint64 seekPos;
    double seekTime;

    PlayState playState;
    bool isStop;
    bool gotStop;
    bool isPause;
    bool isSeek;
    bool isReadFinished;
    bool isDecodeFinished;

    AVFormatContext *pFormatCtx;
    AVCodecContext *pCodecCtx;      // video codec context

    AvPacketQueue videoQueue;
    AvPacketQueue subtitleQueue;

    AVStream *videoStream;

    double videoClk;                // video frame timestamp

    AudioDecoder *audioDecoder;

    AVFilterGraph *filterGraph;
    AVFilterContext *filterSinkCxt;
    AVFilterContext *filterSrcCxt;

public slots:
    void decoderFile(QString file, QString type);
    void stopVideo();
    void pauseVideo();
    void audioFinished();

signals:
    void readFinished();
    void gotVideo(QImage image);
    void gotVideoTime(qint64 time);
    void playStateChanged(Decoder::PlayState state);
};

#endif // DECODER_H
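For context, here is one hypothetical way to drive this class from the GUI side. The helper function, the QLabel parameter, and the file path are illustrative assumptions; only the signals/slots used here come from the header above.

#include <QLabel>
#include <QPixmap>
#include "decoder.h"

// Hypothetical wiring of the Decoder class from the GUI side (illustrative only).
Decoder *startPlayback(QLabel *videoLabel, const QString &file)
{
    Decoder *decoder = new Decoder;

    // Every decoded frame arrives as a QImage through the gotVideo() signal;
    // using videoLabel as the context object delivers the update in the GUI thread.
    QObject::connect(decoder, &Decoder::gotVideo, videoLabel, [videoLabel](QImage image) {
        videoLabel->setPixmap(QPixmap::fromImage(image));
    });

    // decoderFile() records the file name/type and then starts the decode thread (run()).
    decoder->decoderFile(file, "video");

    return decoder;
}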
decoder.cpp
#include <QDebug>

#include "decoder.h"

Decoder::Decoder() :
    timeTotal(0),
    playState(STOP),
    isStop(false),
    isPause(false),
    isSeek(false),
    isReadFinished(false),
    audioDecoder(new AudioDecoder),
    filterGraph(NULL)
{
    av_init_packet(&seekPacket);
    seekPacket.data = (uint8_t *)"FLUSH";

    connect(audioDecoder, SIGNAL(playFinished()), this, SLOT(audioFinished()));
    connect(this, SIGNAL(readFinished()), audioDecoder, SLOT(readFileFinished()));
}

Decoder::~Decoder()
{
}

void Decoder::displayVideo(QImage image)
{
    emit gotVideo(image);
}

void Decoder::clearData()
{
    videoIndex       = -1;
    audioIndex       = -1;
    subtitleIndex    = -1;
    timeTotal        = 0;

    isStop           = false;
    isPause          = false;
    isSeek           = false;
    isReadFinished   = false;
    isDecodeFinished = false;

    videoQueue.empty();
    audioDecoder->emptyAudioData();

    videoClk = 0;
}

void Decoder::setPlayState(Decoder::PlayState state)
{
//    qDebug() << "Set state: " << state;
    emit playStateChanged(state);
    playState = state;
}

bool Decoder::isRealtime(AVFormatContext *pFormatCtx)
{
    if (!strcmp(pFormatCtx->iformat->name, "rtp")
            || !strcmp(pFormatCtx->iformat->name, "rtsp")
            || !strcmp(pFormatCtx->iformat->name, "sdp")) {
        return true;
    }

//    if (pFormatCtx->pb && (!strncmp(pFormatCtx->filename, "rtp:", 4)
//            || !strncmp(pFormatCtx->filename, "udp:", 4))) {
//        return true;
//    }

    return false;
}

int Decoder::initFilter()
{
    int ret;
    AVFilterInOut *out = avfilter_inout_alloc();
    AVFilterInOut *in  = avfilter_inout_alloc();

    /* output format */
    enum AVPixelFormat pixFmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};

    /* free last graph */
    if (filterGraph) {
        avfilter_graph_free(&filterGraph);
    }

    filterGraph = avfilter_graph_alloc();

    /* just add filter output format rgb32,
     * use for function avfilter_graph_parse_ptr()
     */
    QString filter("pp=hb/vb/dr/al");

    QString args = QString("video_size=%1x%2:pix_fmt=%3:time_base=%4/%5:pixel_aspect=%6/%7")
            .arg(pCodecCtx->width).arg(pCodecCtx->height).arg(pCodecCtx->pix_fmt)
            .arg(videoStream->time_base.num).arg(videoStream->time_base.den)
            .arg(pCodecCtx->sample_aspect_ratio.num).arg(pCodecCtx->sample_aspect_ratio.den);

    /* create source filter */
    ret = avfilter_graph_create_filter(&filterSrcCxt, avfilter_get_by_name("buffer"), "in",
                                       args.toLocal8Bit().data(), NULL, filterGraph);
    if (ret < 0) {
        qDebug() << "avfilter graph create filter failed, ret:" << ret;
        avfilter_graph_free(&filterGraph);
        goto out;
    }

    /* create sink filter */
    ret = avfilter_graph_create_filter(&filterSinkCxt, avfilter_get_by_name("buffersink"), "out",
                                       NULL, NULL, filterGraph);
    if (ret < 0) {
        qDebug() << "avfilter graph create filter failed, ret:" << ret;
        avfilter_graph_free(&filterGraph);
        goto out;
    }

    /* set sink filter output format */
    ret = av_opt_set_int_list(filterSinkCxt, "pix_fmts", pixFmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        qDebug() << "av opt set int list failed, ret:" << ret;
        avfilter_graph_free(&filterGraph);
        goto out;
    }

    out->name       = av_strdup("in");
    out->filter_ctx = filterSrcCxt;
    out->pad_idx    = 0;
    out->next       = NULL;

    in->name        = av_strdup("out");
    in->filter_ctx  = filterSinkCxt;
    in->pad_idx     = 0;
    in->next        = NULL;

    if (filter.isEmpty() || filter.isNull()) {
        /* if no filter to add, just link source & sink */
        ret = avfilter_link(filterSrcCxt, 0, filterSinkCxt, 0);
        if (ret < 0) {
            qDebug() << "avfilter link failed, ret:" << ret;
            avfilter_graph_free(&filterGraph);
            goto out;
        }
    } else {
        /* add filter to graph */
        ret = avfilter_graph_parse_ptr(filterGraph, filter.toLatin1().data(), &in, &out, NULL);
        if (ret < 0) {
            qDebug() << "avfilter graph parse ptr failed, ret:" << ret;
            avfilter_graph_free(&filterGraph);
            goto out;
        }
    }

    /* check validity and configure all the links and formats in the graph */
    if ((ret = avfilter_graph_config(filterGraph, NULL)) < 0) {
        qDebug() << "avfilter graph config failed, ret:" << ret;
        avfilter_graph_free(&filterGraph);
    }

out:
    avfilter_inout_free(&out);
    avfilter_inout_free(&in);

    return ret;
}
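/* Assumption for readers: initFilter() only builds the graph. The decoding side
 * (videoThread(), not shown in this section) would typically push decoded frames
 * into the "buffer" source with av_buffersrc_add_frame() and pull filtered RGB32
 * frames from the "buffersink" with av_buffersink_get_frame() before converting
 * them to QImage. */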
void Decoder::decoderFile(QString file, QString type)
{
//    qDebug() << "Current state:" << playState;
    qDebug() << "File name:" << file << ", type:" << type;

    if (playState != STOP) {
        isStop = true;
        while (playState != STOP) {
            SDL_Delay(10);
        }
        SDL_Delay(100);
    }

    clearData();

    SDL_Delay(100);

    currentFile = file;
    currentType = type;

    this->start();
}

void Decoder::audioFinished()
{
    isStop = true;
    if (currentType == "music") {
        SDL_Delay(100);
        emit playStateChanged(Decoder::FINISH);
    }
}

void Decoder::stopVideo()
{
    if (playState == STOP) {
        setPlayState(Decoder::STOP);
        return;
    }

    gotStop = true;
    isStop  = true;
    audioDecoder->stopAudio();

    if (currentType == "video") {
        /* wait for decoding & reading stop */
        while (!isReadFinished || !isDecodeFinished) {
            SDL_Delay(10);
        }
    } else {
        while (!isReadFinished) {
            SDL_Delay(10);
        }
    }
}

void Decoder::pauseVideo()
{
    if (playState == STOP) {
        return;
    }

    isPause = !isPause;
    audioDecoder->pauseAudio(isPause);

    if (isPause) {
        av_read_pause(pFormatCtx);
        setPlayState(PAUSE);
    } else {
        av_read_play(pFormatCtx);
        setPlayState(PLAYING);
    }
}

int Decoder::getVolume()
{
    return audioDecoder->getVolume();
}

void Decoder::setVolume(int volume)
{
    audioDecoder->setVolume(volume);
}

double Decoder::getCurrentTime()
{
    if (audioIndex >= 0) {
        return audioDecoder->getAudioClock();
    }

    return 0;
}

void Decoder::seekProgress(qint64 pos)
{
    if (!isSeek) {
        seekPos = pos;
        isSeek  = true;
    }
}

double Decoder::synchronize(AVFrame *frame, double pts)
{
    double delay;

    if (pts != 0) {
        videoClk = pts; // Get pts, then set video clock to it
    } else {
        pts = vi