目錄
學(xué)習(xí)課題:逐步構(gòu)建開發(fā)播放器【QT5 + FFmpeg6 + SDL2】
步驟
AudioOutPut模塊
1、初始化【分配緩存、讀取信息】
2、開始線程工作【從隊(duì)列讀幀->重采樣->SDL回調(diào)->寫入音頻播放數(shù)據(jù)->SDL進(jìn)行播放】
主要代碼
分配緩存
// 對(duì)于樣本隊(duì)列
av_audio_fifo_alloc(playSampleFmt, playChannels, spec.samples * 5);
// 對(duì)于幀的音頻字節(jié)數(shù)據(jù)
// 首次計(jì)算幀大小,并且開辟緩沖區(qū)
maxOutSamples = (int) av_rescale_rnd(decCtxSamples, playSampleRate, srcSampleRate, AV_ROUND_UP);
audioBufferSize = av_samples_get_buffer_size(nullptr, srcChannels, maxOutSamples, playSampleFmt, 0);
audioBuffer = (uint8_t *) av_malloc(audioBufferSize);
重采樣相關(guān)
//配置重采樣器參數(shù)
swr_alloc_set_opts2(&swrContext,
&srcChannelLayout, playSampleFmt, playSampleRate,
&srcChannelLayout, AVSampleFormat(srcSampleFmt), srcSampleRate,
0, nullptr);
//初始化重采樣器
swr_init(swrContext);
//重采樣流程
// 計(jì)算重采樣后要輸出多少樣本數(shù)
delay = swr_get_delay(swrContext, sample_rate);
out_samples = (int) av_rescale_rnd(
nb_samples + delay,
playSampleRate,
sample_rate,
AV_ROUND_DOWN);
// 判斷預(yù)測(cè)的輸出樣本數(shù)是否>本次任務(wù)的最大樣本數(shù)
if (out_samples > maxOutSamples) {
// 釋放緩沖區(qū),重新初始化緩沖區(qū)大小
av_freep(&audioBuffer);
audioBufferSize = av_samples_get_buffer_size(nullptr, srcChannels, out_samples, playSampleFmt, 0);
audioBuffer = (uint8_t *) av_malloc(audioBufferSize);
maxOutSamples = out_samples;
}
playSamples = swr_convert(swrContext, &audioBuffer, out_samples, (const uint8_t **) frame->data, nb_samples);
SDL的音頻回調(diào)
// SDL音頻回調(diào)函數(shù)提供了一個(gè)回調(diào)接口,可以讓我們?cè)谝纛l設(shè)備需要數(shù)據(jù)的時(shí)候向里面寫入數(shù)據(jù)
// 從而進(jìn)行聲音播放
// 回調(diào)函數(shù)示例 函數(shù)名自定義,放在類中需要加靜態(tài)(static)
void AudioOutPut::AudioCallBackFunc(void *userdata, Uint8 *stream, int len) {
//userdata 是在初始化時(shí)賦值的,有時(shí)候會(huì)把類中"this"傳進(jìn)去
//stream 是音頻流,在回調(diào)函數(shù)中需要把音頻數(shù)據(jù)寫入到stream就可以實(shí)現(xiàn)聲音播放
//len是由SDL傳入的SDL緩沖區(qū)的大小,如果這個(gè)緩沖未滿,我們就一直往里填充數(shù)據(jù)
...
}
完整模塊
AudioOutPut
//AudioOutPut.h
#include "FFmpegHeader.h"
#include "SDL.h"
#include "queue/AVFrameQueue.h"
#include <QDebug>
#include <QObject>
#include <QtGui>
#include <QtWidgets>
#include <thread>
class AudioOutPut {
private:
std::thread *m_thread;             // worker thread (mode 1): pops frames, resamples, feeds the FIFO
bool isStopped = true; // stop flag: while true the worker thread's loop exits
bool isPlaying = false;// true while playback is active
bool isPause = false;  // pause flag (not used in the code shown here)
void run();                        // worker-thread entry: frame queue -> resample -> FIFO
int resampleFrame(AVFrame *frame); // resample one frame into audioBuffer; returns byte size or -1
int sdlCallBackMode = 1;           // 1 = FIFO/queue callback mode, otherwise direct-decode callback mode
QString url; // media URL (not used in the code shown here)
uint8_t *audioBuffer; // resampled audio bytes of the current frame
int audioBufferSize = 0;// valid byte count in audioBuffer
int audioBufferIndex = 0;          // read offset into audioBuffer (direct callback mode)
SDL_mutex *mtx = nullptr;// guards `fifo` (AVAudioFifo is not thread-safe); NOTE(review): no SDL_CreateMutex call is visible in this file -- confirm it is created before use
SDL_AudioDeviceID audioDevice;     // device handle returned by SDL_OpenAudioDevice
AVAudioFifo *fifo = nullptr;// sample FIFO between the worker thread and the SDL callback
AVFrameQueue *frameQueue; // decoded-frame input queue (not owned)
SwrContext *swrContext; // resampler context
// decoder-side (source) parameters, captured from decCtx
AVCodecContext *decCtx; // audio decoder context (not owned)
int srcChannels; // source channel count
AVChannelLayout srcChannelLayout;// source channel layout
enum AVSampleFormat srcSampleFmt;// source sample format
int srcSampleRate; // source sample rate
// playback-side parameters (negotiated with SDL in init())
int maxOutSamples; // largest per-frame output sample count seen so far; sizes audioBuffer
int playSamples; // sample count produced by the most recent resample
int playSampleRate;// playback sample rate
enum AVSampleFormat playSampleFmt; // playback sample format (packed form of the source format)
int playChannels;// playback channel count
public:
AudioOutPut(AVCodecContext *dec_ctx, AVFrameQueue *frame_queue);
int init(int mode = 1); // open the SDL audio device and set up the resampler; mode 1 = FIFO callback
static void AudioCallBackFunc(void *userdata, Uint8 *stream, int len); // SDL trampoline; userdata carries `this`
// SDL callback body: direct decode-on-demand variant
void AudioCallBack(Uint8 *stream, int len);
// SDL callback body: FIFO/queue variant
void AudioCallBackFromQueue(Uint8 *stream, int len);
int start(); // unpause the device and (mode 1) launch the worker thread
};
//AudioOutPut.cpp
#include "AudioOutPut.h"
/// Capture the source-stream audio parameters from the decoder context.
/// @param dec_ctx     opened audio decoder context (borrowed, must outlive this object)
/// @param frame_queue decoded-frame queue produced by the decode thread (borrowed)
AudioOutPut::AudioOutPut(AVCodecContext *dec_ctx, AVFrameQueue *frame_queue)
    : m_thread(nullptr), audioBuffer(nullptr), frameQueue(frame_queue),
      swrContext(nullptr), decCtx(dec_ctx), maxOutSamples(0), playSamples(0) {
    srcSampleFmt = decCtx->sample_fmt;
    srcSampleRate = decCtx->sample_rate;
    // FIX: plain struct assignment of AVChannelLayout shares the internal
    // u.map pointer for custom layouts (FFmpeg >= 5.1); use the official
    // deep-copy API instead.
    av_channel_layout_copy(&srcChannelLayout, &decCtx->ch_layout);
    srcChannels = srcChannelLayout.nb_channels;
}
int AudioOutPut::init(int mode) {
sdlCallBackMode = mode;
// SDL init
if (SDL_Init(SDL_INIT_AUDIO) != 0) {
qDebug() << "SDL_INIT_AUDIO error";
return -1;
}
SDL_AudioSpec wanted_spec, spec;
wanted_spec.channels = decCtx->ch_layout.nb_channels;
wanted_spec.freq = decCtx->sample_rate;
SDL_AudioFormat sample_type;
switch (srcSampleFmt) {
case AV_SAMPLE_FMT_FLTP:
case AV_SAMPLE_FMT_FLT:
sample_type = AUDIO_F32SYS;
break;
case AV_SAMPLE_FMT_U8P:
case AV_SAMPLE_FMT_U8:
sample_type = AUDIO_U8;
break;
case AV_SAMPLE_FMT_S64P:
case AV_SAMPLE_FMT_S64:
case AV_SAMPLE_FMT_S32P:
case AV_SAMPLE_FMT_S32:
sample_type = AUDIO_S32SYS;
break;
case AV_SAMPLE_FMT_S16P:
case AV_SAMPLE_FMT_S16:
sample_type = AUDIO_S16SYS;
break;
default:
sample_type = AUDIO_S16SYS;
qDebug() << "不支持的采樣格式:AVSampleFormat(" << srcSampleFmt << ")";
}
wanted_spec.format = sample_type;
wanted_spec.silence = 0;
wanted_spec.callback = AudioCallBackFunc;
wanted_spec.userdata = this;
wanted_spec.samples = decCtx->frame_size;
int ret;
// ret = SDL_OpenAudio(&wanted_spec, &spec);
audioDevice = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_ANY_CHANGE);
if (audioDevice == 0) {
qDebug() << "SDL_OpenAudio error";
return -1;
}
playChannels = spec.channels;
playSampleRate = spec.freq;
playSampleFmt = av_get_packed_sample_fmt(srcSampleFmt);
if (mode == 1) {
fifo = av_audio_fifo_alloc(playSampleFmt, playChannels, spec.samples * 5);
}
ret = swr_alloc_set_opts2(&swrContext,
&srcChannelLayout, playSampleFmt, playSampleRate,
&srcChannelLayout, AVSampleFormat(srcSampleFmt), srcSampleRate,
0, nullptr);
if (ret != 0) {
qDebug() << "swr_alloc_set_opts2錯(cuò)誤";
return -1;
}
if (!swrContext) {
qDebug() << "創(chuàng)建音頻重采樣上下文錯(cuò)誤 swr_alloc";
return -1;
}
ret = swr_init(swrContext);
if (ret < 0) {
qDebug() << "初始化音頻重采樣上下文錯(cuò)誤 swr_init";
return -1;
}
// 解碼器上下文保存的幀樣本數(shù)
int decCtxSamples = 1024;
if (decCtx->frame_size > 1024) {
decCtxSamples = decCtx->frame_size;
}
// 首次計(jì)算幀大小,并且開辟緩沖區(qū)
maxOutSamples = (int) av_rescale_rnd(decCtxSamples, playSampleRate, srcSampleRate, AV_ROUND_UP);
audioBufferSize = av_samples_get_buffer_size(nullptr, srcChannels, maxOutSamples, playSampleFmt, 0);
audioBuffer = (uint8_t *) av_malloc(audioBufferSize);
return 1;
}
void AudioOutPut::AudioCallBackFunc(void *userdata, Uint8 *stream, int len) {
AudioOutPut *player = (AudioOutPut *) userdata;
if (player->sdlCallBackMode == 1) {
player->AudioCallBackFromQueue(stream, len);
} else {
player->AudioCallBack(stream, len);
}
}
/// Direct-decode SDL callback: keep filling SDL's buffer (`len` bytes) from
/// audioBuffer, resampling a fresh frame whenever the local buffer runs dry.
/// SDL requires every one of the `len` bytes to be written, so any early exit
/// pads the remainder with silence.
void AudioOutPut::AudioCallBack(Uint8 *stream, int len) {
    int len1;// bytes copied into SDL's buffer this iteration
    while (len > 0) {
        /* audioBufferIndex / audioBufferSize delimit the unread portion of our
         * own buffer; once index >= size the buffer is empty and a new frame
         * must be pulled from the queue and resampled. */
        if (audioBufferIndex >= audioBufferSize) {
            AVFrame *frame = frameQueue->pop(10);// 10 ms timeout
            if (frame) {
                audioBufferSize = resampleFrame(frame);
                // FIX: release the frame's payload after resampling (it was
                // leaked here, while run() does unref in queue mode).
                // NOTE(review): whether the AVFrame struct itself must also be
                // freed depends on AVFrameQueue's ownership contract -- confirm.
                av_frame_unref(frame);
                /* resample failure: play a short block of silence instead */
                if (audioBufferSize <= 0) {
                    audioBufferSize = 1024;
                    memset(audioBuffer, 0, audioBufferSize);
                }
            } else {
                // FIX: no frame available. The old code reset the index and
                // replayed the stale buffer contents; output silence for the
                // rest of SDL's buffer instead.
                memset(stream, 0, len);
                return;
            }
            audioBufferIndex = 0;
        }
        /* copy as much as fits into SDL's remaining space; leftovers are
         * copied on the next loop iteration / next callback */
        len1 = audioBufferSize - audioBufferIndex;
        if (len1 > len) {
            len1 = len;
        }
        if (audioBuffer == nullptr) {
            // FIX: never leave SDL's buffer uninitialized on the bail-out path.
            memset(stream, 0, len);
            return;
        }
        memcpy(stream, (uint8_t *) audioBuffer + audioBufferIndex, len1);
        len -= len1;
        stream += len1;
        audioBufferIndex += len1;
    }
}
/// FIFO-mode SDL callback: read samples written by the worker thread.
/// AVAudioFifo is not thread-safe and run() writes it concurrently, so the
/// read happens under `mtx`.
/// FIX: the sample count is now derived from `len` (SDL's actual buffer size)
/// instead of trusting `playSamples` — the size of the *last resampled frame*,
/// which need not match SDL's buffer — and underruns are padded with silence,
/// since SDL requires all `len` bytes to be written.
void AudioOutPut::AudioCallBackFromQueue(Uint8 *stream, int len) {
    const int bytesPerSampleFrame = playChannels * av_get_bytes_per_sample(playSampleFmt);
    if (bytesPerSampleFrame <= 0) {
        memset(stream, 0, len);
        return;
    }
    const int wanted = len / bytesPerSampleFrame;
    SDL_LockMutex(mtx);
    // packed format => single data plane, so &stream is a valid plane array
    int got = av_audio_fifo_read(fifo, (void **) &stream, wanted);
    SDL_UnlockMutex(mtx);
    if (got < 0) {
        got = 0;// read error: treat as empty and emit silence
    }
    if (got < wanted) {
        memset(stream + got * bytesPerSampleFrame, 0, len - got * bytesPerSampleFrame);
    }
}
/// Begin playback: mark the player running, launch the worker thread in
/// FIFO mode, then unpause the SDL device so the callback starts firing.
/// @return 0 on success, -1 if the worker thread could not be created.
int AudioOutPut::start() {
    // FIX: flags must be set BEFORE the worker thread launches; the old order
    // (create thread, then isStopped = false) let run() observe the initial
    // isStopped == true and exit immediately.
    // NOTE(review): isStopped/isPlaying are plain bools shared across threads;
    // consider std::atomic<bool> -- confirm against the rest of the project.
    isStopped = false;
    isPlaying = true;
    if (sdlCallBackMode == 1) {
        m_thread = new std::thread(&AudioOutPut::run, this);
        if (!m_thread->joinable()) {
            qDebug() << "AudioOutPut音頻幀處理線程創(chuàng)建失敗";
            isStopped = true;
            isPlaying = false;
            return -1;
        }
    }
    // Unpause last so the callback only runs once the producer is in place.
    SDL_PauseAudioDevice(audioDevice, 0);
    return 0;
}
void AudioOutPut::run() {
AVFrame *frame;
while (!isStopped) {
frame = frameQueue->pop(10);
if (frame) {
audioBufferSize = resampleFrame(frame);
while (true) {
SDL_LockMutex(mtx);
if (av_audio_fifo_space(fifo) >= playSamples) {
av_audio_fifo_write(fifo, (void **) &audioBuffer, playSamples);
SDL_UnlockMutex(mtx);
av_frame_unref(frame);
break;
}
SDL_UnlockMutex(mtx);
//隊(duì)列可用空間不足則延時(shí)等待
SDL_Delay((double) playSamples / playSampleRate);
}
}
}
}
// Resample one decoded frame into audioBuffer (packed playSampleFmt at
// playSampleRate), growing the buffer whenever a frame would not fit.
// Returns the exact byte size of the converted data, or -1 on failure.
int AudioOutPut::resampleFrame(AVFrame *frame) {
int64_t delay; // samples still buffered inside the resampler from prior calls
int out_samples;// upper-bound estimate of converted output samples
int sample_rate;// this frame's source sample rate
int nb_samples; // this frame's source sample count
sample_rate = frame->sample_rate;
nb_samples = frame->nb_samples;
// Estimate how many output samples this conversion can produce:
// (input samples + resampler backlog) rescaled to the playback rate.
delay = swr_get_delay(swrContext, sample_rate);
out_samples = (int) av_rescale_rnd(
nb_samples + delay,
playSampleRate,
sample_rate,
AV_ROUND_DOWN);
// Grow the output buffer if this frame exceeds the largest seen so far.
if (out_samples > maxOutSamples) {
// Free and reallocate at the new size; maxOutSamples only ever grows.
// NOTE(review): av_malloc's result is not null-checked here -- confirm
// allocation-failure handling is acceptable for this project.
av_freep(&audioBuffer);
audioBufferSize = av_samples_get_buffer_size(nullptr, srcChannels, out_samples, playSampleFmt, 0);
audioBuffer = (uint8_t *) av_malloc(audioBufferSize);
maxOutSamples = out_samples;
}
// Output layout equals the source layout (see init), so srcChannels is the
// correct channel count for the packed output plane.
playSamples = swr_convert(swrContext, &audioBuffer, out_samples, (const uint8_t **) frame->data, nb_samples);
if (playSamples <= 0) {
return -1;
}
// align=1 yields the exact (unpadded) byte count of the converted samples;
// the buffer itself was sized with default alignment (align=0), which is >=.
return av_samples_get_buffer_size(nullptr, srcChannels, playSamples, playSampleFmt, 1);
}
PlayerMain
添加音頻輸出代碼
AudioOutPut *audioOutPut;
audioOutPut = new AudioOutPut(audioDecodeThread->dec_ctx, &audioFrameQueue);
audioOutPut->init(1);
audioOutPut->start();
測(cè)試運(yùn)行結(jié)果
如果需要同時(shí)執(zhí)行視頻和音頻的輸出,記得要在解復(fù)用模塊那把限制隊(duì)列大小的位置把視頻隊(duì)列的大小限制給去掉。
目前只是實(shí)現(xiàn)了音頻播放和視頻渲染顯示畫面,但是可以看到音頻和視頻是不同步的,下一章我們就要讓音頻和視頻同步起來。
播放器開發(fā)(六):音頻幀處理并用SDL播放
到了這里,關(guān)于播放器開發(fā)(六):音頻幀處理并用SDL播放的文章就介紹完了。如果您還想了解更多內(nèi)容,請(qǐng)?jiān)谟疑辖撬阉鱐OY模板網(wǎng)以前的文章或繼續(xù)瀏覽下面的相關(guān)文章,希望大家以后多多支持TOY模板網(wǎng)!