Today I'd like to share a demo that records audio with Qt, pushes the stream to an nginx server with FFmpeg, and finally pulls the stream with VLC for playback.
First, recording audio with Qt. Qt audio capture starts by describing the audio parameters with a QAudioFormat. The main settings are:
QAudioFormat fmt;
fmt.setSampleRate(sampleRate);// sample rate: number of samples captured per second, commonly 44100
fmt.setChannelCount(channels); // number of audio channels
fmt.setSampleSize(16); // size of one sample in bits
fmt.setCodec("audio/pcm"); // codec; most sound cards only support pcm, and the parameters a card supports can also be queried from the device
fmt.setByteOrder(QAudioFormat::LittleEndian); // little-endian or big-endian storage
fmt.setSampleType(QAudioFormat::UnSignedInt); // sample type for the 16-bit samples (note: AV_SAMPLE_FMT_S16 used later is signed, so QAudioFormat::SignedInt may be the closer match)
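To put these numbers together: each 16-bit sample is 2 bytes per channel, so with 2 channels at 44100 Hz one second of captured PCM amounts to 44100 × 2 × 2 = 176,400 bytes.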
Then QAudioDeviceInfo is used to check whether the default input device supports this configuration; if it does not, fall back to the nearest format the device does support.
QAudioDeviceInfo info = QAudioDeviceInfo::defaultInputDevice();
if (!info.isFormatSupported(fmt))
{
cout << "Audio format not support!" << endl;
fmt = info.nearestFormat(fmt);
}
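As noted in the comment above, the parameters a sound card actually supports can also be enumerated before picking a format. A minimal sketch (assuming the same Qt headers as the full listing below, plus QAudioDeviceInfo) might look like this:
// Hedged sketch: list the input devices and print what they support
// may require #include <QAudioDeviceInfo>
for (const QAudioDeviceInfo &dev : QAudioDeviceInfo::availableDevices(QAudio::AudioInput))
{
    cout << dev.deviceName().toStdString() << endl;
    for (int rate : dev.supportedSampleRates())
        cout << "  sample rate: " << rate << endl;
    for (const QString &codec : dev.supportedCodecs())
        cout << "  codec: " << codec.toStdString() << endl;
}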
Recording is then started with QAudioInput, and the captured audio is read from the QIODevice that start() returns, as shown below.
//read one frame of audio at a time; bytesReady() is not an exact measure, so it is only used as a readiness check
if (input->bytesReady() < readSize)
{
QThread::msleep(1);
continue;
}
int size = 0;
while (size != readSize)
{
int len = io->read(buf + size, readSize - size);
if (len < 0)break;
size += len;
}
if (size != readSize)continue;
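The readSize checked above is the number of bytes in one audio frame; in the full listing below it is computed from the frame's sample count, channel count, and bytes per sample:
int readSize = pcm->nb_samples*channels*sampleByte; // 1024 samples * 2 channels * 2 bytes = 4096 bytes per frame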
That completes audio capture with Qt. Next comes pushing the stream with FFmpeg. The functions called are the same APIs used for video streaming in the earlier article; only some of the parameter configuration changes.
Because the format used when streaming (the AAC encoder's input) is AV_SAMPLE_FMT_FLTP, which does not match the AV_SAMPLE_FMT_S16 data captured by Qt, the captured audio has to be resampled.
FFmpeg's API is used for the resampling. First the resampling context is initialized, mainly with swr_alloc_set_opts(), whose parameters are set as follows:
SwrContext *asc = NULL;
asc = swr_alloc_set_opts(asc,
av_get_default_channel_layout(channels), AV_SAMPLE_FMT_FLTP, sampleRate, //output format
av_get_default_channel_layout(channels), AV_SAMPLE_FMT_S16, sampleRate,//input format
0,0);
if (!asc)
{
cout << "swr_alloc_set_opts failed!"<<endl;
getchar();
return -1;
}
This mainly configures the output and input parameters; the context is then initialized with swr_init().
Next, allocate the output frame for the resampled audio; it is configured as follows:
AVFrame *pcm = av_frame_alloc();
pcm->format = outSampleFmt;
pcm->channels = channels;
pcm->channel_layout = av_get_default_channel_layout(channels);
pcm->nb_samples = 1024;//samples per channel in one audio frame
ret = av_frame_get_buffer(pcm, 0); //allocate the buffers for pcm
if (ret != 0)
{
char err[1024] = { 0 };
av_strerror(ret, err, sizeof(err) - 1);
cout << err << endl;
getchar();
return -1;
}
Then swr_convert() performs the conversion and writes the converted data into pcm.
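In the capture loop this ends up looking like the excerpt below (taken from the full listing further down), where buf holds the interleaved S16 samples read from Qt and pcm is the planar output frame allocated above:
const uint8_t *indata[AV_NUM_DATA_POINTERS] = { 0 };
indata[0] = (uint8_t *)buf; // interleaved S16 input occupies a single plane
int len = swr_convert(asc, pcm->data, pcm->nb_samples, // output buffers and max output samples per channel
    indata, pcm->nb_samples); // input buffers and input samples per channel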
Next the audio pts has to be computed; the main calculation is:
//pts calculation
//nb_samples / sample_rate = duration of one audio frame in seconds
//pts in time_base units = seconds * time_base.den
pcm->pts = apts;
apts += av_rescale_q(pcm->nb_samples, {1,sampleRate},ac->time_base);
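As a concrete example: with nb_samples = 1024 and sampleRate = 44100, one frame lasts 1024 / 44100 ≈ 23.2 ms. FFmpeg normally sets an audio encoder's time_base to {1, sample_rate} after avcodec_open2(), in which case av_rescale_q(1024, {1,44100}, {1,44100}) is simply 1024, so apts advances by 1024 per frame; if the time_base were different, av_rescale_q would convert the value accordingly.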
Finally the resampled audio is encoded and pushed, which works the same way as the earlier video streaming; the full parameter configuration can be found in the code I share below:
#include <QtCore/QCoreApplication>
#include <QAudioInput>
#include <QThread>
#include <iostream>
extern "C"
{
#include "libswresample/swresample.h"
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"swresample.lib")
#pragma comment(lib,"avutil.lib")
#pragma comment(lib, "avcodec.lib")
using namespace std;
int main(int argc, char *argv[])
{
QCoreApplication a(argc, argv);
//register all codecs
avcodec_register_all();
//register all muxers and demuxers
av_register_all();
//initialize network protocols
avformat_network_init();
const char *outUrl = "rtmp://192.168.198.128/live";
int sampleRate = 44100;
int channels = 2;
int sampleByte = 2;
AVSampleFormat inSampleFmt = AV_SAMPLE_FMT_S16;
AVSampleFormat outSampleFmt = AV_SAMPLE_FMT_FLTP;
///1 Start Qt audio capture
QAudioFormat fmt;
//sample rate
fmt.setSampleRate(sampleRate);
//channel count
fmt.setChannelCount(channels);
//sample size in bits
fmt.setSampleSize(sampleByte *8);
//codec
fmt.setCodec("audio/pcm");
//byte order
fmt.setByteOrder(QAudioFormat::LittleEndian);
fmt.setSampleType(QAudioFormat::UnSignedInt);
QAudioDeviceInfo info = QAudioDeviceInfo::defaultInputDevice();
if (!info.isFormatSupported(fmt))
{
cout << "Audio format not support!" << endl;
fmt = info.nearestFormat(fmt);
}
cout << "Audio format success" << endl;
QAudioInput *input = new QAudioInput(fmt);
//start recording audio
QIODevice *io= input->start();
///2 Audio resampling
SwrContext *asc = NULL;
asc = swr_alloc_set_opts(asc,
av_get_default_channel_layout(channels), AV_SAMPLE_FMT_FLTP, sampleRate, //output format
av_get_default_channel_layout(channels), AV_SAMPLE_FMT_S16, sampleRate,//input format
0,0);
if (!asc)
{
cout << "swr_alloc_set_opts failed!"<<endl;
getchar();
return -1;
}
int ret = swr_init(asc);
if (ret != 0)
{
char err[1024] = { 0 };
av_strerror(ret, err, sizeof(err) - 1);
cout << err << endl;
getchar();
return -1;
}
cout << "音頻重采樣 上下文初始化成功" << endl;
///3 音頻重采樣輸出空間分配
AVFrame *pcm = av_frame_alloc();
pcm->format = outSampleFmt;
pcm->channels = channels;
pcm->channel_layout = av_get_default_channel_layout(channels);
pcm->nb_samples = 1024;//samples per channel in one audio frame
ret = av_frame_get_buffer(pcm, 0); //allocate the buffers for pcm
if (ret != 0)
{
char err[1024] = { 0 };
av_strerror(ret, err, sizeof(err) - 1);
cout << err << endl;
getchar();
return -1;
}
///4 Initialize the audio encoder
AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
if (!codec)
{
cout << "avcodec_find_encoder failed!" << endl;
getchar();
return -1;
}
//audio encoder context
AVCodecContext *ac = avcodec_alloc_context3(codec);
if (!ac)
{
cout << "avcodec_alloc_context3 failed!" << endl;
getchar();
return -1;
}
cout << "avcodec_alloc_context3 success!" << endl;
ac->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
ac->thread_count = 8;
//audio parameters
ac->bit_rate = 40000;
ac->sample_rate = sampleRate;
ac->sample_fmt = AV_SAMPLE_FMT_FLTP;
ac->channels = channels;
ac->channel_layout = av_get_default_channel_layout(channels);
//open the encoder
ret = avcodec_open2(ac, 0, 0);
if (ret != 0)
{
char err[1024] = { 0 };
av_strerror(ret, err, sizeof(err) - 1);
cout << err << endl;
getchar();
return -1;
}
cout << "avcodec_open2 success!" << endl;
///5 Muxer and audio stream configuration
//a. create the output muxer context
AVFormatContext *ic = NULL;
ret = avformat_alloc_output_context2(&ic, 0, "flv", outUrl);
if (ret != 0)
{
char buf[1024] = { 0 };
av_strerror(ret, buf, sizeof(buf) - 1);
cout << buf << endl;
getchar();
return -1;
}
cout << "avformat_alloc_output_context2 success!" << endl;
//b. add the audio stream
AVStream *as = avformat_new_stream(ic, NULL);
if (!as)
{
throw exception("avformat_new_stream failed!");
}
cout << "avformat_new_stream success!" << endl;
as->codecpar->codec_tag = 0;
//copy parameters from the encoder
avcodec_parameters_from_context(as->codecpar, ac);
av_dump_format(ic, 0, outUrl, 1);
///6 Open the rtmp network output IO
ret = avio_open(&ic->pb, outUrl, AVIO_FLAG_WRITE);
if (ret != 0)
{
char buf[1024] = { 0 };
av_strerror(ret, buf, sizeof(buf) - 1);
cout << buf << endl;
getchar();
return -1;
}
//write the container header
ret = avformat_write_header(ic, NULL);
if (ret != 0)
{
char buf[1024] = { 0 };
av_strerror(ret, buf, sizeof(buf) - 1);
cout << buf << endl;
getchar();
return -1;
}
cout << "avformat_write_header success!" << endl;
//number of bytes to read for one frame of audio
int readSize = pcm->nb_samples*channels*sampleByte;
char *buf = new char[readSize];
int apts = 0;
AVPacket pkt = {0};
for (;;)
{
//read one frame of audio at a time
if (input->bytesReady() < readSize)
{
QThread::msleep(1);
continue;
}
int size = 0;
while (size != readSize)
{
int len = io->read(buf + size, readSize - size);
if (len < 0)break;
size += len;
}
if (size != readSize)continue;
const uint8_t *indata[AV_NUM_DATA_POINTERS] = { 0 };
indata[0] = (uint8_t *)buf;
//one frame of source data has been read
//resample it
int len = swr_convert(asc, pcm->data, pcm->nb_samples,//output buffers and sample count
indata, pcm->nb_samples
);
//pts calculation
//nb_samples / sample_rate = duration of one audio frame in seconds
//pts in time_base units = seconds * time_base.den
pcm->pts = apts;
apts += av_rescale_q(pcm->nb_samples, {1,sampleRate},ac->time_base);
int ret = avcodec_send_frame(ac, pcm);
if (ret != 0)continue;
av_packet_unref(&pkt);
ret = avcodec_receive_packet(ac, &pkt);
cout << "avcodec_receive_packet " << ret << endl;
if (ret != 0)continue;
cout << pkt.size << " " << flush;
//push the stream
pkt.pts = av_rescale_q(pkt.pts, ac->time_base, as->time_base);
pkt.dts = av_rescale_q(pkt.dts, ac->time_base, as->time_base);
pkt.duration= av_rescale_q(pkt.duration, ac->time_base, as->time_base);
ret = av_interleaved_write_frame(ic, &pkt);
if (ret == 0)
{
cout << "#" << flush;
}
}
delete[] buf;
getchar();
return a.exec();
}
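To try the whole pipeline described at the start, the push target has to be an RTMP-capable server; for nginx this typically means a build with the nginx-rtmp-module configured with an application whose name matches the live part of outUrl. Once the program is pushing, the stream can be pulled in VLC via Media -> Open Network Stream using the same rtmp://192.168.198.128/live address. Depending on the server configuration, a stream name may need to be appended to both the push and pull URLs (e.g. /live/test, used here purely as an illustrative placeholder).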
One more thing worth sharing. The first time I wrote this I did not check the return value of avcodec_receive_packet(), which caused a crash. Looking at the printed return values, the very first call returns -11 (AVERROR(EAGAIN)), which means the encoder has not produced a valid packet yet; pushing that packet anyway then leads to errors. For a detailed analysis see the CSDN article 關(guān)於FFmpeg編碼時(shí),avcodec_receive_packet返回-11的解決辦法 by 小小菜鳥(niǎo)少少煩惱.
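A slightly more robust way to structure this, instead of skipping on any non-zero return, is to treat AVERROR(EAGAIN) explicitly as "the encoder needs more input" and drain every packet that is ready. A minimal sketch of how the send/receive part of the loop above could look (same variables as the listing; an alternative arrangement, not the code this article was originally written against):
ret = avcodec_send_frame(ac, pcm);
if (ret < 0) continue; // could not queue this frame, skip it
for (;;)
{
    int ret2 = avcodec_receive_packet(ac, &pkt);
    if (ret2 == AVERROR(EAGAIN) || ret2 == AVERROR_EOF)
        break; // not an error: the encoder simply has nothing to output yet
    if (ret2 < 0)
        break; // a real error
    pkt.pts = av_rescale_q(pkt.pts, ac->time_base, as->time_base);
    pkt.dts = av_rescale_q(pkt.dts, ac->time_base, as->time_base);
    pkt.duration = av_rescale_q(pkt.duration, ac->time_base, as->time_base);
    av_interleaved_write_frame(ic, &pkt); // takes ownership of the packet data
}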
That concludes this installment of the audio and video development series (10): Qt-based audio streaming.