In the previous lesson we captured the camera and microphone data; in this lesson we look at how to combine the two and push the result to an RTMP server. The workflow for pushing the muxed audio/video stream to an RTMP server address is as follows:
1. Create the output stream
//Initialize the output format context
avformat_alloc_output_context2(&outFormatCtx, NULL, "flv", outFileName);
outFormat = outFormatCtx->oformat;
//Hint the muxer's default codecs (on FFmpeg 5 and later oformat is const, so these two assignments can simply be dropped; the encoders are chosen explicitly below anyway)
outFormat->video_codec = AV_CODEC_ID_H264;
outFormat->audio_codec = AV_CODEC_ID_AAC;
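Note that for live streaming, outFileName is assumed to hold the RTMP publish address rather than a local file name; the address below is only a placeholder for illustration:
//outFileName is e.g. "rtmp://127.0.0.1/live/stream"
//(placeholder: server, application and stream key depend on your own RTMP server)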
2. Create the video encoder
vEncodec = avcodec_find_encoder(AV_CODEC_ID_H264);
vEncodeCtx = avcodec_alloc_context3(vEncodec);
vEncodeCtx->codec_id = vEncodec->id;
vEncodeCtx->codec_type = AVMEDIA_TYPE_VIDEO;
vEncodeCtx->bit_rate = 1000000;
vEncodeCtx->width = backWidth;
vEncodeCtx->height = backHeight;
vEncodeCtx->time_base = { 1, 25 };
vEncodeCtx->framerate = { 25, 1 };
vEncodeCtx->gop_size = 25 * 10;                      //one keyframe roughly every 10 seconds
vEncodeCtx->pix_fmt = *vEncodec->pix_fmts;           //first format the encoder supports (typically YUV420P for libx264)
vEncodeCtx->max_b_frames = 0;                        //no B-frames keeps latency low
vEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;    //FLV needs the extradata in the stream header
//x264 private options for low-latency live encoding
av_dict_set(&vEncodeOpts, "profile", "baseline", 0);
av_dict_set(&vEncodeOpts, "preset", "ultrafast", 0);
av_dict_set(&vEncodeOpts, "tune", "zerolatency", 0);
avcodec_open2(vEncodeCtx, vEncodec, &vEncodeOpts);
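The encoder expects frames in its own pixel format, so camera frames captured in another format (YUYV, NV12, etc.) have to be converted with sws_scale first. A minimal sketch of allocating the frame that is handed to the encoder in step 7 (the name deVideoFrame is reused from there; the conversion itself is assumed to happen as in the previous lesson):
deVideoFrame = av_frame_alloc();
deVideoFrame->format = vEncodeCtx->pix_fmt;   //must match the encoder, typically YUV420P
deVideoFrame->width  = vEncodeCtx->width;
deVideoFrame->height = vEncodeCtx->height;
av_frame_get_buffer(deVideoFrame, 0);         //allocate the Y/U/V planes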
3. Create the audio encoder
//Audio encoder
aEncodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
aEncodeCtx = avcodec_alloc_context3(aEncodec);
aEncodeCtx->bit_rate = 64000;
aEncodeCtx->sample_rate = 44100;
aEncodeCtx->block_align = 0;
aEncodeCtx->sample_fmt = AV_SAMPLE_FMT_FLTP;   //FFmpeg's native AAC encoder only accepts planar float
aEncodeCtx->channel_layout = AV_CH_LAYOUT_STEREO;
aEncodeCtx->channels = 2;
aEncodeCtx->time_base.num = 1;
aEncodeCtx->time_base.den = aEncodeCtx->sample_rate;
aEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
avcodec_open2(aEncodeCtx, aEncodec, NULL);
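AAC works on fixed blocks of aEncodeCtx->frame_size samples (1024 for the native encoder), and microphone data usually arrives as interleaved S16, so it has to be converted to planar float with swr_convert and buffered in frame_size chunks. A minimal sketch of the frame handed to the encoder in step 7 (deAudioFrame reuses the name from there):
deAudioFrame = av_frame_alloc();
deAudioFrame->format         = aEncodeCtx->sample_fmt;      //AV_SAMPLE_FMT_FLTP
deAudioFrame->channel_layout = aEncodeCtx->channel_layout;  //stereo
deAudioFrame->sample_rate    = aEncodeCtx->sample_rate;
deAudioFrame->nb_samples     = aEncodeCtx->frame_size;      //1024 samples per AAC frame
av_frame_get_buffer(deAudioFrame, 0);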
4. Create the output video stream
//Create the output video stream
videoStream = avformat_new_stream(outFormatCtx, vEncodec);
videoStream->id = outFormatCtx->nb_streams - 1;
avcodec_parameters_from_context(videoStream->codecpar, vEncodeCtx);
videoStream->codecpar->codec_tag = 0;   //let the FLV muxer choose its own tag
5. Create the output audio stream
//Create the output audio stream
audioStream = avformat_new_stream(outFormatCtx, NULL);
audioStream->id = outFormatCtx->nb_streams - 1;
avcodec_parameters_from_context(audioStream->codecpar, aEncodeCtx);
audioStream->codecpar->codec_tag = 0;   //let the FLV muxer choose its own tag
6. Open the output and write the header
//Open the output
av_dump_format(outFormatCtx, 0, outFileName, 1);
ret = avio_open2(&outFormatCtx->pb, outFileName, AVIO_FLAG_WRITE, nullptr, nullptr);   //write-only access is enough for publishing
//Write the header
ret = avformat_write_header(outFormatCtx, NULL);
7. Package the video and audio frames and send them to their encoders
ret = avcodec_send_frame(vEncodeCtx, deVideoFrame);        //feed one frame in the encoder's pixel format
ret = avcodec_receive_packet(vEncodeCtx, &enVideoPacket);  //may return AVERROR(EAGAIN) while the encoder buffers
ret = avcodec_send_frame(aEncodeCtx, deAudioFrame);        //feed frame_size samples in planar float
ret = avcodec_receive_packet(aEncodeCtx, &enAudioPacket);
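Before the packets can be written in step 8, their timestamps have to be converted from the encoder time base to the output stream's time base, and stream_index must point at the right stream; this part is not shown above, so here is a sketch. It assumes the frame pts were set before avcodec_send_frame, e.g. +1 per video frame in {1, 25} and +frame_size per audio frame in {1, 44100}:
//video packet: encoder time base {1, 25} -> stream time base (1/1000 for FLV)
av_packet_rescale_ts(&enVideoPacket, vEncodeCtx->time_base, videoStream->time_base);
enVideoPacket.stream_index = videoStream->index;
//audio packet: encoder time base {1, 44100} -> stream time base
av_packet_rescale_ts(&enAudioPacket, aEncodeCtx->time_base, audioStream->time_base);
enAudioPacket.stream_index = audioStream->index;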
8. Push the audio and video packets separately
if (enVideoPacket.size > 0){
ret = av_interleaved_write_frame(outFormatCtx, &enVideoPacket);
}
if (enAudioPacket.size > 0){
ret = av_interleaved_write_frame(outFormatCtx, &enAudioPacket);
}
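The lesson only covers the steady-state push loop; when capture stops, the stream should still be closed cleanly. A minimal sketch of the teardown (flushing the encoders by sending a NULL frame and draining the remaining packets is omitted here):
av_write_trailer(outFormatCtx);        //finalize the FLV stream
avio_closep(&outFormatCtx->pb);        //close the RTMP connection
avformat_free_context(outFormatCtx);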
9. Play the RTMP stream to test it
Open VLC, or the RTMP player written in Chapter 1, and speak into the microphone; if you can hear your voice and see the camera picture in the player, the push is working.
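ffplay from the FFmpeg command-line tools also works for a quick check; the address below is only an example and must match whatever URL you pushed to:
ffplay rtmp://127.0.0.1/live/stream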