日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問(wèn) 生活随笔!

生活随笔

當(dāng)前位置: 首頁(yè) > 编程资源 > 编程问答 >内容正文

编程问答

FFmpeg基础:音视频同步播放

發(fā)布時(shí)間:2023/12/29 编程问答 25 豆豆
生活随笔 收集整理的這篇文章主要介紹了 FFmpeg基础:音视频同步播放 小編覺(jué)得挺不錯(cuò)的,現(xiàn)在分享給大家,幫大家做個(gè)參考.

文章目錄

    • 定義全局?jǐn)?shù)據(jù)類(lèi)
    • 定義數(shù)據(jù)隊(duì)列
    • 定義SDL庫(kù)初始化操作
    • 定義音視頻流解析函數(shù)
    • 定義解封裝線程和視頻解碼線程
    • 定義音視頻的解碼函數(shù)
    • 主函數(shù)事件響應(yīng)

視頻文件解復(fù)用之后視頻流和音頻流是獨(dú)立的,也是獨(dú)立播放。由于壓縮方式不同,數(shù)據(jù)格式不同,在播放的時(shí)候音頻流輸出是線性的而視頻流輸出不是線程的,這就會(huì)導(dǎo)致視頻流和音頻流的時(shí)間偏差越來(lái)越大,最終導(dǎo)致音視頻不同步。

為了解決這個(gè)問(wèn)題,我們?cè)诓シ乓曨l文件的時(shí)候需要調(diào)整音頻或者視頻的播放速度,來(lái)實(shí)現(xiàn)兩種數(shù)據(jù)的同步。考慮到人對(duì)聲音的敏感度要強(qiáng)于視頻,頻繁調(diào)節(jié)音頻會(huì)帶來(lái)較差的觀感體驗(yàn),且音頻的播放時(shí)鐘為線性增長(zhǎng),所以一般會(huì)以音頻時(shí)鐘為參考時(shí)鐘,將視頻同步到音頻上。

這里以一個(gè)將視頻流同步到音頻流上的例子來(lái)說(shuō)明一下音視頻同步的實(shí)現(xiàn)方式。程序的架構(gòu)圖如下圖所示:

定義全局?jǐn)?shù)據(jù)類(lèi)

首先定義全局?jǐn)?shù)據(jù)類(lèi),用于不同線程之間的數(shù)據(jù)共享。主要數(shù)據(jù)結(jié)構(gòu)VideoState記錄了音視頻文件的各種上下文參數(shù)。

// define.h -- shared declarations for the audio/video sync player demo.
// NOTE(review): despite the .h naming this is C++ code (extern "C" wrapping,
// <iostream>, in-class member initializers).
#ifndef _DEFINE_H_
#define _DEFINE_H_

#include <stdio.h>
#include <assert.h>
#include <math.h>

#include <SDL.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/avutil.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavformat/avformat.h>
#include <libavutil/time.h>
}
#include <iostream>

// Samples per SDL audio callback invocation.
#define SDL_AUDIO_BUFFER_SIZE 1024
// Upper bound (bytes) for one decoded audio frame.
#define MAX_AUDIO_FRAME_SIZE 192000

#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)

// A/V drift below this many seconds is treated as "in sync".
#define AV_SYNC_THRESHOLD 0.01
// Drift beyond this many seconds is hopeless; no correction is attempted.
#define AV_NOSYNC_THRESHOLD 10.0

// Custom SDL events: periodic display refresh and application quit.
#define FF_REFRESH_EVENT (SDL_USEREVENT)
#define FF_QUIT_EVENT (SDL_USEREVENT + 1)

#define VIDEO_PICTURE_QUEUE_SIZE 1

// Thread-safe FIFO of demuxed AVPackets (one instance per stream).
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; // singly linked list head / tail
    int nb_packets;                     // packets currently queued
    int size;                           // total queued payload bytes
    SDL_mutex *mutex;                   // protects all fields
    SDL_cond *cond;                     // signaled when a packet is enqueued
} PacketQueue;

// Global player state shared by the demux, decode, audio and display threads.
typedef struct VideoState {
    AVFormatContext *pFormatCtx; // demuxing context of the input file
    int videoStreamIndex;        // index of the video stream
    int audioStreamIndex;        // index of the audio stream
    AVStream *audio_st;          // audio stream
    AVCodecContext *audio_ctx;   // audio decoder context
    PacketQueue audioq;          // queue of demuxed audio packets
    // Staging buffer of decoded PCM drained by the SDL audio callback.
    uint8_t audio_buf[192000 * 3 / 2];
    unsigned int audio_buf_size;  // valid bytes in audio_buf
    unsigned int audio_buf_index; // read offset into audio_buf
    AVFrame audio_frame;          // scratch frame for audio decoding
    AVPacket audio_pkt;           // packet currently being decoded
    uint8_t *audio_pkt_data;      // read cursor inside audio_pkt
    int audio_pkt_size;           // bytes remaining in audio_pkt
    int audio_hw_buf_size;
    struct SwrContext *audio_swr_ctx; // audio resampling context
    // Playback clocks (seconds) used for A/V synchronization.
    double audio_clock;
    double video_clock;
    double frame_timer;           // predicted display time of the next frame
    int64_t frame_last_pts;       // pts of the previously displayed frame
    // NOTE(review): parser_stream.h assigns 40e-3 to this int64_t field,
    // which truncates to 0 -- likely meant to be double. Verify before use.
    int64_t frame_last_delay;
    AVStream *video_st;           // video stream
    AVCodecContext *video_ctx;    // video decoder context
    PacketQueue videoq;           // queue of demuxed video packets
    struct SwsContext *video_sws_ctx; // pixel format / scaling context
    // Ring buffer of decoded pictures awaiting display.
    AVFrame pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size;   // pictures currently buffered
    int pictq_rindex; // read index (display side)
    int pictq_windex; // write index (decode side)
    // Lock and condition variable guarding the picture ring buffer.
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
    // Demuxing thread.
    SDL_Thread *parse_tid;
    // Video decoding thread.
    SDL_Thread *video_tid;
    // Input file name.
    char filename[1024];
    // Set to 1 to ask all threads to exit.
    int quit;
    AVFrame wanted_frame;             // target format for audio resampling
    SDL_AudioSpec wantedSpec = { 0 }; // audio format requested from SDL
    SDL_AudioSpec audioSpec = { 0 };  // audio format actually granted by SDL
} VideoState;

// NOTE(review): the variables below are *defined* (not merely declared) in a
// header; including this file from more than one translation unit would break
// the one-definition rule. It works here only because the demo is single-TU.
SDL_mutex *text_mutex;
SDL_Window *win;
SDL_Renderer *renderer;
SDL_Texture *texture;

// Global player state, allocated in main().
VideoState* g_state = NULL;

#endif

定義數(shù)據(jù)隊(duì)列

定義音視頻數(shù)據(jù)隊(duì)列操作,用來(lái)緩存音視頻數(shù)據(jù)包。

//datequeue.h #ifndef _DATA_QUEUE_H_ #define _DATA_QUEUE_H_#include "define.h"int queue_picture(VideoState *is, AVFrame *pFrame, double pts) {SDL_LockMutex(is->pictq_mutex);while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit){SDL_CondWait(is->pictq_cond, is->pictq_mutex);}SDL_UnlockMutex(is->pictq_mutex);if (is->quit)return -1;AVFrame* current_frame = &is->pictq[is->pictq_windex];int ret = av_frame_make_writable(current_frame);if (!current_frame->data ||current_frame->width != is->video_ctx->width ||current_frame->height != is->video_ctx->height) {current_frame->format = pFrame->format;current_frame->width = pFrame->width;current_frame->height = pFrame->height;int ret = av_image_alloc(current_frame->data, current_frame->linesize, is->video_ctx->width, is->video_ctx->height,is->video_ctx->pix_fmt, 32);if (is->quit) {return -1;}}//縮放視頻if (current_frame){current_frame->pts = pFrame->pts;//將圖片數(shù)據(jù)添加到幀中uint8_t *src_planes[4];int src_linesize[4];av_image_fill_arrays(src_planes, src_linesize, (const uint8_t *)pFrame->data, is->video_ctx->pix_fmt,is->video_ctx->width, is->video_ctx->height, 1);//YUV數(shù)據(jù)轉(zhuǎn)變成SDL使用的紋理數(shù)據(jù)sws_scale(is->video_sws_ctx, (uint8_t const * const *)pFrame->data,pFrame->linesize, 0, is->video_ctx->height,current_frame->data, current_frame->linesize);//通知隊(duì)列的消費(fèi)者取數(shù)據(jù)if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE){is->pictq_windex = 0;}SDL_LockMutex(is->pictq_mutex);is->pictq_size++;SDL_UnlockMutex(is->pictq_mutex);}return 0; }void packet_queue_init(PacketQueue *q) {memset(q, 0, sizeof(PacketQueue));q->mutex = SDL_CreateMutex();q->cond = SDL_CreateCond(); }//添加到數(shù)據(jù)隊(duì)列中 int packet_queue_put(PacketQueue *q, AVPacket *pkt) {AVPacketList *pkt1;AVPacket* newPkt;newPkt = (AVPacket*)av_mallocz_array(1, sizeof(AVPacket));if (av_packet_ref(newPkt, pkt) < 0)return -1;pkt1 = (AVPacketList*)av_malloc(sizeof(AVPacketList));pkt1->pkt = *newPkt;pkt1->next = NULL;SDL_LockMutex(q->mutex);if 
(!q->last_pkt)q->first_pkt = pkt1;elseq->last_pkt->next = pkt1;q->last_pkt = pkt1;q->nb_packets++;q->size += newPkt->size;SDL_CondSignal(q->cond);SDL_UnlockMutex(q->mutex);return 0; }//讀取數(shù)據(jù)包中的數(shù)據(jù) int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {AVPacketList *pkt1;int ret;SDL_LockMutex(q->mutex);while (1){pkt1 = q->first_pkt;if (pkt1) {q->first_pkt = pkt1->next;if (!q->first_pkt)q->last_pkt = NULL;q->nb_packets--;q->size -= pkt1->pkt.size;*pkt = pkt1->pkt;av_free(pkt1);ret = 1;break;}else if (!block) {ret = 0;break;}else {SDL_CondWait(q->cond, q->mutex);}}SDL_UnlockMutex(q->mutex);return ret; }#endif

定義SDL庫(kù)初始化操作

//SDL_Wraper.h #ifndef _SDL_WRAPPER_H_ #define _SDL_WRAPPER_H_ #include "define.h"void InitSDL() {//初始化SDLif (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))printf("There is something wrong with your SDL Libs. Couldn't run");//打開(kāi)音頻驅(qū)動(dòng) #ifdef _WIN32SDL_AudioInit("directsound"); #endif }//SDL顯示視頻幀信息 void video_display(VideoState *is) {SDL_Rect rect;AVFrame *vp;float aspect_ratio;int w, h, x, y;int i;vp = &is->pictq[is->pictq_rindex];if (vp){SDL_UpdateYUVTexture(texture, NULL,vp->data[0], vp->linesize[0],vp->data[1], vp->linesize[1],vp->data[2], vp->linesize[2]);rect.x = 0;rect.y = 0;rect.w = is->video_ctx->width;rect.h = is->video_ctx->height;SDL_LockMutex(text_mutex);SDL_RenderClear(renderer);SDL_RenderCopy(renderer, texture, NULL, &rect);SDL_RenderPresent(renderer);SDL_UnlockMutex(text_mutex);} } #endif

定義音視頻流解析函數(shù)

音視頻流的解析函數(shù)將音視頻流的參數(shù)解析到全局?jǐn)?shù)據(jù)結(jié)構(gòu)體中。

//parser_stream.h #ifndef _PARSER_STREAM_H_ #define _PARSER_STREAM_H_#include "define.h" #include "callback.h"int stream_component_open(VideoState *is, int stream_index) {if (stream_index < 0 || stream_index >= is->pFormatCtx->nb_streams){return -1;}//查找解碼器分配上下文const AVCodec* codec = avcodec_find_decoder(is->pFormatCtx->streams[stream_index]->codecpar->codec_id);if (!codec) {fprintf(stderr, "Unsupported codec!\n");return -1;}AVCodecContext* codecCtx = avcodec_alloc_context3(codec);if (!codecCtx){fprintf(stderr, "new codec context failed!\n");return -1;}int ret = avcodec_parameters_to_context(codecCtx, is->pFormatCtx->streams[stream_index]->codecpar);if (ret < 0){return -2;}if (avcodec_open2(codecCtx, codec, NULL) < 0){fprintf(stderr, "Unsupported codec!\n");return -1;}switch (codecCtx->codec_type){case AVMEDIA_TYPE_AUDIO:is->audio_ctx = codecCtx;//設(shè)置音頻參數(shù)轉(zhuǎn)換的上下文is->audio_swr_ctx = swr_alloc();if (is->audio_swr_ctx == NULL){return -4;}//設(shè)置通道數(shù),采樣率,采樣格式的輸入輸出格式av_opt_set_channel_layout(is->audio_swr_ctx, "in_channel_layout", codecCtx->channel_layout, 0);av_opt_set_channel_layout(is->audio_swr_ctx, "out_channel_layout", codecCtx->channel_layout, 0);av_opt_set_int(is->audio_swr_ctx, "in_sample_rate", codecCtx->sample_rate, 0);av_opt_set_int(is->audio_swr_ctx, "out_sample_rate", codecCtx->sample_rate, 0);av_opt_set_sample_fmt(is->audio_swr_ctx, "in_sample_fmt", codecCtx->sample_fmt, 0);av_opt_set_sample_fmt(is->audio_swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_FLT, 0);ret = swr_init(is->audio_swr_ctx);if (ret != 0){return -5;}//打開(kāi)音響設(shè)備memset(&is->wantedSpec, 0, sizeof(is->wantedSpec));is->wantedSpec.channels = codecCtx->channels;is->wantedSpec.freq = codecCtx->sample_rate;is->wantedSpec.format = AUDIO_S16SYS;is->wantedSpec.silence = 0;is->wantedSpec.samples = SDL_AUDIO_BUFFER_SIZE;is->wantedSpec.userdata = codecCtx; //音頻流的上下文is->wantedSpec.callback = audio_callback; //設(shè)置數(shù)據(jù)包的回調(diào)函數(shù)if (SDL_OpenAudio(&is->wantedSpec, 
&is->audioSpec) < 0){printf("Failed to open audio");return -6;}packet_queue_init(&is->audioq);is->wanted_frame.format = AV_SAMPLE_FMT_S16;is->wanted_frame.sample_rate = is->audioSpec.freq;is->wanted_frame.channel_layout = av_get_default_channel_layout(is->audioSpec.channels);is->wanted_frame.channels = is->audioSpec.channels;is->audioStreamIndex = stream_index;is->audio_st = is->pFormatCtx->streams[stream_index];is->audio_buf_size = 0;is->audio_buf_index = 0;memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));SDL_PauseAudio(0);break;//對(duì)視頻數(shù)據(jù)進(jìn)行處理case AVMEDIA_TYPE_VIDEO:is->video_ctx = codecCtx;is->video_st = is->pFormatCtx->streams[stream_index];is->videoStreamIndex = stream_index;is->frame_timer = (double)av_gettime() / 1000000.0;is->frame_last_delay = 40e-3;packet_queue_init(&is->videoq);is->video_sws_ctx = sws_getContext(is->video_ctx->width, is->video_ctx->height,is->video_ctx->pix_fmt, is->video_ctx->width,is->video_ctx->height, AV_PIX_FMT_YUV420P,SWS_BILINEAR, NULL, NULL, NULL);break;default:break;} } #endif

定義解封裝線程和視頻解碼線程

解封裝線程負(fù)責(zé)解析視頻文件并讀取數(shù)據(jù)包到不同的隊(duì)列中。視頻解碼線程負(fù)責(zé)將視頻數(shù)據(jù)包解析成SDL能識(shí)別的圖片數(shù)據(jù)類(lèi)型。

//thread.h #ifndef _THREAD_H_ #define _THREAD_H_ #include "define.h" #include "datequeue.h" #include "parser_stream.h"double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) {double frame_delay;if (pts != 0){is->video_clock = pts;}else{pts = is->video_clock;}//更新幀的時(shí)鐘frame_delay = av_q2d(is->video_ctx->time_base);frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);is->video_clock += frame_delay;return pts; }int decode_video_thread(void *arg) {VideoState *is = (VideoState *)arg;AVPacket pkt1, packet;int64_t pts = 0;int ret = -1;AVFrame *pFrame = av_frame_alloc();while (1){//從隊(duì)列中提取數(shù)據(jù)包if (packet_queue_get(&is->videoq, &packet, 1) < 0){continue;}int ret = avcodec_send_packet(is->video_ctx, &packet);ret = avcodec_receive_frame(is->video_ctx, pFrame);if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){continue;}if (ret < 0){continue;}pts = pFrame->pts;//* av_q2d(is->video_st->time_base);;//同步視頻pts = synchronize_video(is, pFrame, pts);if (queue_picture(is, pFrame, pts) < 0){break;}av_packet_unref(&packet);}av_frame_free(&pFrame);return 0; }int demux_thread(void *arg) {int ret = -1;VideoState *is = (VideoState*)arg;AVPacket packet;AVFrame *pFrame = NULL;//打開(kāi)上下文解析數(shù)據(jù)流if (avformat_open_input(&is->pFormatCtx, is->filename, NULL, NULL) != 0)return -1;if (avformat_find_stream_info(is->pFormatCtx, NULL)<0)return -1;//查音視頻流的索引for (int i = 0; i<is->pFormatCtx->nb_streams; i++){if (is->pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {is->videoStreamIndex = i;}if (is->pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {is->audioStreamIndex = i;}}//解析音頻流和視頻流if (is->audioStreamIndex >= 0){stream_component_open(is, is->audioStreamIndex);}if (is->videoStreamIndex >= 0){stream_component_open(is, is->videoStreamIndex);//啟動(dòng)視頻解析線程is->video_tid = SDL_CreateThread(decode_video_thread, "decode_video_thread", is);}if (is->videoStreamIndex < 0 || is->audioStreamIndex < 0){fprintf(stderr, "%s: could 
not open codecs\n", is->filename);return -1;}pFrame = av_frame_alloc();//讀取數(shù)據(jù)包while (av_read_frame(is->pFormatCtx, &packet) >= 0){if (packet.stream_index == is->audioStreamIndex){packet_queue_put(&is->audioq, &packet);}else{packet_queue_put(&is->videoq, &packet);SDL_Delay(10);}av_packet_unref(&packet);}__FAIL://后處理清理數(shù)據(jù)if (pFrame) {av_frame_free(&pFrame);}if (is->audio_ctx){avcodec_close(is->audio_ctx);}SDL_Quit();return ret; } #endif

定義音視頻的解碼函數(shù)

解碼函數(shù)負(fù)責(zé)從數(shù)據(jù)隊(duì)列里面讀取音視頻數(shù)據(jù)并進(jìn)行渲染播放。

// callback.h -- audio decoding and the SDL audio-device callback.
#ifndef _CALL_BACK_H_
#define _CALL_BACK_H_
#include "define.h"

/*
 * Decode one audio packet into `audio_buf` (resampled to the format recorded
 * in g_state->wanted_frame) and advance g_state->audio_clock accordingly.
 * Returns the number of bytes written, or -1 when the packet queue signals
 * shutdown. Static locals keep decoding state across calls.
 *
 * NOTE(review): several things here look wrong and should be verified:
 *  - `got_frame` is initialized to 0 and never set, so the
 *    av_samples_get_buffer_size/memcpy branch is dead code;
 *  - `len1 = frame.pkt_size` is the *compressed* packet size, not the number
 *    of bytes consumed by the decoder, so the audio_pkt_data/size bookkeeping
 *    is approximate at best;
 *  - a fresh SwrContext is allocated and freed on every iteration, and the
 *    function returns after processing a single packet, making the outer
 *    while(1) effectively a single pass per call.
 */
int audio_decode_frame(AVCodecContext* aCodecCtx, uint8_t* audio_buf, int buf_size)
{
    static AVPacket pkt;
    static uint8_t* audio_pkt_data = NULL;
    static int audio_pkt_size = 0;
    static AVFrame frame;
    int len1;
    int data_size = 0;
    SwrContext* swr_ctx = NULL;
    while (1)
    {
        // Decode the packet we are holding, if any.
        while (audio_pkt_size > 0)
        {
            int got_frame = 0;
            avcodec_send_packet(aCodecCtx, &pkt);
            avcodec_receive_frame(aCodecCtx, &frame);
            len1 = frame.pkt_size;
            if (len1 < 0)
            {
                // Decode error: drop the rest of this packet.
                audio_pkt_size = 0;
                break;
            }
            // Advance the read cursor within the packet.
            audio_pkt_data += len1;
            audio_pkt_size -= len1;
            data_size = 0;
            if (got_frame)
            {
                // NOTE(review): dead branch -- got_frame is never set above.
                int linesize = 1;
                data_size = av_samples_get_buffer_size(&linesize, aCodecCtx->channels, frame.nb_samples, aCodecCtx->sample_fmt, 1);
                assert(data_size <= buf_size);
                memcpy(audio_buf, frame.data[0], data_size);
            }
            // Fill in whichever of channels/channel_layout the frame lacks.
            if (frame.channels > 0 && frame.channel_layout == 0)
                frame.channel_layout = av_get_default_channel_layout(frame.channels);
            else if (frame.channels == 0 && frame.channel_layout > 0)
                frame.channels = av_get_channel_layout_nb_channels(frame.channel_layout);
            if (swr_ctx)
            {
                swr_free(&swr_ctx);
                swr_ctx = NULL;
            }
            // Build a resampler from the frame's format to the wanted format.
            swr_ctx = swr_alloc_set_opts(NULL, g_state->wanted_frame.channel_layout, (AVSampleFormat)g_state->wanted_frame.format, g_state->wanted_frame.sample_rate,frame.channel_layout, (AVSampleFormat)frame.format, frame.sample_rate, 0, NULL);
            if (!swr_ctx || swr_init(swr_ctx) < 0)
            {
                printf("swr_init failed\n");
            }
            // NOTE(review): av_rescale_rnd's third argument should be the
            // input sample rate; wanted_frame.format is passed here -- verify.
            int dst_nb_samples = (int)av_rescale_rnd(swr_get_delay(swr_ctx, frame.sample_rate) + frame.nb_samples,g_state->wanted_frame.sample_rate, g_state->wanted_frame.format, AV_ROUND_INF);
            // Resample into the caller's buffer.
            int len2 = swr_convert(swr_ctx, &audio_buf, dst_nb_samples,(const uint8_t**)frame.data, frame.nb_samples);
            if (len2 < 0)
            {
                printf("swr_convert failed\n");
            }
            //data_size = 2 * g_state->wanted_frame.nb_samples * 2;
            data_size = g_state->wanted_frame.channels * len2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
            // Advance the audio clock by the duration of the produced bytes
            // (n = bytes per S16 sample frame).
            int n = 2 * g_state->audio_ctx->channels;g_state->audio_clock += (double)data_size /(double)(n * g_state->audio_ctx->sample_rate);
            av_packet_unref(&pkt);
            if (swr_ctx)
            {
                swr_free(&swr_ctx);
                swr_ctx = NULL;
            }
            // Return the number of resampled bytes written to audio_buf.
            return data_size;//return g_state->wanted_frame.channels * len2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
        }
        // No packet in hand: block until the demuxer queues one.
        if (packet_queue_get(&g_state->audioq, &pkt, 1) < 0)
            return -1;
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
    }
}

/*
 * SDL audio-device callback: fill `stream` with `len` bytes of PCM.
 * Refills the static staging buffer from audio_decode_frame as needed; on
 * decode failure it substitutes silence so playback timing is preserved.
 * NOTE(review): the static audio_buf_size/audio_buf_index locals are unused;
 * the g_state fields are used instead.
 */
void audio_callback(void* userdata, Uint8* stream, int len)
{
    AVCodecContext* aCodecCtx = (AVCodecContext*)userdata;
    int len1, audio_size;
    static uint8_t audio_buff[192000 * 3 / 2];
    static unsigned int audio_buf_size = 0;
    static unsigned int audio_buf_index = 0;
    SDL_memset(stream, 0, len);
    while (len > 0)
    {
        // Staging buffer exhausted: decode the next chunk.
        if (g_state->audio_buf_index >= g_state->audio_buf_size)
        {
            audio_size = audio_decode_frame(aCodecCtx, audio_buff, sizeof(audio_buff));
            if (audio_size < 0)
            {
                // Decode failed: emit a block of silence instead.
                g_state->audio_buf_size = 1024*2*2;
                memset(audio_buff, 0, g_state->audio_buf_size);
            }
            elseg_state->audio_buf_size = audio_size;
            g_state->audio_buf_index = 0;
        }
        // Copy as much as fits into SDL's buffer this round.
        len1 = g_state->audio_buf_size - g_state->audio_buf_index;
        if (len1 > len)len1 = len;
        SDL_MixAudio(stream, audio_buff + g_state->audio_buf_index, len1, SDL_MIX_MAXVOLUME);
        len -= len1;
        stream += len1;
        g_state->audio_buf_index += len1;
    }
}
#endif

主函數(shù)事件響應(yīng)

在主函數(shù)里面對(duì)各種資源進(jìn)行整合同時(shí)處理事件響應(yīng),定時(shí)刷新視頻流的顯示。

//main.cpp #include "define.h" #include "SDL_Wraper.h" #include "datequeue.h" #include "parser_stream.h" #include "callback.h" #include "thread.h"double get_audio_clock(VideoState *is) {double pts;int hw_buf_size, bytes_per_sec, n;pts = is->audio_clock;hw_buf_size = is->audio_buf_size - is->audio_buf_index;bytes_per_sec = 0;n = is->audio_ctx->channels * 2;if (is->audio_st) {bytes_per_sec = is->audio_ctx->sample_rate * n;}if (bytes_per_sec) {pts -= (double)hw_buf_size / bytes_per_sec;}return pts; }//定時(shí)發(fā)送事件 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) {SDL_Event event;event.type = FF_REFRESH_EVENT;event.user.data1 = opaque;SDL_PushEvent(&event);SDL_Delay(40);return 0; }//添加一個(gè)定時(shí)器 static void schedule_refresh(VideoState *is, int delay) {SDL_AddTimer(delay, sdl_refresh_timer_cb, is); }void video_refresh_timer(void *userdata) {VideoState *is = (VideoState *)userdata;AVFrame *vp;int64_t delay, sync_threshold, ref_clock;double actual_delay;if (is->video_st){if (is->pictq_size == 0){schedule_refresh(is, 1);}else{vp = &is->pictq[is->pictq_rindex];delay = vp->pts - is->frame_last_pts;//存儲(chǔ)pts和delay下次使用is->frame_last_delay = delay;is->frame_last_pts = vp->pts;//獲取音頻延遲時(shí)間ref_clock = get_audio_clock(is);double diff = vp->pts * av_q2d(is->video_st->time_base) - ref_clock;/* Skip or repeat the frame. Take delay into accountFFPlay still doesn't "know if this is the best guess." */sync_threshold = (delay > AV_SYNC_THRESHOLD) ? 
delay : AV_SYNC_THRESHOLD;if (fabs(diff) < AV_NOSYNC_THRESHOLD){if (diff <= -sync_threshold) {delay = 0;}else if (diff >= sync_threshold) {delay = 2 * delay;}}is->frame_timer += delay * av_q2d(is->video_st->time_base);//計(jì)算真正的延遲時(shí)間actual_delay = is->frame_timer - (av_gettime() / 1000000.0);if (actual_delay < 0.010){actual_delay = 0.010;}//std::cout << actual_delay << "frame_timer" << is->frame_timer << std::endl;schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));//顯示視頻幀video_display(is);//刷新視頻信息,為下次刷新做準(zhǔn)備if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {is->pictq_rindex = 0;}SDL_LockMutex(is->pictq_mutex);is->pictq_size--;SDL_CondSignal(is->pictq_cond);SDL_UnlockMutex(is->pictq_mutex);}}else{schedule_refresh(is, 100);}}int main(int argc, char *argv[]) {int ret = -1;SDL_Event event;if (argc < 2) {printf("Usage: command <file>\n");return ret;}//初始化SDLInitSDL();//初始化各種變量g_state = (VideoState*)av_mallocz(sizeof(VideoState));g_state->pictq_mutex = SDL_CreateMutex();g_state->pictq_cond = SDL_CreateCond();memcpy(g_state->filename, argv[1], sizeof(g_state->filename));//解封裝的線程g_state->parse_tid = SDL_CreateThread(demux_thread, "demux_thread", g_state);if (!g_state->parse_tid){av_free(g_state);goto __FAIL;}while (!g_state->video_ctx){SDL_Delay(10);}//創(chuàng)建窗口渲染視頻,在子線程里面創(chuàng)建會(huì)阻塞主線程的時(shí)間循環(huán)win = SDL_CreateWindow("Feifei Player",SDL_WINDOWPOS_UNDEFINED,SDL_WINDOWPOS_UNDEFINED,g_state->video_ctx->width, g_state->video_ctx->height,SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);renderer = SDL_CreateRenderer(win, -1, 0);texture = SDL_CreateTexture(renderer,SDL_PIXELFORMAT_IYUV,SDL_TEXTUREACCESS_STREAMING,g_state->video_ctx->width, g_state->video_ctx->height);schedule_refresh(g_state, 40);while (1){SDL_WaitEvent(&event);switch (event.type){case FF_QUIT_EVENT:case SDL_QUIT:g_state->quit = 1;goto __QUIT;break;case FF_REFRESH_EVENT:video_refresh_timer(event.user.data1);break;default:break;}} __QUIT:ret = 0; __FAIL:SDL_Quit();return ret; }

完善了音視頻同步之后,demo程序其實(shí)就是一個(gè)播放器的雛形了。我們可以使用它來(lái)播放各種視頻。這里以一個(gè)mkv格式的視頻為例進(jìn)行播放,播放效果如下所示:

總結(jié)

以上是生活随笔為你收集整理的FFmpeg基础:音视频同步播放的全部?jī)?nèi)容,希望文章能夠幫你解決所遇到的問(wèn)題。

如果覺(jué)得生活随笔網(wǎng)站內(nèi)容還不錯(cuò),歡迎將生活随笔推薦給好友。