Reviewing the old to learn the new.
It has been a long time since I last studied FFmpeg, and the codebase has changed a lot, so I spent a day going back through ffplay.
This analysis is based on the GitHub master branch as of September 22, 2018.
Overall flow
int main(int argc, char **argv)
{
...
is = stream_open(input_filename, file_iformat);// open the file and start the read/decode threads
if (!is) {
av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
do_exit(NULL);
}
event_loop(is);// main thread: video display and SDL GUI event handling
...
}
ffplay uses the SDL library as its playback framework and the FFmpeg libraries for decoding.
SDL (Simple DirectMedia Layer) is an open-source, cross-platform multimedia development library written in C. It provides functions for controlling graphics, audio, and input/output, letting developers build applications that run on multiple platforms (Linux, Windows, macOS, etc.) from the same or very similar code. SDL is widely used for games, emulators, media players, and other multimedia applications.
SDL 1.2 was released under the GNU Lesser General Public License, which means dynamically linking the library does not require opening your own source code, so even commercial games such as Quake 4 have been built with it; SDL 2.0, the version ffplay uses today, switched to the even more permissive zlib license.
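To make the division of labor concrete, here is a minimal SDL2 skeleton of the kind of window/renderer/event loop ffplay builds on. This is an illustration only, not ffplay code:
// Minimal SDL2 skeleton (illustration only, not from ffplay):
// create a window plus renderer and run a simple event loop.
#include <SDL.h>

int main(void)
{
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO) != 0)
        return 1;
    SDL_Window *win = SDL_CreateWindow("demo", SDL_WINDOWPOS_UNDEFINED,
                                       SDL_WINDOWPOS_UNDEFINED, 640, 480, 0);
    SDL_Renderer *ren = SDL_CreateRenderer(win, -1, 0);
    for (;;) {
        SDL_Event ev;
        if (SDL_PollEvent(&ev) && ev.type == SDL_QUIT)
            break;
        SDL_SetRenderDrawColor(ren, 0, 0, 0, 255);
        SDL_RenderClear(ren);
        // a video player would SDL_RenderCopy() a texture holding the decoded frame here
        SDL_RenderPresent(ren);
        SDL_Delay(10);
    }
    SDL_DestroyRenderer(ren);
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}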
stream_open:
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
VideoState *is;
is = av_mallocz(sizeof(VideoState));
if (!is)
return NULL;
is->filename = av_strdup(filename);
if (!is->filename)
goto fail;
is->iformat = iformat;
is->ytop = 0;
is->xleft = 0;
/* start video display */
//create separate decoded-frame queues for video, subtitles, and audio
if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
goto fail;
if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
goto fail;
if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
goto fail;
if (packet_queue_init(&is->videoq) < 0 ||
packet_queue_init(&is->audioq) < 0 ||
packet_queue_init(&is->subtitleq) < 0)
goto fail;
if (!(is->continue_read_thread = SDL_CreateCond())) {
av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
goto fail;
}
//initialize the clocks; each is tied to its queue's serial, which starts at 0
init_clock(&is->vidclk, &is->videoq.serial);
init_clock(&is->audclk, &is->audioq.serial);
init_clock(&is->extclk, &is->extclk.serial);
is->audio_clock_serial = -1;
if (startup_volume < 0)
av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
if (startup_volume > 100)
av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
startup_volume = av_clip(startup_volume, 0, 100);
startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
is->audio_volume = startup_volume;
is->muted = 0;
is->av_sync_type = av_sync_type;
is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);// start the read (demux) thread
if (!is->read_tid) {
av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
fail:
stream_close(is);
return NULL;
}
return is;
}
stream_open uses frame_queue_init to set up separate decoded-frame queues for audio, video, and subtitles, initializes the clocks, and starts the read_thread thread.
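For reference, the FrameQueue that frame_queue_init sets up looks roughly like this in ffplay.c (abridged); it is a small ring buffer of decoded frames, tied to the packet queue that feeds it:
// Abridged from ffplay.c (2018): a FrameQueue of decoded frames is always
// paired with the PacketQueue of undecoded packets it is fed from.
typedef struct FrameQueue {
    Frame queue[FRAME_QUEUE_SIZE]; // ring buffer of decoded frames
    int rindex;                    // read index
    int windex;                    // write index
    int size;
    int max_size;
    int keep_last;
    int rindex_shown;
    SDL_mutex *mutex;
    SDL_cond *cond;
    PacketQueue *pktq;             // the packet queue this frame queue belongs to
} FrameQueue;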
init_clock calls set_clock, which in turn calls av_gettime_relative.
av_gettime_relative returns the time in microseconds; there are two implementations in the code, explained in the comments below:
/**
* Get the current time in microseconds since some unspecified starting point.
* On platforms that support it, the time comes from a monotonic clock
* This property makes this time source ideal for measuring relative time.
* The returned values may not be monotonic on platforms where a monotonic
* clock is not available.
*/
int64_t av_gettime_relative(void);
//gettimeofday returns the time elapsed since January 1, 1970 (the Unix epoch)
int64_t av_gettime(void)
{
#if HAVE_GETTIMEOFDAY //POSIX/Linux path: tv_sec is seconds, tv_usec is microseconds, so the result is in microseconds
struct timeval tv;
gettimeofday(&tv, NULL);
return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
#elif HAVE_GETSYSTEMTIMEASFILETIME//Windows path
FILETIME ft;
int64_t t;
GetSystemTimeAsFileTime(&ft);
t = (int64_t)ft.dwHighDateTime << 32 | ft.dwLowDateTime;
return t / 10 - 11644473600000000; /* Jan 1, 1601 */
#else
return -1;
#endif
}
int64_t av_gettime_relative(void)//returns microseconds
{
//if CLOCK_MONOTONIC is defined and clock_gettime is available, use them
//clock_gettime(CLOCK_MONOTONIC, ...) counts from system boot and is not affected by the user changing the wall-clock time
#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
#ifdef __APPLE__
if (clock_gettime)
#endif
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}
#endif
//fall back to wall-clock time, deliberately offset by 42 hours (in microseconds)
return av_gettime() + 42 * 60 * 60 * INT64_C(1000000);
}
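For context, the init_clock / set_clock chain that uses av_gettime_relative looks roughly like this in ffplay.c (abridged):
// Abridged from ffplay.c: a Clock remembers the last pts it was set to and
// the drift between that pts and the system time at that moment.
static void set_clock_at(Clock *c, double pts, int serial, double time)
{
    c->pts = pts;
    c->last_updated = time;
    c->pts_drift = c->pts - time;
    c->serial = serial;
}

static void set_clock(Clock *c, double pts, int serial)
{
    double time = av_gettime_relative() / 1000000.0; // seconds
    set_clock_at(c, pts, serial, time);
}

static void init_clock(Clock *c, int *queue_serial)
{
    c->speed = 1.0;
    c->paused = 0;
    c->queue_serial = queue_serial;
    set_clock(c, NAN, -1); // starts out invalid (NaN) with serial -1
}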
Now let's look at the read_thread thread:
/* this thread gets the stream from the disk or the network */
static int read_thread(void *arg)
{
//a fair amount of setup code comes first: open the input, read stream info, etc.
...
is->realtime = is_realtime(ic);//check whether this is a real-time network stream (rtp, rtsp, sdp, udp)
...
//open each stream and start its decoder
/* open the streams */
if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
}
ret = -1;
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
}
if (is->show_mode == SHOW_MODE_NONE)
is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
}
...
if (infinite_buffer < 0 && is->realtime)//real-time stream: do not limit buffering
infinite_buffer = 1;
for (;;) {
if (is->abort_request)
break;
if (is->paused != is->last_paused) {//handle pause / resume
...
}
...
//handle seek requests
if (is->seek_req) {
...
}
//for non-real-time streams: if more than 15 MB is buffered, or the audio, video, and subtitle queues all have enough packets, wait 10 ms
/* if the queue are full, no need to read more */
if (infinite_buffer<1 &&
(is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
|| (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
/* wait 10 ms */
SDL_LockMutex(wait_mutex);
SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
SDL_UnlockMutex(wait_mutex);
continue;
}
//end of stream: loop from the beginning or exit
if (!is->paused &&
(!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
(!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
if (loop != 1 && (!loop || --loop)) {
stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
} else if (autoexit) {
ret = AVERROR_EOF;
goto fail;
}
}
//the main loop keeps reading packets into the queues, waiting when they are full,
//while the decoding/playback threads keep draining them on the other side
ret = av_read_frame(ic, pkt);//key call: read the next undecoded packet
if (ret < 0) {//error or EOF: push a null packet into each queue
if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
if (is->video_stream >= 0)
packet_queue_put_nullpacket(&is->videoq, is->video_stream);
if (is->audio_stream >= 0)
packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
if (is->subtitle_stream >= 0)
packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
is->eof = 1;
}
if (ic->pb && ic->pb->error)
break;
SDL_LockMutex(wait_mutex);
SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
SDL_UnlockMutex(wait_mutex);
continue;
} else {
is->eof = 0;
}
/* check if packet is in play range specified by user, then queue, otherwise discard */
stream_start_time = ic->streams[pkt->stream_index]->start_time;
pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;//packet timestamp: prefer the pts (presentation timestamp), fall back to the dts (decode timestamp)
//duration == AV_NOPTS_VALUE means the option was not given on the command line; otherwise duration is the requested total play time.
//The expression below takes the packet timestamp minus the stream start time, converted to seconds, i.e. the packet's position in time,
//then subtracts the requested start_time; if the result is within duration the packet is in range and gets queued, otherwise it is dropped.
pkt_in_play_range = duration == AV_NOPTS_VALUE ||
(pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
av_q2d(ic->streams[pkt->stream_index]->time_base) -
(double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
<= ((double)duration / 1000000);
if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
packet_queue_put(&is->audioq, pkt);
} else if (pkt->stream_index == is->video_stream && pkt_in_play_range
&& !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
packet_queue_put(&is->videoq, pkt);
} else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
packet_queue_put(&is->subtitleq, pkt);
} else {
av_packet_unref(pkt);
}
}
ret = 0;
fail:
...
}
It first opens each stream; stream_component_open starts an independent decoding thread per stream. The read thread itself then loops on av_read_frame, reading packets and putting them into each stream's own queue with packet_queue_put.
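The PacketQueue those packets land in is, roughly, a linked list of AVPackets protected by an SDL mutex/condition pair (abridged from the 2018 ffplay.c; the serial field is bumped on flush so stale packets can be recognized after a seek):
// Abridged from ffplay.c (2018): the per-stream queue of undecoded packets.
typedef struct MyAVPacketList {
    AVPacket pkt;
    struct MyAVPacketList *next;
    int serial;                 // queue serial at the time this packet was added
} MyAVPacketList;

typedef struct PacketQueue {
    MyAVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;                   // total byte size, checked against MAX_QUEUE_SIZE
    int64_t duration;
    int abort_request;
    int serial;                 // incremented on flush (e.g. after a seek)
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;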
The key call, av_read_frame, reads undecoded packets from the file or network stream. Its implementation belongs to the FFmpeg libraries and is not analyzed here; this article only discusses ffplay.
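For readers less familiar with the libavformat side, a minimal stand-alone demuxing loop built around av_read_frame looks roughly like this (illustration only, not ffplay code; error handling trimmed):
// Minimal libavformat demux loop (illustration, not from ffplay).
#include <libavformat/avformat.h>

int demux(const char *url)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;

    if (avformat_open_input(&ic, url, NULL, NULL) < 0)
        return -1;
    if (avformat_find_stream_info(ic, NULL) < 0)
        goto end;

    // each call returns one undecoded packet belonging to some stream
    while (av_read_frame(ic, &pkt) >= 0) {
        // ffplay would packet_queue_put() it here, keyed by pkt.stream_index
        av_packet_unref(&pkt);
    }
end:
    avformat_close_input(&ic);
    return 0;
}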
stream_component_open:
/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
...
codec = avcodec_find_decoder(avctx->codec_id);//look up the decoder
...
//each stream is decoded on its own thread
switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
...
//open the audio playback device and register the SDL audio callback sdl_audio_callback
/* prepare audio output */
if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
goto fail;
is->audio_hw_buf_size = ret;
is->audio_src = is->audio_tgt;
is->audio_buf_size = 0;
is->audio_buf_index = 0;
/* init averaging filter */
is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
is->audio_diff_avg_count = 0;
/* since we do not have a precise anough audio FIFO fullness,
we correct audio sync only if larger than this threshold */
is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
is->audio_stream = stream_index;
is->audio_st = ic->streams[stream_index];
decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
is->auddec.start_pts = is->audio_st->start_time;
is->auddec.start_pts_tb = is->audio_st->time_base;
}
if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)//start the audio decoding thread audio_thread
goto out;
SDL_PauseAudioDevice(audio_dev, 0);
break;
case AVMEDIA_TYPE_VIDEO:
//start the video decoding thread video_thread
is->video_stream = stream_index;
is->video_st = ic->streams[stream_index];
decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
goto out;
is->queue_attachments_req = 1;
break;
case AVMEDIA_TYPE_SUBTITLE:
is->subtitle_stream = stream_index;
is->subtitle_st = ic->streams[stream_index];
decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
goto out;
break;
default:
break;
}
goto out;
fail:
avcodec_free_context(&avctx);
out:
av_dict_free(&opts);
return ret;
}
Audio, video, and subtitles each have their own decoding thread.
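All of them are set up through the decoder_init and decoder_start calls seen in stream_component_open above, which in this version of ffplay.c look roughly like this (abridged):
// Abridged from ffplay.c (2018): bind a Decoder to its codec context and
// packet queue, then spawn the per-stream decoding thread.
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue,
                         SDL_cond *empty_queue_cond)
{
    memset(d, 0, sizeof(Decoder));
    d->avctx = avctx;
    d->queue = queue;
    d->empty_queue_cond = empty_queue_cond; // signalled to wake read_thread when the queue runs dry
    d->start_pts = AV_NOPTS_VALUE;
    d->pkt_serial = -1;
}

static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
{
    packet_queue_start(d->queue);
    d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
    if (!d->decoder_tid) {
        av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    return 0;
}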
Audio:
audio_open opens the audio playback device and registers the SDL audio callback sdl_audio_callback. Inside sdl_audio_callback, audio_decode_frame is called in a loop to fetch already-decoded audio frames, converting them to the required output format when necessary, and the data is then handed to SDL for playback.
audio_thread loops on decoder_decode_frame to decode the audio.
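On the SDL side, opening an audio device with a callback follows this pattern; this is a minimal sketch assuming SDL2, not ffplay's full audio_open:
// Minimal SDL2 audio-callback sketch (illustration only).
#include <SDL.h>
#include <string.h>

static void audio_callback(void *userdata, Uint8 *stream, int len)
{
    // ffplay's sdl_audio_callback fills 'stream' with decoded, converted
    // samples pulled from its sample queue; here we just output silence.
    memset(stream, 0, len);
}

static SDL_AudioDeviceID open_audio(int sample_rate, int channels)
{
    SDL_AudioSpec wanted, obtained;
    SDL_zero(wanted);
    wanted.freq     = sample_rate;
    wanted.format   = AUDIO_S16SYS;
    wanted.channels = channels;
    wanted.samples  = 1024;
    wanted.callback = audio_callback;

    SDL_AudioDeviceID dev = SDL_OpenAudioDevice(NULL, 0, &wanted, &obtained, 0);
    if (dev)
        SDL_PauseAudioDevice(dev, 0); // start the callback thread
    return dev;
}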
Video:
The video_thread thread loops on get_video_frame, which in turn calls decoder_decode_frame to decode.
Subtitles work the same way.
All three call decoder_decode_frame to decode, with the handling branched on codec_type.
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
int ret = AVERROR(EAGAIN);
for (;;) {
AVPacket pkt;
if (d->queue->serial == d->pkt_serial) {
do {
if (d->queue->abort_request)
return -1;
switch (d->avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO://receive decoded data from the decoder
ret = avcodec_receive_frame(d->avctx, frame);
if (ret >= 0) {
if (decoder_reorder_pts == -1) {
frame->pts = frame->best_effort_timestamp;
} else if (!decoder_reorder_pts) {
frame->pts = frame->pkt_dts;
}
}
break;
case AVMEDIA_TYPE_AUDIO:
ret = avcodec_receive_frame(d->avctx, frame);
if (ret >= 0) {
AVRational tb = (AVRational){1, frame->sample_rate};
if (frame->pts != AV_NOPTS_VALUE)
frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
else if (d->next_pts != AV_NOPTS_VALUE)
frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
if (frame->pts != AV_NOPTS_VALUE) {
d->next_pts = frame->pts + frame->nb_samples;
d->next_pts_tb = tb;
}
}
break;
}
if (ret == AVERROR_EOF) {
d->finished = d->pkt_serial;
avcodec_flush_buffers(d->avctx);
return 0;
}
if (ret >= 0)
return 1;
} while (ret != AVERROR(EAGAIN));
}
do {
if (d->queue->nb_packets == 0)
SDL_CondSignal(d->empty_queue_cond);
if (d->packet_pending) {
av_packet_move_ref(&pkt, &d->pkt);
d->packet_pending = 0;
} else {
if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)//fetch a packet from the queue
return -1;
}
} while (d->queue->serial != d->pkt_serial);
if (pkt.data == flush_pkt.data) {
avcodec_flush_buffers(d->avctx);
d->finished = 0;
d->next_pts = d->start_pts;
d->next_pts_tb = d->start_pts_tb;
} else {
if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
int got_frame = 0;
ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
if (ret < 0) {
ret = AVERROR(EAGAIN);
} else {
if (got_frame && !pkt.data) {
d->packet_pending = 1;
av_packet_move_ref(&d->pkt, &pkt);
}
ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
}
} else {
//send the packet to the decoder
if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
d->packet_pending = 1;
av_packet_move_ref(&d->pkt, &pkt);
}
}
av_packet_unref(&pkt);
}
}
}
packet_queue_get retrieves the undecoded packets that read_thread queued and hands them to avcodec_send_packet.
avcodec_send_packet and avcodec_receive_frame are the paired decoding APIs in libavcodec: avcodec_send_packet feeds an undecoded packet to the decoder, and avcodec_receive_frame retrieves a decoded frame.
The relationship is not one-to-one: after sending a packet you may need to call avcodec_receive_frame repeatedly, since one packet can produce zero, one, or several frames.
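A minimal stand-alone version of this send/receive pattern (illustration only; ffplay's decoder_decode_frame adds queue serials, flushing, and EOF handling on top) looks like this:
// Minimal libavcodec send/receive decode loop (illustration, not from ffplay).
#include <libavcodec/avcodec.h>

static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt); // pkt == NULL flushes the decoder
    if (ret < 0)
        return ret;

    for (;;) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;          // need more input, or fully drained
        if (ret < 0)
            return ret;        // real decoding error
        /* ...use the decoded frame here (ffplay pushes it into a FrameQueue)... */
        av_frame_unref(frame);
    }
}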
The decoded data is stored in AVFrame *frame and the function returns.
Video uses queue_picture to add the decoded picture to its frame queue; decoded audio is added to its queue with frame_queue_push(&is->sampq).
Audio playback:
In sdl_audio_callback, decoded audio is continuously pulled from the decoded-audio queue and played.
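The callback itself follows the classic pull model. Abridged from ffplay.c (error paths and the clock update at the end trimmed, buffer-size math simplified), it looks roughly like this:
// Abridged from ffplay.c: the SDL audio callback keeps the device buffer fed.
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;

    while (len > 0) {
        // refill is->audio_buf with one decoded (and, if needed, resampled) frame
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is);
            if (audio_size < 0) {
                is->audio_buf = NULL;                       // error: output silence below
                is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE; // (simplified)
            } else {
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
            memcpy(stream, is->audio_buf + is->audio_buf_index, len1);
        else {
            memset(stream, 0, len1);
            if (!is->muted && is->audio_buf)  // apply the current volume while mixing
                SDL_MixAudioFormat(stream, is->audio_buf + is->audio_buf_index,
                                   AUDIO_S16SYS, len1, is->audio_volume);
        }
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}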
Video playback:
Video is displayed from the main thread.
event_loop handles both display refresh and GUI event processing.
/* handle an event sent by the GUI */
static void event_loop(VideoState *cur_stream)//main thread: video display and SDL GUI event handling
{
SDL_Event event;
double incr, pos, frac;
for (;;) {
double x;
refresh_loop_wait_event(cur_stream, &event);//refresh the display
switch (event.type) {
case SDL_KEYDOWN://keyboard shortcuts
if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
do_exit(cur_stream);
break;
}
// If we don't yet have a window, skip all key events, because read_thread might still be initializing...
if (!cur_stream->width)
continue;
switch (event.key.keysym.sym) {
case SDLK_f:
toggle_full_screen(cur_stream);
cur_stream->force_refresh = 1;
break;
case SDLK_p:
case SDLK_SPACE:
toggle_pause(cur_stream);
break;
case SDLK_m:
toggle_mute(cur_stream);
break;
case SDLK_KP_MULTIPLY:
case SDLK_0:
update_volume(cur_stream, 1, SDL_VOLUME_STEP);
break;
case SDLK_KP_DIVIDE:
case SDLK_9:
update_volume(cur_stream, -1, SDL_VOLUME_STEP);
break;
case SDLK_s: // S: Step to next frame
step_to_next_frame(cur_stream);
break;
case SDLK_a:
stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
break;
case SDLK_v:
stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
break;
case SDLK_c:
...
break;
case SDLK_t:
stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
break;
case SDLK_w:
...
break;
case SDLK_PAGEUP:
...
break;
case SDLK_PAGEDOWN:
...
break;
case SDLK_LEFT:
incr = seek_interval ? -seek_interval : -10.0;
goto do_seek;
case SDLK_RIGHT:
incr = seek_interval ? seek_interval : 10.0;
goto do_seek;
case SDLK_UP:
incr = 60.0;
goto do_seek;
case SDLK_DOWN:
incr = -60.0;
do_seek:
...
break;
default:
break;
}
break;
case SDL_MOUSEBUTTONDOWN://mouse click
if (exit_on_mousedown) {
do_exit(cur_stream);
break;
}
if (event.button.button == SDL_BUTTON_LEFT) {
...
}
case SDL_MOUSEMOTION://mouse movement
...
break;
case SDL_WINDOWEVENT://window events
switch (event.window.event) {
case SDL_WINDOWEVENT_RESIZED:
...
case SDL_WINDOWEVENT_EXPOSED:
...
}
break;
case SDL_QUIT:
case FF_QUIT_EVENT:
do_exit(cur_stream);
break;
default:
break;
}
}
}
refresh_loop_wait_event drives the display of video frames:
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
...
video_refresh(is, &remaining_time);//refresh the display
...
}
/* called to display each frame */
static void video_refresh(void *opaque, double *remaining_time)
{
VideoState *is = opaque;
double time;
Frame *sp, *sp2;
if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)//real-time playback with the external clock as master
check_external_clock_speed(is);
if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
time = av_gettime_relative() / 1000000.0;
if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
video_display(is);
is->last_vis_time = time;
}
*remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
}
if (is->video_st) {
retry:
if (frame_queue_nb_remaining(&is->pictq) == 0) {
// nothing to do, no picture to display in the queue
} else {
double last_duration, duration, delay;
Frame *vp, *lastvp;
/* dequeue the picture */
lastvp = frame_queue_peek_last(&is->pictq);
vp = frame_queue_peek(&is->pictq);
if (vp->serial != is->videoq.serial) {
frame_queue_next(&is->pictq);
goto retry;
}
if (lastvp->serial != vp->serial)
is->frame_timer = av_gettime_relative() / 1000000.0;
if (is->paused)
goto display;
/* compute nominal last_duration */
last_duration = vp_duration(is, lastvp, vp);//nominal duration of the last frame
delay = compute_target_delay(last_duration, is);//compute the target delay, adjusted for A/V sync
/* frame_timer is the (system) time at which the previous frame was shown;
frame_timer + delay is therefore the time at which the current frame should be shown */
time= av_gettime_relative()/1000000.0;
if (time < is->frame_timer + delay) {//not yet time for this frame: keep showing the previous one and come back later
*remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
goto display;
}
is->frame_timer += delay;
if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
is->frame_timer = time;
SDL_LockMutex(is->pictq.mutex);
if (!isnan(vp->pts))
update_video_pts(is, vp->pts, vp->pos, vp->serial);
SDL_UnlockMutex(is->pictq.mutex);
if (frame_queue_nb_remaining(&is->pictq) > 1) {
Frame *nextvp = frame_queue_peek_next(&is->pictq);
duration = vp_duration(is, vp, nextvp);
if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
is->frame_drops_late++;//running late: drop this frame
frame_queue_next(&is->pictq);
goto retry;
}
}
if (is->subtitle_st) {
...
}
frame_queue_next(&is->pictq);
is->force_refresh = 1;
if (is->step && !is->paused)
stream_toggle_pause(is);
}
display:
/* display picture */
if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
video_display(is); //draw the picture
}
is->force_refresh = 0;
...
}
Audio-video synchronization happens in video_refresh. By default the audio clock is the master reference, and video is synchronized to it by showing a frame immediately, waiting a little longer, or dropping frames as needed.
The details of audio-video synchronization are covered in the next article.
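Still, as a quick preview of the idea behind compute_target_delay used above, the core adjustment is roughly the following (a simplified sketch using ffplay's threshold constants, not the verbatim code):
// Simplified sketch of ffplay's delay adjustment (not verbatim).
// delay: nominal duration of the previous frame; diff: video clock - master clock.
static double target_delay_sketch(double delay, double diff)
{
    double sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN,
                                  FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
    if (diff <= -sync_threshold)          // video is behind: shorten the delay
        delay = FFMAX(0, delay + diff);
    else if (diff >= sync_threshold)      // video is ahead: lengthen the delay
        delay = 2 * delay;
    return delay;
}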
Summary
The read_thread thread reads undecoded packets from the file or network stream into the separate audio, video, and subtitle queues.
Audio:
The audio_thread thread decodes audio.
sdl_audio_callback plays it back through the SDL audio callback.
Video:
The video_thread thread decodes video.
The event_loop main thread handles video display and UI interaction.