Audio/video synchronization can be done in three ways:
- sync to an external (standard) clock
- sync to the audio clock
- sync to the video clock
This post walks through the first and relatively simplest one (a minimal sketch follows this list):
- decode to get how much of the stream has already been played (from the timestamps)
- get the actual elapsed playback time from the difference between the recorded start time and the current time
- already-played stream time - actual elapsed time = delay
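A minimal sketch of that comparison, assuming the player fields used later in this post (start_time is the wall-clock time, in microseconds, recorded when playback began):
//delay = stream time already played - wall-clock time actually elapsed
int64_t stream_time = av_rescale_q(pts, stream->time_base, AV_TIME_BASE_Q); //µs
int64_t elapsed_time = av_gettime() - player->start_time;                   //µs
int64_t sleep_time = stream_time - elapsed_time; //>0: we are early, wait; <0: we are late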
Video calculation
//compute the delay: take the best-effort presentation timestamp of the decoded frame
int64_t pts = av_frame_get_best_effort_timestamp(yuv_frame);
//convert between time bases (stream time_base -> microseconds, AV_TIME_BASE_Q)
int64_t time = av_rescale_q(pts, stream->time_base, AV_TIME_BASE_Q);
player_wait_for_frame(player, time, player->video_stream_index);
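As a quick sanity check of that conversion (the numbers are only an example): for a 90 kHz video stream, a pts of 180000 corresponds to 2 seconds of stream time, and av_rescale_q maps it onto the microsecond AV_TIME_BASE_Q scale:
AVRational tb = {1, 90000};                            //example stream time_base
int64_t us = av_rescale_q(180000, tb, AV_TIME_BASE_Q); //180000 / 90000 s = 2 s -> 2000000 µs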
Audio calculation
int64_t pts = packet->pts;
if (pts != AV_NOPTS_VALUE) {
    player->audio_clock = av_rescale_q(pts, stream->time_base, AV_TIME_BASE_Q);
    //equivalent value in seconds: av_q2d(stream->time_base) * pts
    LOGI("player_write_audio - read from pts");
    player_wait_for_frame(player, player->audio_clock + AUDIO_TIME_ADJUST_US,
            player->audio_stream_index);
}
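When a packet carries no timestamp (pts == AV_NOPTS_VALUE) the code above simply leaves the clock untouched. A common fallback, not part of the original code, is to advance the audio clock by the duration of the samples just decoded; a sketch, assuming the decoded AVFrame and the codec context are available under these names:
//sketch: advance the clock by the duration of the decoded samples (frame, codec_ctx assumed)
player->audio_clock += (int64_t) frame->nb_samples * 1000000LL / codec_ctx->sample_rate;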
Delay calculation
/**
 * Get the current playback time: wall-clock time elapsed since playback started (µs)
 */
int64_t player_get_current_video_time(Player *player) {
    int64_t current_time = av_gettime();
    return current_time - player->start_time;
}
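/*
 * start_time is not set anywhere in this excerpt; presumably it is recorded once when
 * playback begins. A minimal sketch of that initialization (hypothetical function name,
 * requires #include <libavutil/time.h> for av_gettime()):
 */
void player_reset_start_time(Player *player) {
    //wall-clock time (µs) at playback start; all waits are measured against it
    player->start_time = av_gettime();
}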
/**
 * Wait (delay) until the given stream time is reached
 */
void player_wait_for_frame(Player *player, int64_t stream_time,
int stream_no) {
pthread_mutex_lock(&player->mutex);
for(;;){
int64_t current_video_time = player_get_current_video_time(player);
int64_t sleep_time = stream_time - current_video_time;
if (sleep_time < -300000ll) {
// 300 ms late
//The original author notes this 300 ms handling was copied from elsewhere without fully
//understanding it. In effect: sleep_time is negative here, so start_time - sleep_time pushes
//start_time forward by the amount we are late, re-basing the clock on this frame so playback
//re-syncs to the stream instead of trying to catch up.
int64_t new_value = player->start_time - sleep_time;
LOGI("player_wait_for_frame[%d] correcting %f to %f because late",
stream_no, (av_gettime() - player->start_time) / 1000000.0,
(av_gettime() - new_value) / 1000000.0);
player->start_time = new_value;
pthread_cond_broadcast(&player->cond);
}
if (sleep_time <= MIN_SLEEP_TIME_US) {
// No need to wait if the remaining time is smaller than the minimal sleep time
break;
}
if (sleep_time > 500000ll) {
// if the sleep time is bigger than 500 ms, just sleep 500 ms
// and check everything again
sleep_time = 500000ll;
}
//wait for the requested duration (converted to milliseconds)
int timeout_ret = pthread_cond_timeout_np(&player->cond,
&player->mutex, sleep_time/1000ll);
// just go further
LOGI("player_wait_for_frame[%d] finish", stream_no);
}
pthread_mutex_unlock(&player->mutex);
}
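Note that pthread_cond_timeout_np is an Android bionic extension that takes a relative timeout in milliseconds; it is not POSIX and may not be available in newer NDKs. A portable sketch of the same wait using pthread_cond_timedwait, which expects an absolute deadline, could look like this (the mutex must already be held, as in the loop above):
#include <pthread.h>
#include <sys/time.h>

//wait on player->cond for at most sleep_time_us microseconds
static int wait_on_cond_us(Player *player, int64_t sleep_time_us) {
    struct timeval now;
    gettimeofday(&now, NULL);
    int64_t deadline_us = (int64_t) now.tv_sec * 1000000LL + now.tv_usec + sleep_time_us;
    struct timespec deadline;
    deadline.tv_sec = deadline_us / 1000000;
    deadline.tv_nsec = (deadline_us % 1000000) * 1000;
    return pthread_cond_timedwait(&player->cond, &player->mutex, &deadline);
}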