距离上一篇博客,已经过去很久了,还是要多写博客才行。那么接下来我将介绍如何将ffplay播放器移植到Android中。这里首先上GitHub地址:CainSDLPlayer
开发环境介绍
Android Studio 3.0.1
FFmpeg-3.3.3
SDL2.0.7
简介
在FFmpeg编程开发笔记 —— ffplay.c 源码注释这篇文章中,我给ffplay的核心部分做了注释。如果通读过ffplay的源码,大家都会很清楚ffplay 的核心部分的构造是怎样的。这里借用一下雷博士博客中关于ffplay的架构图,大家可以去看雷霄骅博士的博客 ,这里非常感谢雷博士分享的资料,本人也从中获益良多。
看到上面的架构,应该了解ffplay的整体框架构成了,ffplay是基于SDL实现的。所以我们想要移植ffplay的话,首先要将SDL移植到Android中。移植过程可以参考本人的文章 —— FFmpeg编程开发笔记 —— Android 移植 FFmpeg + SDL2.0 库。这里就不做过多的介绍了。
移植过程
这里,本人默认你已经成功将SDL2.0移植到了Android Studio当中。
从上面的架构图以及ffplay的源码注释文章中可以了解到,ffplay 根据输入的文件名,创建解复用上下文,进入解复用线程read_thread的。在read_thread 方法中,查找媒体流AVStream,并根据设置调用stream_component_open打开相应的媒体流。之后进入解复用阶段,从文件中源源不断地读入AVPacket,存放到PacketQueue队列中,解码器又不断取出AVPacket进行解码,然后将解码得到的AVFrame存放到FrameQueue当中。所以这里我们首先将ffplay的头文件和定义结构体、PacketQueue 和 FrameQueue提取出来。
ffplay_def.h:
/*
 * ffplay_def.h - shared definitions for the player core.
 *
 * A trimmed-down copy of the top of ffplay.c: tuning constants, the
 * serial-numbered packet queue, the decoded-frame ring buffer, the sync
 * clock, the decoder wrapper and the global VideoState. The subtitle- and
 * AVFilter-related pieces of the original ffplay have been removed.
 */
#ifndef CAINPLAYER_FFPLAYE_DEF_H
#define CAINPLAYER_FFPLAYE_DEF_H
#ifdef __cplusplus
extern "C" {
#endif
#include "ff_config.h"
#include <inttypes.h>
#include <math.h>
#include <limits.h>
#include <signal.h>
#include <stdint.h>
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/dict.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "libavutil/time.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"
#include "libswresample/swresample.h"
#include <SDL.h>
#include "Mutex.h"
#include "Thread.h"
/* Upper bound on the total bytes buffered across all packet queues. */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
/* Minimum number of packets per stream before the demuxer stops reading. */
#define MIN_FRAMES 25
#define EXTERNAL_CLOCK_MIN_FRAMES 2
#define EXTERNAL_CLOCK_MAX_FRAMES 10
/* Minimum SDL audio buffer size, in samples. */
#define SDL_AUDIO_MIN_BUFFER_SIZE 512
/* Calculate actual buffer size keeping in mind not cause too frequent audio callbacks */
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
/* Step size for volume control in dB */
#define SDL_VOLUME_STEP (0.75)
/* no AV sync correction is done if below the minimum AV sync threshold */
#define AV_SYNC_THRESHOLD_MIN 0.04
/* AV sync correction is done if above the maximum AV sync threshold */
#define AV_SYNC_THRESHOLD_MAX 0.1
/* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
#define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0
/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10
/* external clock speed adjustment constants for realtime sources based on buffer fullness */
#define EXTERNAL_CLOCK_SPEED_MIN 0.900
#define EXTERNAL_CLOCK_SPEED_MAX 1.010
#define EXTERNAL_CLOCK_SPEED_STEP 0.001
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20
/* polls for possible required screen refresh at least this often, should be less than 1/fps */
#define REFRESH_RATE 0.01
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
/* TODO: We assume that a decoded and resampled frame fits into this buffer */
#define SAMPLE_ARRAY_SIZE (8 * 65536)
/* NOTE(review): defined in a header, so every translation unit that includes
 * this file gets its own private copy of sws_flags. */
static unsigned sws_flags = SWS_BICUBIC;
/* Singly-linked-list node of the packet queue. */
typedef struct MyAVPacketList {
    AVPacket pkt;
    struct MyAVPacketList *next;
    int serial;                  /* queue serial at enqueue time; used to drop stale packets after a seek */
} MyAVPacketList;
/* Thread-safe FIFO of raw (undecoded) AVPackets. */
typedef struct PacketQueue {
    MyAVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;                    /* accumulated bytes (packet data + node overhead) */
    int64_t duration;            /* accumulated packet durations */
    int abort_request;           /* 1 while the queue rejects puts/gets */
    int serial;                  /* incremented by each flush packet */
    Mutex *mutex;
    Cond *cond;
} PacketQueue;
#define VIDEO_PICTURE_QUEUE_SIZE 3
#define SUBPICTURE_QUEUE_SIZE 16
#define SAMPLE_QUEUE_SIZE 9
#define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
/* Audio output parameters (source or negotiated target format). */
typedef struct AudioParams {
    int freq;
    int channels;
    int64_t channel_layout;
    enum AVSampleFormat fmt;
    int frame_size;
    int bytes_per_sec;
} AudioParams;
typedef struct Clock {
    double pts;           /* clock base */
    double pts_drift;     /* clock base minus time at which we updated the clock */
    double last_updated;
    double speed;
    int serial;           /* clock is based on a packet with this serial */
    int paused;
    int *queue_serial;    /* pointer to the current packet queue serial, used for obsolete clock detection */
} Clock;
/* Common struct for handling all types of decoded data and allocated render buffers. */
typedef struct Frame {
    AVFrame *frame;
    AVSubtitle sub;
    int serial;
    double pts;           /* presentation timestamp for the frame */
    double duration;      /* estimated duration of the frame */
    int64_t pos;          /* byte position of the frame in the input file */
    int width;
    int height;
    int format;
    AVRational sar;
    int uploaded;
    int flip_v;
} Frame;
/* Fixed-capacity ring buffer of decoded Frames, paired with a PacketQueue
 * whose abort flag and serial it consults. */
typedef struct FrameQueue {
    Frame queue[FRAME_QUEUE_SIZE];
    int rindex;           /* read index */
    int windex;           /* write index */
    int size;             /* number of frames currently stored */
    int max_size;
    int keep_last;        /* keep the last shown frame readable (for refresh) */
    int rindex_shown;     /* 1 once the frame at rindex has been displayed */
    Mutex *mutex;
    Cond *cond;
    PacketQueue *pktq;    /* companion packet queue (abort flag / serial) */
} FrameQueue;
/* Master-clock selection. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
/* Per-stream decoder state: the codec context plus the packet currently
 * being consumed and pts bookkeeping. */
typedef struct Decoder {
    AVPacket pkt;
    AVPacket pkt_temp;          /* working copy; data/size advance as the packet is consumed */
    PacketQueue *queue;
    AVCodecContext *avctx;
    int pkt_serial;
    int finished;               /* set to the serial of the last drained packet at EOF */
    int packet_pending;
    Cond *empty_queue_cond;     /* signalled to wake the demuxer when the queue runs dry */
    int64_t start_pts;
    AVRational start_pts_tb;
    int64_t next_pts;           /* predicted pts of the next audio frame */
    AVRational next_pts_tb;
    Thread *decoder_tid;
} Decoder;
enum ShowMode {
    SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
};
/* Global state of one playback session (the ffplay "VideoState"). */
typedef struct VideoState {
    int abort_request;              // set to 1 to ask every thread to stop
    int force_refresh;              // force an immediate video refresh
    int paused;                     // pause flag
    int last_paused;                // pause state seen on the previous loop iteration
    int queue_attachments_req;      // request to enqueue attached pictures (e.g. cover art)
    int seek_req;                   // a seek has been requested
    int seek_flags;                 // AVSEEK_FLAG_* for the pending seek
    int64_t seek_pos;               // seek target position (NOTE(review): original comment said "seconds"; upstream ffplay uses AV_TIME_BASE units — confirm)
    int64_t seek_rel;               // seek offset relative to the current position
    int read_pause_return;          // return value of the last av_read_pause call
    AVFormatContext *ic;            // demuxing (format) context
    int realtime;                   // whether the input is a realtime stream
    Clock audioClock;               // audio clock
    Clock videoClock;               // video clock
    Clock extClock;                 // external clock
    FrameQueue videoFrameQueue;     // decoded video frame queue
    FrameQueue audioFrameQueue;     // decoded audio frame queue
    Decoder audioDecoder;           // audio decoder
    Decoder videoDecoder;           // video decoder
    int av_sync_type;               // sync master type; defaults to audio
    int audioStreamIdx;             // audio stream index
    AVStream *audioStream;          // audio stream
    PacketQueue audioQueue;         // raw audio packet queue
    int audio_volume;               // output volume
    int muted;                      // mute flag
    double audio_clock;
    int audio_clock_serial;
    double audio_diff_cum;          /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    int audio_hw_buf_size;
    uint8_t *audio_buf;
    uint8_t *audio_buf1;
    unsigned int audio_buf_size;    /* in bytes */
    unsigned int audio_buf1_size;
    int audio_buf_index;            /* in bytes */
    int audio_write_buf_size;       // bytes of audio_buf not yet written to the device
    struct AudioParams audio_src;
    struct AudioParams audio_tgt;
    struct SwrContext *swr_ctx;     // audio resampling context
    enum ShowMode show_mode;        // display mode (waveform vs. video); only video is supported here
    SDL_Texture *vid_texture;       // texture the video frames are rendered into
    double frame_timer;             // frame timer: clock time of the current frame
    double frame_last_filter_delay; // delay introduced by the last filter (unused without AVFilter)
    int videoStreamIdx;             // video stream index
    AVStream *videoStream;          // video stream
    PacketQueue videoQueue;         // raw video packet queue
    double max_frame_duration;      // above this, frame gaps are treated as timestamp discontinuities
    struct SwsContext *img_convert_ctx; // video pixel-format conversion context
    int eof;                        // end-of-file flag
    char *filename;                 // input file name
    int width, height, xleft, ytop; // display size and origin
    int step;                       // single-frame stepping flag
    Thread *readThread;             // demuxing (file reading) thread
    Cond *readCondition;            // condition used to throttle the read thread
    Thread *videRefreshThread;      // video refresh thread
} VideoState;
#ifdef __cplusplus
};
#endif
#endif //CAINPLAYER_FFPLAYE_DEF_H
在这里,你会发现,头文件中关于字幕和 AVFilter相关的东西我都删掉了,并且关于音频波形的texture也删掉了。我们首先只做音频流和视频流的播放器,了解播放器的本质(音频流和视频流的核心流程)。而且Android手机环境下对AVFilter的处理还是交给OpenGLES来处理会比较好,不过这是后话了。这里暂时不讨论这么长远的东西,我们首先得做出一个能用的基于ffplay的播放器。
PacketQueue.h:
/*
 * PacketQueue.h - serial-numbered AVPacket FIFO, extracted from ffplay.c
 * (the original static functions, made external).
 */
#ifndef CAINPLAYER_PACKETQUEUE_H
#define CAINPLAYER_PACKETQUEUE_H
#include "ffplay_def.h"
#ifdef __cplusplus
extern "C" {
#endif
// Enqueue a packet without locking; the caller must hold the queue mutex.
int packet_queue_put_private(PacketQueue *q, AVPacket *pkt);
// Enqueue a packet (locks internally; unrefs the packet on failure).
int packet_queue_put(PacketQueue *q, AVPacket *pkt);
// Enqueue an empty (NULL-data) packet, used to signal end of stream.
int packet_queue_put_nullpacket(PacketQueue *q, int stream_index);
// Initialize the queue; returns 0 on success or AVERROR(ENOMEM).
int packet_queue_init(PacketQueue *q);
// Drop and unref every queued packet.
void packet_queue_flush(PacketQueue *q);
// Flush the queue and destroy its mutex and condition variable.
void packet_queue_destroy(PacketQueue *q);
// Put the queue into the aborted state and wake any blocked reader.
void packet_queue_abort(PacketQueue *q);
// Re-enable the queue and enqueue the flush packet.
void packet_queue_start(PacketQueue *q);
// Dequeue one packet; with block!=0 wait until one arrives. <0 on abort.
int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial);
#ifdef __cplusplus
};
#endif
#endif //CAINPLAYER_PACKETQUEUE_H
PacketQueue.cpp:
#include "PacketQueue.h"
#ifdef __cplusplus
extern "C" {
#endif
extern AVPacket flush_pkt;
/**
 * Append one packet to the queue. The caller must already hold q->mutex.
 * A flush packet bumps the queue serial so readers can detect stale data.
 * @param q   packet queue
 * @param pkt packet to enqueue (ownership of its data moves into the queue)
 * @return 0 on success, -1 if aborted or out of memory
 */
int packet_queue_put_private(PacketQueue *q, AVPacket *pkt) {
    MyAVPacketList *node;

    if (q->abort_request) {
        return -1;
    }
    node = (MyAVPacketList *) av_malloc(sizeof(MyAVPacketList));
    if (!node) {
        return -1;
    }
    node->pkt = *pkt;
    node->next = NULL;
    /* the flush packet starts a new serial "generation" */
    if (pkt == &flush_pkt) {
        q->serial++;
    }
    node->serial = q->serial;

    /* link at the tail of the singly linked list */
    if (q->last_pkt) {
        q->last_pkt->next = node;
    } else {
        q->first_pkt = node;
    }
    q->last_pkt = node;
    q->nb_packets++;
    q->size += node->pkt.size + sizeof(*node);
    q->duration += node->pkt.duration;
    /* XXX: should duplicate packet data in DV case */
    CondSignal(q->cond);
    return 0;
}
/**
 * Enqueue a packet under the queue lock.
 * On failure the packet is unreffed here so the caller's reference does not
 * leak; the static flush packet must never be unreffed.
 * @param q   packet queue
 * @param pkt packet to enqueue
 * @return 0 on success, negative on failure
 */
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
    MutexLock(q->mutex);
    int result = packet_queue_put_private(q, pkt);
    MutexUnlock(q->mutex);

    if (result < 0 && pkt != &flush_pkt) {
        av_packet_unref(pkt);
    }
    return result;
}
/**
 * Enqueue an empty packet for the given stream; the decoder interprets a
 * NULL-data packet as an end-of-stream marker.
 * @param q            packet queue
 * @param stream_index stream the null packet belongs to
 * @return 0 on success, negative on failure
 */
int packet_queue_put_nullpacket(PacketQueue *q, int stream_index) {
    AVPacket pkt;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    pkt.stream_index = stream_index;
    return packet_queue_put(q, &pkt);
}
/**
 * Initialize a packet queue. The queue starts in the aborted state;
 * packet_queue_start() must be called before it accepts packets.
 * @param q packet queue
 * @return 0 on success, AVERROR(ENOMEM) if a lock cannot be created
 */
int packet_queue_init(PacketQueue *q) {
    memset(q, 0, sizeof(*q));
    if (!(q->mutex = MutexCreate())) {
        av_log(NULL, AV_LOG_FATAL, "MutexCreate(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    if (!(q->cond = CondCreate())) {
        av_log(NULL, AV_LOG_FATAL, "CondCreate(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    q->abort_request = 1;
    return 0;
}
/**
 * Drop every queued packet: unref the data, free the nodes and reset the
 * counters. Takes the queue lock internally.
 * @param q packet queue
 */
void packet_queue_flush(PacketQueue *q) {
    MyAVPacketList *node, *next;

    MutexLock(q->mutex);
    node = q->first_pkt;
    while (node) {
        next = node->next;
        av_packet_unref(&node->pkt);
        av_freep(&node);
        node = next;
    }
    q->first_pkt = NULL;
    q->last_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    q->duration = 0;
    MutexUnlock(q->mutex);
}
/**
 * Destroy a packet queue: flush remaining packets, then release the lock
 * and condition variable. The queue must not be used afterwards.
 * @param q packet queue
 */
void packet_queue_destroy(PacketQueue *q) {
    packet_queue_flush(q);
    MutexDestroy(q->mutex);
    CondDestroy(q->cond);
}
/**
 * Put the queue into the aborted state so pending/future gets and puts
 * fail, and wake any reader blocked in packet_queue_get().
 * @param q packet queue
 */
void packet_queue_abort(PacketQueue *q) {
    MutexLock(q->mutex);
    q->abort_request = 1;
    /* wake a blocked consumer so it can observe the abort */
    CondSignal(q->cond);
    MutexUnlock(q->mutex);
}
/**
 * Re-enable an (aborted) queue and push the flush packet, which starts a
 * new serial generation for the consumers.
 * @param q packet queue
 */
void packet_queue_start(PacketQueue *q) {
    MutexLock(q->mutex);
    q->abort_request = 0;
    packet_queue_put_private(q, &flush_pkt);
    MutexUnlock(q->mutex);
}
/**
 * Dequeue one packet from the head of the queue.
 * @param q      packet queue
 * @param pkt    receives the dequeued packet (caller takes ownership)
 * @param block  non-zero: wait until a packet arrives; zero: return at once
 * @param serial optional out: serial the packet was enqueued under
 * @return 1 when a packet was returned, 0 when empty and non-blocking,
 *         -1 when the queue was aborted
 */
int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial) {
    MyAVPacketList *pkt1;
    int ret;
    MutexLock(q->mutex);
    for (;;) {
        /* abort wins over everything, including buffered packets */
        if (q->abort_request) {
            ret = -1;
            break;
        }
        pkt1 = q->first_pkt;
        if (pkt1) {
            /* unlink the head node and update the running totals */
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            q->duration -= pkt1->pkt.duration;
            *pkt = pkt1->pkt;
            if (serial)
                *serial = pkt1->serial;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            /* empty: sleep until a producer signals or abort is requested */
            CondWait(q->cond, q->mutex);
        }
    }
    MutexUnlock(q->mutex);
    return ret;
}
#ifdef __cplusplus
};
#endif
这里,PacketQueue 就是将ffplay中的对应方法移植过来,将原来的方法去掉static关键字。这么做只是为了方便后续将播放器的核心封装成C++,方便以后的音视频处理。
FrameQueue.h:
/*
 * FrameQueue.h - ring buffer of decoded frames, extracted from ffplay.c.
 */
#ifndef CAINPLAYER_FRAMEQUEUE_H
#define CAINPLAYER_FRAMEQUEUE_H
#include "ffplay_def.h"
#ifdef __cplusplus
extern "C" {
#endif
// Release the AVFrame/AVSubtitle references held by a queue slot.
void frame_queue_unref_item(Frame *vp);
// Initialize the queue; allocates one AVFrame per slot. 0 or AVERROR(ENOMEM).
int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last);
// Free all frames and the queue's lock/condition. (sic: "destory" kept for ABI)
void frame_queue_destory(FrameQueue *f);
// Wake anyone blocked in peek_writable/peek_readable.
void frame_queue_signal(FrameQueue *f);
// Current frame to display (accounts for rindex_shown).
Frame *frame_queue_peek(FrameQueue *f);
// Frame after the current one.
Frame *frame_queue_peek_next(FrameQueue *f);
// Last frame that was shown.
Frame *frame_queue_peek_last(FrameQueue *f);
// Blocking: slot to write the next frame into, or NULL on abort.
Frame *frame_queue_peek_writable(FrameQueue *f);
// Blocking: next readable frame, or NULL on abort.
Frame *frame_queue_peek_readable(FrameQueue *f);
// Commit the frame written at windex.
void frame_queue_push(FrameQueue *f);
// Advance the read index, releasing the consumed frame.
void frame_queue_next(FrameQueue *f);
// Number of undisplayed frames remaining.
int frame_queue_nb_remaining(FrameQueue *f);
// File position of the last shown frame, or -1 if stale/unknown.
int64_t frame_queue_last_pos(FrameQueue *f);
#ifdef __cplusplus
};
#endif
#endif //CAINPLAYER_FRAMEQUEUE_H
FrameQueue.cpp:
#include "FrameQueue.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Release the references held by one queue slot: the AVFrame buffers and
 * any subtitle data. The slot itself stays allocated for reuse.
 * @param vp queue slot
 */
void frame_queue_unref_item(Frame *vp) {
    av_frame_unref(vp->frame);
    avsubtitle_free(&vp->sub);
}
/**
 * Initialize a frame queue and pre-allocate an AVFrame for every slot.
 * @param f         frame queue
 * @param pktq      companion packet queue (supplies the abort flag/serial)
 * @param max_size  requested capacity, clamped to FRAME_QUEUE_SIZE
 * @param keep_last keep the last displayed frame readable
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last) {
    memset(f, 0, sizeof(*f));
    f->mutex = MutexCreate();
    if (!f->mutex) {
        av_log(NULL, AV_LOG_FATAL, "MutexCreate(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    f->cond = CondCreate();
    if (!f->cond) {
        av_log(NULL, AV_LOG_FATAL, "CondCreate(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    f->pktq = pktq;
    f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
    f->keep_last = !!keep_last;
    for (int i = 0; i < f->max_size; i++) {
        f->queue[i].frame = av_frame_alloc();
        if (!f->queue[i].frame) {
            return AVERROR(ENOMEM);
        }
    }
    return 0;
}
/**
 * Destroy a frame queue: unref and free every slot's AVFrame, then release
 * the lock and condition variable.
 * @param f frame queue
 */
void frame_queue_destory(FrameQueue *f) {
    for (int i = 0; i < f->max_size; i++) {
        Frame *slot = &f->queue[i];
        frame_queue_unref_item(slot);
        av_frame_free(&slot->frame);
    }
    MutexDestroy(f->mutex);
    CondDestroy(f->cond);
}
/**
 * Wake any thread blocked in frame_queue_peek_writable/peek_readable,
 * e.g. so it can notice an abort request.
 * @param f frame queue
 */
void frame_queue_signal(FrameQueue *f) {
    MutexLock(f->mutex);
    CondSignal(f->cond);
    MutexUnlock(f->mutex);
}
/**
 * Return the frame currently due for display, skipping the already-shown
 * frame when rindex_shown is set. Does not lock or remove the frame.
 * @param f frame queue
 * @return pointer to the current frame slot
 */
Frame *frame_queue_peek(FrameQueue *f) {
    int idx = (f->rindex + f->rindex_shown) % f->max_size;
    return &f->queue[idx];
}
/**
 * Return the frame after the current one (used to compute frame duration).
 * Does not lock or remove the frame.
 * @param f frame queue
 * @return pointer to the next frame slot
 */
Frame *frame_queue_peek_next(FrameQueue *f) {
    int idx = (f->rindex + f->rindex_shown + 1) % f->max_size;
    return &f->queue[idx];
}
/**
 * Return the most recently shown frame (the slot at the raw read index).
 * @param f frame queue
 * @return pointer to the last frame slot
 */
Frame *frame_queue_peek_last(FrameQueue *f) {
    return f->queue + f->rindex;
}
/**
 * Block until a slot is free for writing, then return it.
 * @param f frame queue
 * @return writable slot at windex, or NULL if the queue was aborted
 */
Frame *frame_queue_peek_writable(FrameQueue *f) {
    /* wait until we have space to put a new frame */
    MutexLock(f->mutex);
    while (f->size >= f->max_size && !f->pktq->abort_request) {
        CondWait(f->cond, f->mutex);
    }
    MutexUnlock(f->mutex);

    if (f->pktq->abort_request) {
        return NULL;
    }
    return f->queue + f->windex;
}
/**
 * Block until an undisplayed frame is available, then return it.
 * @param f frame queue
 * @return next readable frame, or NULL if the queue was aborted
 */
Frame *frame_queue_peek_readable(FrameQueue *f) {
    /* wait until we have a readable a new frame */
    MutexLock(f->mutex);
    while (f->size - f->rindex_shown <= 0 && !f->pktq->abort_request) {
        CondWait(f->cond, f->mutex);
    }
    MutexUnlock(f->mutex);

    if (f->pktq->abort_request) {
        return NULL;
    }
    return f->queue + (f->rindex + f->rindex_shown) % f->max_size;
}
/**
 * Commit the frame that was written into the slot returned by
 * frame_queue_peek_writable(): advance the write index and publish it.
 * @param f frame queue
 */
void frame_queue_push(FrameQueue *f) {
    /* the writer owns windex, so it is advanced outside the lock */
    f->windex++;
    if (f->windex == f->max_size) {
        f->windex = 0;
    }
    MutexLock(f->mutex);
    f->size++;
    CondSignal(f->cond);
    MutexUnlock(f->mutex);
}
/**
 * Advance past the current frame. With keep_last, the very first call only
 * marks the current frame as shown instead of releasing it, so it stays
 * available for redraws.
 * @param f frame queue
 */
void frame_queue_next(FrameQueue *f) {
    if (f->keep_last && !f->rindex_shown) {
        f->rindex_shown = 1;
        return;
    }
    frame_queue_unref_item(&f->queue[f->rindex]);
    f->rindex = (f->rindex + 1) % f->max_size;
    MutexLock(f->mutex);
    f->size--;
    CondSignal(f->cond);
    MutexUnlock(f->mutex);
}
/**
 * Number of frames still waiting to be displayed (the already-shown frame,
 * if any, is not counted).
 * @param f frame queue
 * @return count of undisplayed frames
 */
int frame_queue_nb_remaining(FrameQueue *f) {
    return f->size - f->rindex_shown;
}
/**
 * Byte position in the input file of the last shown frame, used for
 * byte-based seeking.
 * @param f frame queue
 * @return file position, or -1 when nothing was shown yet or the frame
 *         belongs to an old serial generation
 */
int64_t frame_queue_last_pos(FrameQueue *f) {
    Frame *last = &f->queue[f->rindex];
    if (!f->rindex_shown || last->serial != f->pktq->serial) {
        return -1;
    }
    return last->pos;
}
#ifdef __cplusplus
};
#endif
到这里,我们就得到了PacketQueue 和FrameQueue 文件,这些文件是独立出来的,跟播放器关联不大。好了接下来我们将解码器和同步时钟相关的方法也提取出来:
MediaDecoder.h:
/*
 * MediaDecoder.h - the ffplay Decoder wrapper, extracted into its own unit.
 */
#ifndef CAINPLAYER_MEDIADECODER_H
#define CAINPLAYER_MEDIADECODER_H
#include "ffplay_def.h"
#ifdef __cplusplus
extern "C" {
#endif
// Initialize a decoder around an opened codec context and its packet queue.
void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, Cond *empty_queue_cond);
// Decode the next frame (video/audio into *frame, subtitles into *sub).
// Returns >0 when a frame was produced, 0 at end of stream, <0 on abort.
int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub);
// Release the pending packet and free the codec context.
void decoder_destroy(Decoder *d);
// Start the packet queue and spawn the decode thread running fn(arg).
int decoder_start(Decoder *d, int (*fn)(void *), void *arg);
// Abort the packet queue, wake the frame queue and join the decode thread.
void decoder_abort(Decoder *d, FrameQueue *fq);
#ifdef __cplusplus
};
#endif
#endif //CAINPLAYER_MEDIADECODER_H
MediaDecoder.cpp:
#include "MediaDecoder.h"
#include "PacketQueue.h"
#include "FrameQueue.h"
#ifdef __cplusplus
extern "C" {
#endif
extern AVPacket flush_pkt;
/**
 * Initialize a Decoder around an already-opened codec context.
 * @param d                decoder to initialize (zeroed first)
 * @param avctx            opened AVCodecContext; the decoder takes it over
 * @param queue            packet queue feeding this decoder
 * @param empty_queue_cond signalled when the queue runs dry, to wake the demuxer
 */
void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, Cond *empty_queue_cond) {
    memset(d, 0, sizeof(*d));
    d->avctx = avctx;
    d->queue = queue;
    d->empty_queue_cond = empty_queue_cond;
    d->start_pts = AV_NOPTS_VALUE;
}
/**
 * Decode the next frame for this decoder's stream.
 * Pulls packets from the queue (blocking), handles flush packets and serial
 * changes, and keeps partially-consumed packets in pkt_temp across calls.
 * @param d     decoder
 * @param frame receives the decoded audio/video frame
 * @param sub   receives decoded subtitle data (subtitle streams only)
 * @return >0 when a frame was produced, 0 when the stream is drained,
 *         <0 on abort
 */
int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
    int got_frame = 0;
    do {
        int ret = -1;
        if (d->queue->abort_request) {
            return -1;
        }
        /* fetch a fresh packet when none is pending or ours is stale */
        if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
            AVPacket pkt;
            do {
                /* queue empty: nudge the demuxer to read more data */
                if (d->queue->nb_packets == 0)
                    CondSignal(d->empty_queue_cond);
                if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
                    return -1;
                /* flush packet (after a seek): reset the codec and pts prediction */
                if (pkt.data == flush_pkt.data) {
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
            av_packet_unref(&d->pkt);
            d->pkt_temp = d->pkt = pkt;
            d->packet_pending = 1;
        }
        switch (d->avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    /* upstream ffplay's decoder_reorder_pts switch was removed;
                     * this port always reorders via the best-effort timestamp */
//                    if (decoder_reorder_pts == -1) {
//                        frame->pts = av_frame_get_best_effort_timestamp(frame);
//                    } else if (!decoder_reorder_pts) {
//                        frame->pts = frame->pkt_dts;
//                    }
                    frame->pts = av_frame_get_best_effort_timestamp(frame);
                }
                break;
            case AVMEDIA_TYPE_AUDIO:
                ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    AVRational tb = (AVRational){1, frame->sample_rate};
                    /* rescale pts into 1/sample_rate units, or predict it
                     * from the previous frame when the codec gives none */
                    if (frame->pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
                    else if (d->next_pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                    if (frame->pts != AV_NOPTS_VALUE) {
                        d->next_pts = frame->pts + frame->nb_samples;
                        d->next_pts_tb = tb;
                    }
                }
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
                break;
        }
        if (ret < 0) {
            d->packet_pending = 0;
        } else {
            d->pkt_temp.dts =
            d->pkt_temp.pts = AV_NOPTS_VALUE;
            if (d->pkt_temp.data) {
                /* audio packets may be consumed incrementally; other types
                 * are consumed whole */
                if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = d->pkt_temp.size;
                d->pkt_temp.data += ret;
                d->pkt_temp.size -= ret;
                if (d->pkt_temp.size <= 0)
                    d->packet_pending = 0;
            } else {
                /* NULL-data packet: draining; no more frames means EOF */
                if (!got_frame) {
                    d->packet_pending = 0;
                    d->finished = d->pkt_serial;
                }
            }
        }
    } while (!got_frame && !d->finished);
    return got_frame;
}
/**
 * Destroy a decoder: drop the reference to its pending packet and free the
 * codec context.
 * @param d decoder
 */
void decoder_destroy(Decoder *d) {
    av_packet_unref(&d->pkt);
    avcodec_free_context(&d->avctx);
}
/**
 * Start decoding: enable the packet queue and spawn the decode thread.
 * @param d   decoder
 * @param fn  thread entry function
 * @param arg argument passed to fn
 * @return 0 on success, AVERROR(ENOMEM) if the thread cannot be created
 */
int decoder_start(Decoder *d, int (*fn)(void *), void *arg) {
    packet_queue_start(d->queue);
    d->decoder_tid = ThreadCreate(fn, arg, "decoder");
    if (d->decoder_tid == NULL) {
        av_log(NULL, AV_LOG_ERROR, "ThreadCreate(): %s\n", SDL_GetError());
        return AVERROR(ENOMEM);
    }
    return 0;
}
/**
 * Stop decoding: abort the packet queue, wake anyone blocked on the frame
 * queue, join the decode thread, then drop the remaining packets.
 * @param d  decoder
 * @param fq frame queue the decode thread may be blocked on
 */
void decoder_abort(Decoder *d, FrameQueue *fq) {
    packet_queue_abort(d->queue);
    frame_queue_signal(fq);
    ThreadWait(d->decoder_tid, NULL);
    d->decoder_tid = NULL;
    packet_queue_flush(d->queue);
}
#ifdef __cplusplus
};
#endif
解码器部分,依赖了 flush_pkt 和 decoder_reorder_pts。其中decoder_reorder_pts 是用于重排pts 的,这个参数,我们默认是重排,所以将ffplay的decoder_decode_frame方法中的判断注释掉:
// if (decoder_reorder_pts == -1) {
// frame->pts = av_frame_get_best_effort_timestamp(frame);
// } else if (!decoder_reorder_pts) {
// frame->pts = frame->pkt_dts;
// }
frame->pts = av_frame_get_best_effort_timestamp(frame);
Clock.h:
/*
 * Clock.h - the ffplay synchronization clock, extracted into its own unit.
 */
#ifndef CAINPLAYER_CLOCK_H
#define CAINPLAYER_CLOCK_H
#include "ffplay_def.h"
#ifdef __cplusplus
extern "C" {
#endif
// Current clock value in seconds (NAN when stale or unset).
double get_clock(Clock *c);
// Set the clock to pts as of an explicit wall-clock time.
void set_clock_at(Clock *c, double pts, int serial, double time);
// Set the clock to pts as of now.
void set_clock(Clock *c, double pts, int serial);
// Change the playback speed of the clock.
void set_clock_speed(Clock *c, double speed);
// Initialize the clock (speed 1.0, value NAN, bound to a queue serial).
void init_clock(Clock *c, int *queue_serial);
// Snap clock c to slave when they diverge beyond AV_NOSYNC_THRESHOLD.
void sync_clock_to_slave(Clock *c, Clock *slave);
#ifdef __cplusplus
};
#endif
#endif //CAINPLAYER_CLOCK_H
Clock.cpp:
#include "Clock.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Current value of the clock, in seconds.
 * @param c clock
 * @return NAN when the clock belongs to an obsolete queue serial; the
 *         frozen pts while paused; otherwise the extrapolated position
 *         (drift plus elapsed wall time, scaled by the clock speed)
 */
double get_clock(Clock *c) {
    if (*c->queue_serial != c->serial) {
        return NAN;
    }
    if (c->paused) {
        return c->pts;
    }
    double now = av_gettime_relative() / 1000000.0;
    return c->pts_drift + now - (now - c->last_updated) * (1.0 - c->speed);
}
/**
 * Set the clock to a given pts as of an explicitly supplied wall-clock time.
 * @param c      clock
 * @param pts    new clock value in seconds
 * @param serial packet-queue serial this value belongs to
 * @param time   wall-clock time (seconds) at which pts is valid
 */
void set_clock_at(Clock *c, double pts, int serial, double time) {
    c->pts = pts;
    c->last_updated = time;
    c->pts_drift = pts - time;  /* cached so get_clock() is a single add */
    c->serial = serial;
}
/**
 * Set the clock to a given pts as of the current wall-clock time.
 * @param c      clock
 * @param pts    new clock value in seconds
 * @param serial packet-queue serial this value belongs to
 */
void set_clock(Clock *c, double pts, int serial) {
    double now = av_gettime_relative() / 1000000.0;
    set_clock_at(c, pts, serial, now);
}
/**
 * Change the clock's playback speed. The clock is re-anchored at its
 * current value first so the speed change does not cause a jump.
 * @param c     clock
 * @param speed new speed factor (1.0 = realtime)
 */
void set_clock_speed(Clock *c, double speed) {
    set_clock(c, get_clock(c), c->serial);
    c->speed = speed;
}
/**
 * Initialize a clock: speed 1.0, not paused, value NAN, bound to the given
 * packet-queue serial pointer for staleness detection.
 * @param c            clock
 * @param queue_serial pointer to the owning queue's serial counter
 */
void init_clock(Clock *c, int *queue_serial) {
    c->speed = 1.0;
    c->paused = 0;
    c->queue_serial = queue_serial;
    set_clock(c, NAN, -1);
}
/**
 * Snap clock c onto the slave clock when c is unset (NAN) or has drifted
 * beyond AV_NOSYNC_THRESHOLD from it.
 * @param c     clock to adjust
 * @param slave reference clock
 */
void sync_clock_to_slave(Clock *c, Clock *slave) {
    double master_val = get_clock(c);
    double slave_val = get_clock(slave);
    if (isnan(slave_val)) {
        return;  /* nothing to sync against */
    }
    if (isnan(master_val) || fabs(master_val - slave_val) > AV_NOSYNC_THRESHOLD) {
        set_clock(c, slave_val, slave->serial);
    }
}
#ifdef __cplusplus
};
#endif
到这里,我们又将解码器和同步时钟的方法抽取出来了。
接下来,我们创建自己的锁和线程,用于替换掉SDL中的锁和线程:
Thread.h:
/*
 * Thread.h - thin pthread wrapper replacing SDL_Thread, so the player core
 * does not depend on SDL's threading.
 */
#ifndef CAINCAMERA_THREAD_H
#define CAINCAMERA_THREAD_H
#ifdef __cplusplus
extern "C" {
#endif
#include "Mutex.h"
// Thread priority levels mapped onto the scheduler's min/mid/max priority.
typedef enum {
    THREAD_PRIORITY_LOW,
    THREAD_PRIORITY_NORMAL,
    THREAD_PRIORITY_HIGH
} ThreadPriority;
// Thread handle.
typedef struct Thread
{
    pthread_t id;           // underlying pthread
    int (*func)(void *);    // entry function
    void *data;             // argument passed to func
    char name[32];          // thread name (truncated)
    int retval;             // value returned by func
} Thread;
// Create and start a thread running fun(data); returns NULL on failure.
Thread *ThreadCreate(int (*fun)(void *), void *data, const char *name);
// Set the CALLING thread's scheduling priority.
int ThreadSetPriority(ThreadPriority priority);
// Join the thread; optionally receive its return value in *status.
void ThreadWait(Thread *thread, int *status);
// Detach the thread so its resources are reclaimed automatically.
void ThreadDetach(Thread *thread);
// Join the thread and free the Thread structure.
void ThreadDestroy(Thread *thread);
#ifdef __cplusplus
};
#endif
#endif //CAINCAMERA_THREAD_H
Thread.cpp:
#include <unistd.h>
#include <malloc.h>
#include "native_log.h"
#include "Thread.h"
#ifdef __cplusplus
extern "C" {
#endif
#include <errno.h>
#include <assert.h>
/**
 * Trampoline executed on the new pthread: names the thread, runs the user
 * function and stores its return value.
 * NOTE(review): this detaches the thread after the user function returns,
 * while ThreadWait() calls pthread_join() on the same thread — joining a
 * thread that has already detached itself is undefined behavior. Confirm
 * which lifetime model (join vs. detach) is intended.
 * @param data the Thread structure
 * @return NULL
 */
static void *ThreadRun(void *data) {
    Thread *thread = (Thread *) data;
    ALOGI("ThreadRun: [%d] %s\n", (int) gettid(), thread->name);
    pthread_setname_np(pthread_self(), thread->name);
    thread->retval = thread->func(thread->data);
    ThreadDetach(thread);
    return NULL;
}
/**
 * Create and start a thread running fun(data).
 * @param fun  entry function
 * @param data argument passed to fun
 * @param name thread name (truncated to fit Thread.name)
 * @return new Thread handle, or NULL on allocation/creation failure
 */
Thread *ThreadCreate(int (*fun)(void *), void *data, const char *name) {
    Thread *thread = (Thread *) malloc(sizeof(Thread));
    if (!thread) {
        /* bug fix: the original dereferenced a NULL pointer when malloc failed */
        return NULL;
    }
    memset(thread, 0, sizeof(Thread));
    thread->func = fun;
    thread->data = data;
    strlcpy(thread->name, name, sizeof(thread->name) - 1);
    // 创建线程 (create the thread)
    int retval = pthread_create(&thread->id, NULL, ThreadRun, thread);
    if (retval) {
        /* bug fix: the original leaked the Thread structure on failure */
        free(thread);
        return NULL;
    }
    return thread;
}
/**
 * Adjust the scheduling priority of the CALLING thread: LOW and HIGH map
 * to the policy's minimum/maximum priority, anything else to the midpoint.
 * @param priority requested priority level
 * @return 0 on success, -1 when the scheduler refuses
 */
int ThreadSetPriority(ThreadPriority priority) {
    struct sched_param param;
    int policy;
    pthread_t self = pthread_self();

    /* read the current policy so the new priority stays within its range */
    if (pthread_getschedparam(self, &policy, &param) < 0) {
        ALOGE("call pthread_getschedparam() failed!\n");
        return -1;
    }
    switch (priority) {
        case THREAD_PRIORITY_LOW:
            param.sched_priority = sched_get_priority_min(policy);
            break;
        case THREAD_PRIORITY_HIGH:
            param.sched_priority = sched_get_priority_max(policy);
            break;
        default: {
            int lo = sched_get_priority_min(policy);
            int hi = sched_get_priority_max(policy);
            param.sched_priority = lo + (hi - lo) / 2;
            break;
        }
    }
    if (pthread_setschedparam(self, policy, &param) < 0) {
        ALOGE("call pthread_setschedparam() failed");
        return -1;
    }
    return 0;
}
/**
 * Wait for a thread to finish and optionally fetch its return value.
 * NOTE(review): ThreadRun() detaches the thread after the user function
 * returns; pthread_join() on a detached thread is undefined behavior, so
 * a join can only be relied on while the user function is still running.
 * Confirm the intended lifetime model.
 * @param thread thread to join (must not be NULL)
 * @param status optional out: the entry function's return value
 */
void ThreadWait(Thread *thread, int *status) {
    assert(thread);
    if (!thread) {
        return;
    }
    // 等待线程结束 (wait for the thread to finish)
    pthread_join(thread->id, NULL);
    if (status) {
        *status = thread->retval;
    }
}
/**
 * Detach a thread so the system reclaims its resources automatically when
 * it terminates; the thread can no longer be joined afterwards.
 * @param thread thread to detach (must not be NULL)
 */
void ThreadDetach(Thread *thread) {
    assert(thread);
    if (thread) {
        pthread_detach(thread->id);
    }
}
/**
 * Join a thread and free its Thread structure. Safe to call with NULL.
 * @param thread thread to destroy
 */
void ThreadDestroy(Thread * thread) {
    if (!thread) {
        return;
    }
    ThreadWait(thread, NULL);
    free(thread);
    /* note: only clears the local copy of the pointer */
    thread = NULL;
}
#ifdef __cplusplus
}
#endif
Mutex.h:
/*
 * Mutex.h - thin pthread wrappers replacing SDL_mutex/SDL_cond, so the
 * player core does not depend on SDL's locking primitives.
 */
#ifndef CAINCAMERA_MUTEX_H
#define CAINCAMERA_MUTEX_H
#ifdef __cplusplus
extern "C" {
#endif
#include <pthread.h>
#include <stdint.h>
// Return value of CondWaitTimeout when the wait timed out.
#define MUTEX_TIMEDOUT 1
// "Wait forever" timeout value.
#define MUTEX_MAXWAIT (~(uint32_t)0)
// Mutual-exclusion lock.
typedef struct Mutex {
    pthread_mutex_t id;
} Mutex;
// Allocate and initialize a mutex; NULL on failure.
Mutex *MutexCreate(void);
// Destroy and free a mutex (NULL-safe).
void MutexDestroy(Mutex *mutex);
// Destroy a mutex through a pointer-to-pointer and NULL it out.
void MutexDestroyPointer(Mutex **pMutex);
// Lock; returns pthread error code, or -1 when mutex is NULL.
int MutexLock(Mutex *mutex);
// Unlock; returns pthread error code, or -1 when mutex is NULL.
int MutexUnlock(Mutex *mutex);
// Condition variable.
typedef struct Cond {
    pthread_cond_t id;
} Cond;
// Allocate and initialize a condition variable; NULL on failure.
Cond *CondCreate(void);
// Destroy and free a condition variable (NULL-safe).
void CondDestroy(Cond *cond);
// Destroy a condition variable through a pointer-to-pointer and NULL it out.
void CondDestroyPointer(Cond **pCond);
// Wake one waiter.
int CondSignal(Cond *cond);
// Wake all waiters.
int CondBroadcast(Cond *cond);
// Wait on cond; mutex must be held by the caller.
int CondWait(Cond *cond, Mutex *mutex);
// Wait at most msec milliseconds; returns 0, MUTEX_TIMEDOUT, or -1.
int CondWaitTimeout(Cond *cond, Mutex *mutex, uint32_t msec);
// Last error description (currently always NULL).
const char *GetError(void);
#ifdef __cplusplus
}
#endif
#endif //CAINCAMERA_MUTEX_H
Mutex.cpp:
#include "Mutex.h"
#ifdef __cplusplus
extern "C" {
#endif
#include <errno.h>
#include <assert.h>
#include <sys/time.h>
#include <stdlib.h>
#include <memory.h>
/**
 * Allocate and initialize a mutex.
 * @return new mutex, or NULL when allocation or initialization fails
 */
Mutex *MutexCreate(void) {
    Mutex *m = (Mutex *) calloc(1, sizeof(Mutex));
    if (!m) {
        return NULL;
    }
    if (pthread_mutex_init(&m->id, NULL) != 0) {
        free(m);
        return NULL;
    }
    return m;
}
/**
 * Destroy and free a mutex. NULL is accepted and ignored.
 * @param mutex mutex to destroy
 */
void MutexDestroy(Mutex *mutex) {
    if (!mutex) {
        return;
    }
    pthread_mutex_destroy(&mutex->id);
    free(mutex);
}
/**
 * Destroy a mutex through a pointer-to-pointer and reset the caller's
 * pointer to NULL, guarding against reuse of the freed lock.
 * @param mutex address of the mutex pointer
 */
void MutexDestroyPointer(Mutex **mutex) {
    if (!mutex) {
        return;
    }
    MutexDestroy(*mutex);
    *mutex = NULL;
}
/**
 * Lock a mutex.
 * @param mutex mutex to lock
 * @return pthread_mutex_lock() result, or -1 when mutex is NULL
 */
int MutexLock(Mutex *mutex) {
    assert(mutex);
    return mutex ? pthread_mutex_lock(&mutex->id) : -1;
}
/**
 * Unlock a mutex.
 * @param mutex mutex to unlock
 * @return pthread_mutex_unlock() result, or -1 when mutex is NULL
 */
int MutexUnlock(Mutex *mutex) {
    assert(mutex);
    return mutex ? pthread_mutex_unlock(&mutex->id) : -1;
}
/**
 * Allocate and initialize a condition variable.
 * @return new condition variable, or NULL on failure
 */
Cond *CondCreate(void) {
    Cond *c = (Cond *) calloc(1, sizeof(Cond));
    if (!c) {
        return NULL;
    }
    if (pthread_cond_init(&c->id, NULL) != 0) {
        free(c);
        return NULL;
    }
    return c;
}
/**
 * Destroy and free a condition variable. NULL is accepted and ignored.
 * @param cond condition variable to destroy
 */
void CondDestroy(Cond *cond) {
    if (!cond) {
        return;
    }
    pthread_cond_destroy(&cond->id);
    free(cond);
}
/**
 * Destroy a condition variable through a pointer-to-pointer and reset the
 * caller's pointer to NULL.
 * @param cond address of the condition-variable pointer
 */
void CondDestroyPointer(Cond **cond) {
    if (!cond) {
        return;
    }
    CondDestroy(*cond);
    *cond = NULL;
}
/**
 * Wake one thread waiting on the condition variable.
 * @param cond condition variable
 * @return pthread_cond_signal() result, or -1 when cond is NULL
 */
int CondSignal(Cond *cond) {
    assert(cond);
    return cond ? pthread_cond_signal(&cond->id) : -1;
}
/**
 * Wake every thread waiting on the condition variable.
 * @param cond condition variable
 * @return pthread_cond_broadcast() result, or -1 when cond is NULL
 */
int CondBroadcast(Cond *cond) {
    assert(cond);
    return cond ? pthread_cond_broadcast(&cond->id) : -1;
}
/**
 * Block on the condition variable; the caller must hold the mutex, which
 * is released while waiting and re-acquired before returning.
 * @param cond  condition variable
 * @param mutex mutex protecting the waited-on state
 * @return pthread_cond_wait() result, or -1 on NULL arguments
 */
int CondWait(Cond *cond, Mutex *mutex) {
    assert(cond);
    assert(mutex);
    if (!cond || !mutex) {
        return -1;
    }
    return pthread_cond_wait(&cond->id, &mutex->id);
}
/**
 * Block on the condition variable for at most ms milliseconds; the caller
 * must hold the mutex.
 * @param cond  condition variable
 * @param mutex mutex protecting the waited-on state
 * @param ms    timeout in milliseconds
 * @return 0 when signalled, MUTEX_TIMEDOUT on timeout, -1 on error
 */
int CondWaitTimeout(Cond *cond, Mutex *mutex, uint32_t ms) {
    int retval;
    struct timeval delta;
    struct timespec abstime;
    assert(cond);
    assert(mutex);
    if (!cond || !mutex) {
        return -1;
    }
    /* build the absolute deadline from the current time plus ms */
    gettimeofday(&delta, NULL);
    abstime.tv_sec = delta.tv_sec + (time_t)(ms / 1000);
    abstime.tv_nsec = (time_t) (delta.tv_usec + (ms % 1000) * 1000) * 1000;
    /* bug fix: tv_nsec must be in [0, 1e9). The original used '>' which let
     * tv_nsec == 1000000000 through — an invalid timespec that makes
     * pthread_cond_timedwait fail with EINVAL instead of waiting. */
    if (abstime.tv_nsec >= 1000000000) {
        abstime.tv_sec++;
        abstime.tv_nsec -= 1000000000;
    }
    while (1) {
        retval = pthread_cond_timedwait(&cond->id, &mutex->id, &abstime);
        if (retval == 0) {
            return 0;
        } else if (retval == EINTR) {
            /* interrupted by a signal: keep waiting until the deadline */
            continue;
        } else if (retval == ETIMEDOUT) {
            return MUTEX_TIMEDOUT;
        } else {
            break;
        }
    }
    return -1;
}
/**
 * Description of the last error. Not implemented; kept only for API parity
 * with SDL_GetError().
 * @return always NULL
 */
const char *GetError(void) {
    return NULL;
}
#ifdef __cplusplus
}
#endif
至此,我们将ffplay的很多独立的方法移植过来了。那么接下来,我们开始造播放器了:
CainPlayer.h:
/*
 * CainPlayer.h - the ffplay core wrapped in a C++ class. The former
 * file-scope globals of ffplay.c become member variables; AVFilter and
 * subtitle handling were removed in this port.
 */
#ifndef CAINPLAYER_CAINPLAYER_H
#define CAINPLAYER_CAINPLAYER_H
#include "ffplay_def.h"
class CainPlayer {
public:
    // (Re)create the render texture when size/format changed.
    int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture);
    // Compute the on-screen rectangle preserving the picture's aspect ratio.
    void calculate_display_rect(SDL_Rect *rect,
                                int scr_xleft, int scr_ytop, int scr_width, int scr_height,
                                int pic_width, int pic_height, AVRational pic_sar);
    // Upload a decoded video frame into the texture.
    int upload_texture(SDL_Texture *tex, AVFrame *frame, struct SwsContext **img_convert_ctx);
    // Render the current video picture.
    void video_image_display(VideoState *is);
    // Open one media stream (codec, decoder thread) by stream index.
    int stream_component_open(VideoState *is, int stream_index);
    // Open the input file and start the read thread.
    VideoState *stream_open(const char *filename);
    // Close one media stream.
    void stream_component_close(VideoState *is, int stream_index);
    // Close all streams and free the VideoState.
    void stream_close(VideoState *is);
    // Shut down the player.
    void exitPlayer();
    // Shut down the player and release SDL resources.
    void do_exit(VideoState *is);
    // Signal handler callback.
    static void sigterm_handler(int sig);
    // Compute the default window size from the picture dimensions.
    void set_default_window_size(int width, int height, AVRational sar);
    // Create the window/renderer for video output.
    int video_open(VideoState *is);
    // Display the current frame.
    void video_display(VideoState *is);
    // Which clock is the sync master.
    int get_master_sync_type(VideoState *is);
    // Current value of the master clock.
    double get_master_clock(VideoState *is);
    // Adjust the external clock speed based on queue fullness.
    void check_external_clock_speed(VideoState *is);
    // Request a seek.
    void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes);
    // Toggle between paused and playing.
    void stream_toggle_pause(VideoState *is);
    // Toggle pause (public entry).
    void toggle_pause(VideoState *is);
    // Toggle mute.
    void toggle_mute(VideoState *is);
    // Adjust the volume by step (in dB), clamped to the valid range.
    void update_volume(VideoState *is, int sign, double step);
    // Step forward a single frame while paused.
    void step_to_next_frame(VideoState *is);
    // Compute the display delay needed to stay in sync with the master clock.
    double compute_target_delay(double delay, VideoState *is);
    // Duration between two queued video frames.
    double vp_duration(VideoState *is, Frame *vp, Frame *nextvp);
    // Update the video clock after displaying a frame.
    void update_video_pts(VideoState *is, double pts, int64_t pos, int serial);
    // Refresh the video picture; *remaining_time is the next poll delay.
    void video_refresh(void *opaque, double *remaining_time);
    // Push a decoded video frame into the frame queue.
    int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial);
    // Decode the next video frame (with frame-drop handling).
    int get_video_frame(VideoState *is, AVFrame *frame);
    // Thread entry points (static trampolines into the member functions below).
    static int read_thread(void *arg);
    static int audio_thread(void *arg);
    static int video_thread(void *arg);
    int demux();
    int audioDecode();
    int videoDecode();
    // Estimate the number of samples needed to stay in sync (audio master).
    int synchronize_audio(VideoState *is, int nb_samples);
    // Decode/resample one audio frame into the output buffer.
    int audio_decode_frame(VideoState *is);
    // SDL audio callback trampoline.
    static void sdl_audio_callback(void *opaque, Uint8 *stream, int len);
    // Fill the audio device buffer.
    void audio_callback(Uint8 *stream, int len);
    // Open the audio device with the wanted parameters.
    int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params);
    // Demuxer interrupt callback (aborts blocking I/O).
    static int decode_interrupt_cb(void *ctx);
    // Whether a stream has buffered enough packets.
    int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue);
    // Whether the input is a realtime stream (rtp/rtsp/udp...).
    static int is_realtime(AVFormatContext *s);
    // Video refresh thread entry.
    static int video_refresh_thread(void *arg);
    void refreshVideo(void);
    // SDL event loop driving refresh and user input.
    void event_loop(VideoState *cur_stream);
    // avcodec lock manager callback.
    static int lockmgr(void **mtx, enum AVLockOp op);
    // Main playback entry point (the former ffplay main()).
    int play(int argc, char **argv);
private:
    VideoState *is;
    AVDictionary *sws_dict;
    AVDictionary *swr_opts;
    AVDictionary *format_opts, *codec_opts, *resample_opts;
    int default_width = 640;
    int default_height = 480;
    int screen_width = 1080;
    int screen_height = 1920;
    int frameWidth;             // last frame width, to detect when the texture must be recreated
    int frameHeight;            // last frame height, to detect when the texture must be recreated
    int audio_disable;          // disable the audio stream
    int video_disable;          // disable the video stream
    const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
    int seek_by_bytes = -1;     // seek by byte offset (needed for e.g. ogg)
    int display_disable;        // disable display
    int startup_volume = 100;   // initial volume
    int av_sync_type = AV_SYNC_AUDIO_MASTER; // sync master; defaults to audio
    int64_t start_time = AV_NOPTS_VALUE;     // playback start time
    int64_t duration = AV_NOPTS_VALUE;       // playback duration
    int fast = 0;
    int genpts = 0;
    int lowres = 0;
    int autoexit;               // exit automatically when playback ends
    int loop = 1;               // loop playback
    int framedrop = -1;         // drop video frames when audio is far ahead
    int infinite_buffer = -1;   // unbounded buffering, for network streams
    enum ShowMode show_mode = SHOW_MODE_NONE; // display mode
    /* current context */
    int64_t audio_callback_time; // time of the last audio callback
#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
    SDL_Window *window;
    SDL_Renderer *renderer;
};
#endif //CAINPLAYER_CAINPLAYER_H
这里,我们将ffplay中的核心方法放到CainPlayer类里面,又将本来定义在ffplay文件头部的一些参数改为CainPlayer的成员变量。接下来,我们将核心的方法移植过来。本人在移植过程中,已经删掉了AVFilter和字幕相关的方法。并且,SDL的锁和线程都用刚才实现的Mutex和Thread替换掉了。这是为了以后可以将整个SDL替换掉,逐步向ijkplayer的方向靠拢。
CainPlayer.cpp:
#include "CainPlayer.h"
#include "PacketQueue.h"
#include "FrameQueue.h"
#include "MediaDecoder.h"
#include "Clock.h"
#include "CmdUtils.h"
// Sentinel packet pushed into the packet queues after a flush/seek so the
// decoders know to reset their internal state (see the seek handling in demux()).
AVPacket flush_pkt;
/**
 * (Re)create the video texture when the frame size or pixel format changes.
 *
 * Compares the requested geometry against the cached frame dimensions and
 * the current texture's pixel format; only reallocates when something
 * actually changed (or when no texture exists yet).
 *
 * @param texture      in/out: texture to (re)create
 * @param new_format   requested SDL pixel format
 * @param new_width    requested width in pixels
 * @param new_height   requested height in pixels
 * @param blendmode    blend mode applied to a newly created texture
 * @param init_texture non-zero to zero-fill the new texture's pixels
 * @return 0 on success, -1 on failure
 */
int CainPlayer::realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture) {
    // BUGFIX: 'format' was previously read uninitialized in the comparison
    // below (undefined behavior). Query the current texture's format instead;
    // when there is no texture yet, SDL_PIXELFORMAT_UNKNOWN forces creation.
    Uint32 format = SDL_PIXELFORMAT_UNKNOWN;
    int access = 0, w = 0, h = 0;
    if (*texture != NULL) {
        SDL_QueryTexture(*texture, &format, &access, &w, &h);
    }
    if (*texture == NULL || new_width != frameWidth || new_height != frameHeight || new_format != format) {
        frameWidth = new_width;
        frameHeight = new_height;
        void *pixels;
        int pitch;
        if (*texture != NULL) {
            SDL_DestroyTexture(*texture);
        }
        if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height))) {
            return -1;
        }
        if (SDL_SetTextureBlendMode(*texture, blendmode) < 0) {
            return -1;
        }
        if (init_texture) {
            if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0) {
                return -1;
            }
            memset(pixels, 0, pitch * new_height);
            SDL_UnlockTexture(*texture);
        }
    }
    return 0;
}
/**
 * Compute the destination rectangle that letterboxes a picture of
 * pic_width x pic_height (with sample aspect ratio pic_sar) inside the
 * given screen area: centred, aspect-preserving, even width/height.
 *
 * @param rect       out: computed display rectangle
 * @param scr_xleft  screen area left edge
 * @param scr_ytop   screen area top edge
 * @param scr_width  screen area width
 * @param scr_height screen area height
 * @param pic_width  picture width in pixels
 * @param pic_height picture height in pixels
 * @param pic_sar    picture sample aspect ratio
 */
void CainPlayer::calculate_display_rect(SDL_Rect *rect,
                                        int scr_xleft, int scr_ytop, int scr_width, int scr_height,
                                        int pic_width, int pic_height, AVRational pic_sar) {
    // Invalid or missing SAR is treated as square pixels.
    float ratio = (pic_sar.num == 0) ? 0 : (float) av_q2d(pic_sar);
    if (ratio <= 0.0) {
        ratio = 1.0;
    }
    ratio *= (float) pic_width / (float) pic_height;

    /* XXX: we suppose the screen has a 1.0 pixel ratio */
    // Fit to the full height first, then clamp to the width if needed.
    int outH = scr_height;
    int outW = lrint(outH * ratio) & ~1;
    if (outW > scr_width) {
        outW = scr_width;
        outH = lrint(outW / ratio) & ~1;
    }
    rect->x = scr_xleft + (scr_width - outW) / 2;
    rect->y = scr_ytop + (scr_height - outH) / 2;
    rect->w = FFMAX(outW, 1);
    rect->h = FFMAX(outH, 1);
}
/**
 * Copy one decoded AVFrame into the SDL texture.
 * YUV420P and BGRA frames are uploaded directly; every other pixel format
 * is converted to BGRA through a (cached) swscale context first.
 *
 * @param tex             destination texture
 * @param frame           decoded source frame
 * @param img_convert_ctx in/out: cached swscale context for conversions
 * @return 0 on success, negative on failure
 */
int CainPlayer::upload_texture(SDL_Texture *tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
    int result = 0;
    if (frame->format == AV_PIX_FMT_YUV420P) {
        // SDL's YUV upload cannot handle bottom-up (negative linesize) planes.
        if (frame->linesize[0] < 0 || frame->linesize[1] < 0 || frame->linesize[2] < 0) {
            av_log(NULL, AV_LOG_ERROR, "Negative linesize is not supported for YUV.\n");
            return -1;
        }
        result = SDL_UpdateYUVTexture(tex, NULL,
                                      frame->data[0], frame->linesize[0],
                                      frame->data[1], frame->linesize[1],
                                      frame->data[2], frame->linesize[2]);
    } else if (frame->format == AV_PIX_FMT_BGRA) {
        if (frame->linesize[0] < 0) {
            // Bottom-up image: start at the last row and walk backwards.
            result = SDL_UpdateTexture(tex, NULL,
                                       frame->data[0] + frame->linesize[0] * (frame->height - 1),
                                       -frame->linesize[0]);
        } else {
            result = SDL_UpdateTexture(tex, NULL, frame->data[0], frame->linesize[0]);
        }
    } else {
        /* This should only happen if we are not using avfilter... */
        *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
                                                frame->width, frame->height,
                                                (AVPixelFormat) frame->format,
                                                frame->width, frame->height,
                                                AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
        if (*img_convert_ctx == NULL) {
            av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
            return -1;
        }
        uint8_t *dst_data[4];
        int dst_linesize[4];
        // SDL_LockTexture returns 0 on success.
        if (!SDL_LockTexture(tex, NULL, (void **) dst_data, dst_linesize)) {
            sws_scale(*img_convert_ctx, (const uint8_t * const *) frame->data, frame->linesize,
                      0, frame->height, dst_data, dst_linesize);
            SDL_UnlockTexture(tex);
        }
    }
    return result;
}
/**
 * Render the most recently shown video frame into the window.
 * On first display of a frame, (re)allocates the texture and uploads the
 * pixel data; afterwards simply blits the texture into the letterboxed
 * destination rectangle, flipping vertically for bottom-up frames.
 *
 * @param is player state
 */
void CainPlayer::video_image_display(VideoState *is) {
    SDL_Rect dst;
    Frame *current = frame_queue_peek_last(&is->videoFrameQueue);
    calculate_display_rect(&dst, is->xleft, is->ytop, is->width, is->height,
                           current->width, current->height, current->sar);
    if (!current->uploaded) {
        // YUV420P maps to YV12 directly; anything else goes through ARGB8888.
        int sdl_fmt = (current->frame->format == AV_PIX_FMT_YUV420P)
                      ? SDL_PIXELFORMAT_YV12 : SDL_PIXELFORMAT_ARGB8888;
        if (realloc_texture(&is->vid_texture, sdl_fmt, current->frame->width,
                            current->frame->height, SDL_BLENDMODE_NONE, 0) < 0) {
            return;
        }
        if (upload_texture(is->vid_texture, current->frame, &is->img_convert_ctx) < 0) {
            return;
        }
        current->uploaded = 1;
        current->flip_v = current->frame->linesize[0] < 0;
    }
    SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &dst, 0, NULL,
                     (const SDL_RendererFlip) (current->flip_v ? SDL_FLIP_VERTICAL : 0));
}
/**
 * Open the decoder for one media stream (audio or video).
 *
 * Copies the stream's codec parameters into a fresh codec context, opens
 * the matching decoder and starts its decode thread. For audio streams
 * this also opens the SDL audio device and initialises the A/V-sync
 * averaging filter.
 *
 * @param is           player state
 * @param stream_index stream index inside is->ic
 * @return 0 on success, a negative AVERROR on failure
 */
int CainPlayer::stream_component_open(VideoState *is, int stream_index) {
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    AVDictionary *opts = NULL;
    AVDictionaryEntry *t = NULL;
    int sample_rate, nb_channels;
    int64_t channel_layout;
    int ret = 0;
    int stream_lowres = lowres;
    if (stream_index < 0 || stream_index >= ic->nb_streams) {
        return -1;
    }
    // Allocate a decoding context
    avctx = avcodec_alloc_context3(NULL);
    if (!avctx) {
        return AVERROR(ENOMEM);
    }
    // Fill it from the demuxed stream parameters
    ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
    if (ret < 0) {
        goto fail;
    }
    // Packet timestamps use the stream's time base
    av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);
    // Look up the decoder for this codec id
    codec = avcodec_find_decoder(avctx->codec_id);
    if (!codec) {
        av_log(NULL, AV_LOG_WARNING,
               "No codec could be found with id %d\n", avctx->codec_id);
        ret = AVERROR(EINVAL);
        goto fail;
    }
    avctx->codec_id = codec->id;
    // Clamp the requested lowres factor to what this decoder supports
    if (stream_lowres > av_codec_get_max_lowres(codec)) {
        av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
               av_codec_get_max_lowres(codec));
        stream_lowres = av_codec_get_max_lowres(codec);
    }
    av_codec_set_lowres(avctx, stream_lowres);
#if FF_API_EMU_EDGE
    if (stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
#endif
    if (fast) {
        avctx->flags2 |= AV_CODEC_FLAG2_FAST;
    }
#if FF_API_EMU_EDGE
    if (codec->capabilities & AV_CODEC_CAP_DR1) {
        avctx->flags |= CODEC_FLAG_EMU_EDGE;
    }
#endif
    opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
    if (!av_dict_get(opts, "threads", NULL, 0)) {
        av_dict_set(&opts, "threads", "auto", 0);
    }
    if (stream_lowres) {
        av_dict_set_int(&opts, "lowres", stream_lowres, 0);
    }
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        av_dict_set(&opts, "refcounted_frames", "1", 0);
    }
    if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
        goto fail;
    }
    // Any entry still left in the dict was not consumed by the codec
    if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }
    is->eof = 0;
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch (avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            // Parameters for opening the audio device
            sample_rate = avctx->sample_rate;
            nb_channels = avctx->channels;
            channel_layout = avctx->channel_layout;
            /* prepare audio output */
            if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0) {
                goto fail;
            }
            is->audio_hw_buf_size = ret;
            is->audio_src = is->audio_tgt;
            is->audio_buf_size = 0;
            is->audio_buf_index = 0;
            /* init averaging filter */
            is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
            is->audio_diff_avg_count = 0;
            /* since we do not have a precise enough audio FIFO fullness,
               we correct audio sync only if larger than this threshold */
            is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
            is->audioStreamIdx = stream_index;
            is->audioStream = ic->streams[stream_index];
            // Initialise the audio decoder
            decoder_init(&is->audioDecoder, avctx, &is->audioQueue, is->readCondition);
            if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK))
                && !is->ic->iformat->read_seek) {
                is->audioDecoder.start_pts = is->audioStream->start_time;
                is->audioDecoder.start_pts_tb = is->audioStream->time_base;
            }
            // Start the audio decode thread
            if ((ret = decoder_start(&is->audioDecoder, audio_thread, this)) < 0) {
                goto out;
            }
            SDL_PauseAudio(0);
            break;
        case AVMEDIA_TYPE_VIDEO:
            is->videoStreamIdx = stream_index;
            is->videoStream = ic->streams[stream_index];
            decoder_init(&is->videoDecoder, avctx, &is->videoQueue, is->readCondition);
            if ((ret = decoder_start(&is->videoDecoder, video_thread, this)) < 0) {
                goto out;
            }
            // Ask the demux loop to queue any attached picture (cover art)
            is->queue_attachments_req = 1;
            break;
        default:
            break;
    }
    goto out;
fail:
    avcodec_free_context(&avctx);
out:
    av_dict_free(&opts);
    return ret;
}
/**
 * Allocate and initialise the player state for one input file and start
 * the video-refresh and demux threads.
 *
 * @param filename path/URL of the media to play (duplicated internally)
 * @return the new VideoState, or NULL on failure (any partially built
 *         state is torn down via stream_close)
 */
VideoState *CainPlayer::stream_open(const char *filename) {
    VideoState *is;
    is = (VideoState *)av_mallocz(sizeof(VideoState));
    if (!is) {
        return NULL;
    }
    is->filename = av_strdup(filename);
    if (!is->filename) {
        goto fail;
    }
    is->ytop = 0;
    is->xleft = 0;
    // Frame queues (decoded frames); each is tied to its packet queue for serials
    if (frame_queue_init(&is->videoFrameQueue, &is->videoQueue, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0) {
        goto fail;
    }
    if (frame_queue_init(&is->audioFrameQueue, &is->audioQueue, SAMPLE_QUEUE_SIZE, 1) < 0) {
        goto fail;
    }
    // Packet queues (demuxed, still-compressed packets)
    if (packet_queue_init(&is->videoQueue) < 0 ||
        packet_queue_init(&is->audioQueue) < 0) {
        goto fail;
    }
    // Condition variable used to wake the demux thread
    if (!(is->readCondition = CondCreate())) {
        av_log(NULL, AV_LOG_FATAL, "CondCreate(): %s\n", SDL_GetError());
        goto fail;
    }
    // Clocks: audio/video clocks track their queue serials; the external
    // clock tracks its own serial.
    init_clock(&is->videoClock, &is->videoQueue.serial);
    init_clock(&is->audioClock, &is->audioQueue.serial);
    init_clock(&is->extClock, &is->extClock.serial);
    is->audio_clock_serial = -1;
    // Clamp the configured startup volume into SDL's 0..SDL_MIX_MAXVOLUME range
    if (startup_volume < 0) {
        av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
    }
    if (startup_volume > 100) {
        av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
    }
    startup_volume = av_clip(startup_volume, 0, 100);
    startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
    is->audio_volume = startup_volume;
    is->muted = 0;
    is->av_sync_type = av_sync_type;
    is->videRefreshThread = ThreadCreate(video_refresh_thread, this, "refreshThread");
    if (!is->videRefreshThread) {
        av_log(NULL, AV_LOG_FATAL, "ThreadCreate(): %s\n", SDL_GetError());
        goto fail;
    }
    // Demux (read) thread
    is->readThread = ThreadCreate(read_thread, this, "read_thread");
    if (!is->readThread) {
        av_log(NULL, AV_LOG_FATAL, "ThreadCreate(): %s\n", SDL_GetError());
        // NOTE: the fail label deliberately lives inside this if-block; all
        // earlier gotos jump here to tear down the partially built state.
fail:
        stream_close(is);
        return NULL;
    }
    return is;
}
/**
 * Shut down the decoder attached to one stream and detach the stream
 * from the player state.
 *
 * @param is           player state
 * @param stream_index index of the stream to close inside is->ic
 */
void CainPlayer::stream_component_close(VideoState *is, int stream_index) {
    AVFormatContext *ic = is->ic;
    if (stream_index < 0 || stream_index >= ic->nb_streams) {
        return;
    }
    AVCodecParameters *codecpar = ic->streams[stream_index]->codecpar;
    // Stop and free the decoder resources for this stream type.
    if (codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        decoder_abort(&is->audioDecoder, &is->audioFrameQueue);
        SDL_CloseAudio();
        decoder_destroy(&is->audioDecoder);
        swr_free(&is->swr_ctx);
        av_freep(&is->audio_buf1);
        is->audio_buf1_size = 0;
        is->audio_buf = NULL;
    } else if (codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        decoder_abort(&is->videoDecoder, &is->videoFrameQueue);
        decoder_destroy(&is->videoDecoder);
    }
    // Tell the demuxer to drop any further packets of this stream.
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    // Finally detach the stream from the player state.
    if (codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        is->audioStream = NULL;
        is->audioStreamIdx = -1;
    } else if (codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        is->videoStream = NULL;
        is->videoStreamIdx = -1;
    }
}
/**
 * Stop all worker threads and free everything owned by the VideoState.
 * Also used from stream_open's failure path, so it must tolerate a
 * partially initialised state.
 *
 * @param is player state to destroy (freed by this call)
 */
void CainPlayer::stream_close(VideoState *is) {
    // Signal abort, then join the demux thread so nothing refills the queues
    is->abort_request = 1;
    ThreadWait(is->readThread, NULL);
    /* close each stream */
    if (is->audioStreamIdx >= 0) {
        stream_component_close(is, is->audioStreamIdx);
    }
    if (is->videoStreamIdx >= 0) {
        stream_component_close(is, is->videoStreamIdx);
    }
    // Close the demuxer context
    avformat_close_input(&is->ic);
    // Join the video refresh thread
    ThreadWait(is->videRefreshThread, NULL);
    // Destroy the packet queues
    packet_queue_destroy(&is->videoQueue);
    packet_queue_destroy(&is->audioQueue);
    // Destroy the frame queues
    frame_queue_destory(&is->videoFrameQueue);
    frame_queue_destory(&is->audioFrameQueue);
    // Destroy the read condition variable
    CondDestroy(is->readCondition);
    // Free the (cached) swscale conversion context
    sws_freeContext(is->img_convert_ctx);
    av_free(is->filename);
    if (is->vid_texture) {
        SDL_DestroyTexture(is->vid_texture);
    }
    av_free(is);
}
/**
 * Public entry point for shutting the player down; delegates to do_exit()
 * with the player's own stream state. Does not return (do_exit calls exit).
 */
void CainPlayer::exitPlayer() {
    do_exit(is);
}
/**
 * Final teardown: close the stream, destroy the SDL renderer/window,
 * free the global option dictionaries and terminate the process.
 * NOTE: ends with exit(0) and never returns.
 *
 * @param is stream state to close first (may be NULL)
 */
void CainPlayer::do_exit(VideoState *is) {
    if (is) {
        stream_close(is);
    }
    // The renderer must be destroyed before its window.
    if (renderer) {
        SDL_DestroyRenderer(renderer);
    }
    if (window) {
        SDL_DestroyWindow(window);
    }
    // Unregister the FFmpeg lock manager and release global option dicts
    av_lockmgr_register(NULL);
    av_dict_free(&swr_opts);
    av_dict_free(&sws_dict);
    av_dict_free(&format_opts);
    av_dict_free(&codec_opts);
    av_dict_free(&resample_opts);
    avformat_network_deinit();
    SDL_Quit();
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    exit(0);
}
// Signal handler: terminate the process immediately with status 123
// (same exit code as ffplay's SIGTERM handler). No cleanup is attempted.
// NOTE(review): presumably registered via signal() elsewhere — confirm.
void CainPlayer::sigterm_handler(int sig) {
    exit(123);
}
/**
* 设置大小
* @param width
* @param height
* @param sar
*/
void CainPlayer::set_default_window_size(int width, int height, AVRational sar) {
SDL_Rect rect;
calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
default_width = rect.w;
default_height = rect.h;
}
/**
 * Create (or resize) the SDL window and its renderer for video output.
 * Tries a hardware-accelerated vsynced renderer first and falls back to
 * whatever SDL offers; exits the player when neither window nor renderer
 * could be created.
 *
 * @param is player state (receives the chosen width/height)
 * @return 0 on success (does not return on fatal failure)
 */
int CainPlayer::video_open(VideoState *is) {
    int winW;
    int winH;
    if (screen_width) {
        winW = screen_width;
        winH = screen_height;
    } else {
        winW = default_width;
        winH = default_height;
    }
    if (window) {
        // Window already exists: just adjust its size.
        SDL_SetWindowSize(window, winW, winH);
    } else {
        int flags = SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE;
        window = SDL_CreateWindow(NULL, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, winW, winH, flags);
        SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
        if (window) {
            renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
            if (!renderer) {
                // Hardware path failed: fall back to a default renderer.
                av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
                renderer = SDL_CreateRenderer(window, -1, 0);
            }
            if (renderer) {
                SDL_RendererInfo info;
                if (!SDL_GetRendererInfo(renderer, &info)) {
                    av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", info.name);
                }
            }
        }
    }
    if (!window || !renderer) {
        av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
        do_exit(is);
    }
    is->width = winW;
    is->height = winH;
    return 0;
}
/**
 * Clear the back buffer, draw the current video frame and present it.
 * Lazily opens the window/renderer on first use; does nothing unless the
 * show mode is SHOW_MODE_VIDEO.
 *
 * @param is player state
 */
void CainPlayer::video_display(VideoState *is) {
    if (window == NULL) {
        video_open(is);
    }
    if (is->show_mode != SHOW_MODE_VIDEO) {
        return;
    }
    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
    SDL_RenderClear(renderer);
    if (is->videoStream) {
        video_image_display(is);
    }
    SDL_RenderPresent(renderer);
}
/**
 * Resolve the effective master sync source, falling back when the
 * preferred stream is absent: video -> audio -> external clock.
 *
 * @param is player state
 * @return one of AV_SYNC_VIDEO_MASTER / AV_SYNC_AUDIO_MASTER /
 *         AV_SYNC_EXTERNAL_CLOCK
 */
int CainPlayer::get_master_sync_type(VideoState *is) {
    switch (is->av_sync_type) {
        case AV_SYNC_VIDEO_MASTER:
            // No video stream: fall back to syncing on audio.
            return is->videoStream ? AV_SYNC_VIDEO_MASTER : AV_SYNC_AUDIO_MASTER;
        case AV_SYNC_AUDIO_MASTER:
            // No audio stream: fall back to the external clock.
            return is->audioStream ? AV_SYNC_AUDIO_MASTER : AV_SYNC_EXTERNAL_CLOCK;
        default:
            return AV_SYNC_EXTERNAL_CLOCK;
    }
}
/**
 * Read the current time of whichever clock is acting as master.
 *
 * @param is player state
 * @return current master clock value in seconds
 */
double CainPlayer::get_master_clock(VideoState *is) {
    int master = get_master_sync_type(is);
    if (master == AV_SYNC_VIDEO_MASTER) {
        return get_clock(&is->videoClock);
    }
    if (master == AV_SYNC_AUDIO_MASTER) {
        return get_clock(&is->audioClock);
    }
    return get_clock(&is->extClock);
}
/**
 * Tune the external clock's speed to the fill level of the packet queues
 * (used for realtime streams synced to the external clock): slow down when
 * a queue is running dry, speed up when both are comfortably full, and
 * otherwise nudge the speed back towards 1.0.
 *
 * @param is player state
 */
void CainPlayer::check_external_clock_speed(VideoState *is) {
    int videoStarved = (is->videoStreamIdx >= 0 && is->videoQueue.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES);
    int audioStarved = (is->audioStreamIdx >= 0 && is->audioQueue.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES);
    int videoAmple = (is->videoStreamIdx < 0 || is->videoQueue.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES);
    int audioAmple = (is->audioStreamIdx < 0 || is->audioQueue.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES);
    if (videoStarved || audioStarved) {
        // A queue is close to empty: slow the clock down.
        set_clock_speed(&is->extClock, FFMAX(EXTERNAL_CLOCK_SPEED_MIN,
                                             is->extClock.speed - EXTERNAL_CLOCK_SPEED_STEP));
    } else if (videoAmple && audioAmple) {
        // Both queues are full enough: speed the clock up.
        set_clock_speed(&is->extClock, FFMIN(EXTERNAL_CLOCK_SPEED_MAX,
                                             is->extClock.speed + EXTERNAL_CLOCK_SPEED_STEP));
    } else {
        // Drift the speed back towards 1.0 by one step.
        double speed = is->extClock.speed;
        if (speed != 1.0) {
            set_clock_speed(&is->extClock,
                            speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
        }
    }
}
/**
 * Request an asynchronous seek; the demux loop performs the actual seek.
 * Ignored while a previous request is still pending.
 *
 * @param is            player state
 * @param pos           target position (bytes or time units, see seek_by_bytes)
 * @param rel           relative offset used to bound the seek window
 * @param seek_by_bytes non-zero to seek by byte offset instead of timestamp
 */
void CainPlayer::stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes) {
    if (is->seek_req) {
        return;
    }
    is->seek_pos = pos;
    is->seek_rel = rel;
    is->seek_flags &= ~AVSEEK_FLAG_BYTE;
    if (seek_by_bytes) {
        is->seek_flags |= AVSEEK_FLAG_BYTE;
    }
    is->seek_req = 1;
    // Wake the demux thread so it notices the request promptly.
    CondSignal(is->readCondition);
}
/**
 * Toggle between paused and playing.
 * When resuming, the frame timer is advanced by the wall-clock time spent
 * paused (so the next frame is not treated as late) and the video and
 * external clocks are re-seeded at their current values.
 *
 * @param is player state
 */
void CainPlayer::stream_toggle_pause(VideoState *is) {
    if (is->paused) {
        // Account for the time spent paused before resuming.
        is->frame_timer += av_gettime_relative() / 1000000.0 - is->videoClock.last_updated;
        if (is->read_pause_return != AVERROR(ENOSYS)) {
            is->videoClock.paused = 0;
        }
        set_clock(&is->videoClock, get_clock(&is->videoClock), is->videoClock.serial);
    }
    set_clock(&is->extClock, get_clock(&is->extClock), is->extClock.serial);
    // Flip the paused flag on the state and on all three clocks at once.
    is->paused = is->audioClock.paused = is->videoClock.paused = is->extClock.paused = !is->paused;
}
/**
 * User-level pause toggle: flips pause state and leaves single-frame
 * stepping mode.
 *
 * @param is player state
 */
void CainPlayer::toggle_pause(VideoState *is) {
    stream_toggle_pause(is);
    is->step = 0;
}
/**
 * Flip the mute flag; the stored audio volume itself is left untouched.
 *
 * @param is player state
 */
void CainPlayer::toggle_mute(VideoState *is) {
    is->muted = is->muted ? 0 : 1;
}
/**
 * Change the audio volume by `step` decibels in the given direction.
 * The linear SDL volume is converted to dB, stepped, then converted back;
 * the `+ sign` fallback guarantees at least one unit of change when the
 * dB step rounds back to the same linear value.
 *
 * @param is   player state
 * @param sign +1 to raise the volume, -1 to lower it
 * @param step step size in dB
 */
void CainPlayer::update_volume(VideoState *is, int sign, double step) {
    // -1000 dB stands in for "silence" so log() of a zero volume is avoided.
    double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
    int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
    is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
}
/**
 * Advance playback by exactly one video frame.
 * A paused stream is resumed first; the refresh loop re-pauses after the
 * next frame has been shown (see video_refresh).
 *
 * @param is player state
 */
void CainPlayer::step_to_next_frame(VideoState *is) {
    /* if the stream is paused unpause it, then step */
    if (is->paused != 0) {
        stream_toggle_pause(is);
    }
    is->step = 1;
}
/**
 * Adjust the nominal frame delay so video follows the master clock.
 * Only applies when video is NOT the master: a late frame shrinks the
 * delay, an early frame stretches it (or doubles it for short frames).
 *
 * @param delay nominal duration of the previous frame in seconds
 * @param is    player state
 * @return the corrected delay in seconds
 */
double CainPlayer::compute_target_delay(double delay, VideoState *is) {
    double diff = 0;
    /* update delay to follow master synchronisation source */
    if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_clock(&is->videoClock) - get_master_clock(is);
        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        double sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
        if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
            if (diff <= -sync_threshold) {
                // Video is behind: shorten the delay (never below zero).
                delay = FFMAX(0, delay + diff);
            } else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD) {
                // Video is far ahead and the frame is long: absorb the whole gap.
                delay = delay + diff;
            } else if (diff >= sync_threshold) {
                // Video is ahead: display the frame for twice as long.
                delay = 2 * delay;
            }
        }
    }
    av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
           delay, -diff);
    return delay;
}
/**
 * Display duration of vp, derived from the pts gap to the following frame
 * when both belong to the same packet serial; falls back to the frame's
 * own nominal duration when the gap is missing or implausible.
 *
 * @param is     player state (provides max_frame_duration)
 * @param vp     current frame
 * @param nextvp the frame after vp
 * @return duration in seconds (0.0 across a serial change)
 */
double CainPlayer::vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
    if (vp->serial != nextvp->serial) {
        // Frames from different serials (e.g. across a seek) have no meaningful gap.
        return 0.0;
    }
    double gap = nextvp->pts - vp->pts;
    if (isnan(gap) || gap <= 0 || gap > is->max_frame_duration) {
        return vp->duration;
    }
    return gap;
}
/**
 * Publish the pts of the frame that just became current on the video
 * clock, and drag the external clock along with it.
 *
 * @param is     player state
 * @param pts    presentation timestamp in seconds
 * @param pos    byte position (unused here; kept for parity with ffplay)
 * @param serial packet-queue serial the pts belongs to
 */
void CainPlayer::update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
    /* update current video pts */
    set_clock(&is->videoClock, pts, serial);
    sync_clock_to_slave(&is->extClock, &is->videoClock);
}
/**
 * Called periodically by the refresh thread: decides whether the next
 * video frame is due, drops late frames, and draws the current one.
 *
 * @param opaque         the VideoState
 * @param remaining_time in/out: reduced to the time (seconds) until the
 *                       next frame is due so the caller can sleep
 */
void CainPlayer::video_refresh(void *opaque, double *remaining_time) {
    VideoState *is = (VideoState *)opaque;
    double time;
    // When following the external clock on a realtime stream, keep its speed tuned.
    if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime) {
        check_external_clock_speed(is);
    }
    if (is->videoStream) {
retry:
        // Keep consuming frames while any are queued.
        if (frame_queue_nb_remaining(&is->videoFrameQueue) > 0) {
            double last_duration, duration, delay;
            Frame *vp, *lastvp;
            /* dequeue the picture */
            // Previously displayed frame
            lastvp = frame_queue_peek_last(&is->videoFrameQueue);
            // Candidate frame
            vp = frame_queue_peek(&is->videoFrameQueue);
            // Discard frames from an obsolete serial (e.g. after a seek).
            if (vp->serial != is->videoQueue.serial) {
                frame_queue_next(&is->videoFrameQueue);
                goto retry;
            }
            // Serial changed between frames: restart the frame timer.
            if (lastvp->serial != vp->serial) {
                is->frame_timer = av_gettime_relative() / 1000000.0;
            }
            // While paused just redisplay the current frame.
            if (is->paused) {
                goto display;
            }
            /* compute nominal last_duration */
            last_duration = vp_duration(is, lastvp, vp);
            // Delay corrected for A/V sync
            delay = compute_target_delay(last_duration, is);
            time = av_gettime_relative() / 1000000.0;
            // Not yet time for this frame: report how long the caller may sleep.
            if (time < is->frame_timer + delay) {
                *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
                goto display;
            }
            // Advance the frame timer by the computed delay
            is->frame_timer += delay;
            // Frame timer fell too far behind: resync it to the wall clock.
            if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX) {
                is->frame_timer = time;
            }
            // Publish the new frame's pts/serial under the queue lock.
            MutexLock(is->videoFrameQueue.mutex);
            if (!isnan(vp->pts)) {
                update_video_pts(is, vp->pts, vp->pos, vp->serial);
            }
            MutexUnlock(is->videoFrameQueue.mutex);
            // With more than one frame queued, drop this one when we are already
            // past the next frame's due time (subject to the framedrop policy).
            if (frame_queue_nb_remaining(&is->videoFrameQueue) > 1) {
                Frame *nextvp = frame_queue_peek_next(&is->videoFrameQueue);
                duration = vp_duration(is, vp, nextvp);
                if (!is->step && (framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER))
                    && time > is->frame_timer + duration) {
                    frame_queue_next(&is->videoFrameQueue);
                    goto retry;
                }
            }
            frame_queue_next(&is->videoFrameQueue);
            is->force_refresh = 1;
            // In single-step mode, pause again after advancing one frame.
            if (is->step && !is->paused) {
                stream_toggle_pause(is);
            }
        }
display:
        /* display picture */
        if (!display_disable && is->force_refresh
            && is->show_mode == SHOW_MODE_VIDEO && is->videoFrameQueue.rindex_shown) {
            video_display(is);
        }
    }
    is->force_refresh = 0;
}
/**
 * Move a decoded video frame into the frame queue for later display.
 *
 * @param is        player state
 * @param src_frame decoded frame; its data is moved (src_frame is emptied)
 * @param pts       presentation timestamp in seconds
 * @param duration  nominal frame duration in seconds
 * @param pos       byte position of the frame in the input
 * @param serial    packet-queue serial the frame belongs to
 * @return 0 on success, -1 when the queue is being aborted
 */
int CainPlayer::queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial) {
    Frame *slot = frame_queue_peek_writable(&is->videoFrameQueue);
    if (slot == NULL) {
        return -1;
    }
    slot->sar = src_frame->sample_aspect_ratio;
    slot->uploaded = 0;
    slot->width = src_frame->width;
    slot->height = src_frame->height;
    slot->format = src_frame->format;
    slot->pts = pts;
    slot->duration = duration;
    slot->pos = pos;
    slot->serial = serial;
    // Keep the preferred window size in step with the stream geometry.
    set_default_window_size(slot->width, slot->height, slot->sar);
    // Transfer ownership of the frame buffers; src_frame is left empty.
    av_frame_move_ref(slot->frame, src_frame);
    frame_queue_push(&is->videoFrameQueue);
    return 0;
}
/**
 * Pull one decoded frame from the video decoder.
 * Applies the framedrop policy: a frame that is already behind the master
 * clock (beyond the last filter delay) is dropped while more packets are
 * pending, instead of being queued.
 *
 * @param is    player state
 * @param frame out: receives the decoded frame
 * @return positive when a frame was produced, 0 when none (or dropped),
 *         negative on decoder abort/error
 */
int CainPlayer::get_video_frame(VideoState *is, AVFrame *frame) {
    int got_picture;
    if ((got_picture = decoder_decode_frame(&is->videoDecoder, frame, NULL)) < 0) {
        return -1;
    }
    if (got_picture) {
        double dpts = NAN;
        if (frame->pts != AV_NOPTS_VALUE) {
            dpts = av_q2d(is->videoStream->time_base) * frame->pts;
        }
        // Derive the frame's sample aspect ratio
        frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->videoStream, frame);
        // Framedrop policy: drop frames that are already late relative to the master clock
        if (framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
            if (frame->pts != AV_NOPTS_VALUE) {
                double diff = dpts - get_master_clock(is);
                if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
                    diff - is->frame_last_filter_delay < 0 &&
                    is->videoDecoder.pkt_serial == is->videoClock.serial &&
                    is->videoQueue.nb_packets) {
                    av_frame_unref(frame);
                    got_picture = 0;
                }
            }
        }
    }
    return got_picture;
}
int CainPlayer::read_thread(void *arg) {
CainPlayer * player = (CainPlayer *) arg;
return player->demux();
}
int CainPlayer::audio_thread(void *arg) {
CainPlayer * player = (CainPlayer *) arg;
return player->audioDecode();
}
int CainPlayer::video_thread(void *arg) {
CainPlayer * player = (CainPlayer *) arg;
return player->videoDecode();
}
/**
* 解复用
* @return
*/
int CainPlayer::demux() {
AVFormatContext *ic = NULL;
int err, i, ret;
int st_index[AVMEDIA_TYPE_NB];
AVPacket pkt1, *pkt = &pkt1;
int64_t stream_start_time;
int pkt_in_play_range = 0;
AVDictionaryEntry *t;
AVDictionary **opts;
int orig_nb_streams;
Mutex *wait_mutex = MutexCreate();
int scan_all_pmts_set = 0;
int64_t pkt_ts;
// 创建等待互斥锁
if (!wait_mutex) {
av_log(NULL, AV_LOG_FATAL, "MutexCreate(): %s\n", SDL_GetError());
ret = AVERROR(ENOMEM);
goto fail;
}
memset(st_index, -1, sizeof(st_index));
is->videoStreamIdx = -1;
is->audioStreamIdx = -1;
is->eof = 0;
// 创建解复用上下文
ic = avformat_alloc_context();
if (!ic) {
av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
ret = AVERROR(ENOMEM);
goto fail;
}
// 设置解复用中断回调
ic->interrupt_callback.callback = decode_interrupt_cb;
ic->interrupt_callback.opaque = is;
if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
scan_all_pmts_set = 1;
}
// 打开视频文件
err = avformat_open_input(&ic, is->filename, NULL, &format_opts);
if (err < 0) {
print_error(is->filename, err);
ret = -1;
goto fail;
}
if (scan_all_pmts_set) {
av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
}
if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
ret = AVERROR_OPTION_NOT_FOUND;
goto fail;
}
is->ic = ic;
if (genpts) {
ic->flags |= AVFMT_FLAG_GENPTS;
}
av_format_inject_global_side_data(ic);
opts = setup_find_stream_info_opts(ic, codec_opts);
orig_nb_streams = ic->nb_streams;
// 查找媒体流信息
err = avformat_find_stream_info(ic, opts);
for (i = 0; i < orig_nb_streams; i++) {
av_dict_free(&opts[i]);
}
av_freep(&opts);
if (err < 0) {
av_log(NULL, AV_LOG_WARNING,
"%s: could not find codec parameters\n", is->filename);
ret = -1;
goto fail;
}
if (ic->pb) {
ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
}
// 判断是否以字节方式定位
if (seek_by_bytes < 0) {
seek_by_bytes =
!!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
}
// 计算帧的最大显示时长
is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
// 如果不是从头开始播放,则跳转播放位置
if (start_time != AV_NOPTS_VALUE) {
int64_t timestamp;
timestamp = start_time;
/* add the stream start time */
if (ic->start_time != AV_NOPTS_VALUE)
timestamp += ic->start_time;
ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
is->filename, (double)timestamp / AV_TIME_BASE);
}
}
// 判断是否属于实时流
is->realtime = is_realtime(ic);
// 查找媒体流
for (i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i];
enum AVMediaType type = st->codecpar->codec_type;
st->discard = AVDISCARD_ALL;
if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1) {
if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0) {
st_index[type] = i;
}
}
}
for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
if (wanted_stream_spec[i] && st_index[i] == -1) {
av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n",
wanted_stream_spec[i], av_get_media_type_string((AVMediaType)i));
st_index[i] = INT_MAX;
}
}
// 是否禁止视频流
if (!video_disable) {
st_index[AVMEDIA_TYPE_VIDEO] =
av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
}
// 是否禁止音频流
if (!audio_disable) {
st_index[AVMEDIA_TYPE_AUDIO] =
av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
st_index[AVMEDIA_TYPE_AUDIO],
st_index[AVMEDIA_TYPE_VIDEO],
NULL, 0);
}
// 设置显示模式
is->show_mode = show_mode;
// 判断视频流索引是否存在,用来计算视频的显示区域
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
AVCodecParameters *codecpar = st->codecpar;
// 计算采样长宽比
AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
// 设置默认的窗口大小
if (codecpar->width) {
set_default_window_size(codecpar->width, codecpar->height, sar);
}
}
// 打开音频流
if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
}
// 打开视频流
ret = -1;
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
}
// 是否需要重新设置显示模式
if (is->show_mode == SHOW_MODE_NONE) {
is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
}
// 判断音频流或者视频流是否打开了
if (is->videoStreamIdx < 0 && is->audioStreamIdx < 0) {
av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
is->filename);
ret = -1;
goto fail;
}
// 如果是实时流,设置无线缓冲区
if (infinite_buffer < 0 && is->realtime) {
infinite_buffer = 1;
}
// 进入解复用阶段
for (;;) {
// 停止播放
if (is->abort_request) {
break;
}
// 暂停状态
if (is->paused != is->last_paused) {
is->last_paused = is->paused;
if (is->paused) { // 如果此时处于暂停状态,则停止读文件
is->read_pause_return = av_read_pause(ic);
} else {
av_read_play(ic);
}
}
#if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
// 如果此时处于暂停状态,并且不是rtsp、mmsh实时流,则延时10毫秒在继续下一轮读操作
if (is->paused &&
(!strcmp(ic->iformat->name, "rtsp") ||
(ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
/* wait 10 ms to avoid trying to get another packet */
/* XXX: horrible */
SDL_Delay(10);
continue;
}
#endif
// 如果处于定位操作状态,则进入定位操作
if (is->seek_req) {
int64_t seek_target = is->seek_pos;
int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
// 定位
ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR,
"%s: error while seeking\n", is->ic->filename);
} else {
if (is->audioStreamIdx >= 0) {
packet_queue_flush(&is->audioQueue);
packet_queue_put(&is->audioQueue, &flush_pkt);
}
if (is->videoStreamIdx >= 0) {
packet_queue_flush(&is->videoQueue);
packet_queue_put(&is->videoQueue, &flush_pkt);
}
if (is->seek_flags & AVSEEK_FLAG_BYTE) {
set_clock(&is->extClock, NAN, 0);
} else {
set_clock(&is->extClock, seek_target / (double)AV_TIME_BASE, 0);
}
}
is->seek_req = 0;
is->queue_attachments_req = 1;
is->eof = 0;
// 如果此时处于暂停状态,则调到下一帧
if (is->paused) {
step_to_next_frame(is);
}
}
// 附着请求状态
if (is->queue_attachments_req) {
// 判断视频流是否存在
if (is->videoStream && is->videoStream->disposition & AV_DISPOSITION_ATTACHED_PIC) {
AVPacket copy;
if ((ret = av_copy_packet(©, &is->videoStream->attached_pic)) < 0) {
goto fail;
}
packet_queue_put(&is->videoQueue, ©);
packet_queue_put_nullpacket(&is->videoQueue, is->videoStreamIdx);
}
is->queue_attachments_req = 0;
}
// 如果队列已满,不需要再继续读了
if (infinite_buffer < 1 &&
(is->audioQueue.size + is->videoQueue.size > MAX_QUEUE_SIZE
|| (stream_has_enough_packets(is->audioStream, is->audioStreamIdx, &is->audioQueue) &&
stream_has_enough_packets(is->videoStream, is->videoStreamIdx, &is->videoQueue)))) {
/* wait 10 ms */
MutexLock(wait_mutex);
CondWaitTimeout(is->readCondition, wait_mutex, 10);
MutexUnlock(wait_mutex);
continue;
}
// 如果此时不能处于暂停状态,并且为了播放到结尾了,判断是否需要循环播放。
if (!is->paused &&
(!is->audioStream || (is->audioDecoder.finished == is->audioQueue.serial && frame_queue_nb_remaining(&is->audioFrameQueue) == 0)) &&
(!is->videoStream || (is->videoDecoder.finished == is->videoQueue.serial && frame_queue_nb_remaining(&is->videoFrameQueue) == 0))) {
if (loop != 1 && (!loop || --loop)) {
stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
} else if (autoexit) {
ret = AVERROR_EOF;
goto fail;
}
}
// 读出裸数据包
ret = av_read_frame(ic, pkt);
if (ret < 0) {
// 如果没能读出裸数据包,判断是否是结尾
if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
if (is->videoStreamIdx >= 0) {
packet_queue_put_nullpacket(&is->videoQueue, is->videoStreamIdx);
}
if (is->audioStreamIdx >= 0) {
packet_queue_put_nullpacket(&is->audioQueue, is->audioStreamIdx);
}
is->eof = 1;
}
if (ic->pb && ic->pb->error) {
break;
}
MutexLock(wait_mutex);
CondWaitTimeout(is->readCondition, wait_mutex, 10);
MutexUnlock(wait_mutex);
continue;
} else {
is->eof = 0;
}
// 计算pkt的pts是否处于播放范围内
stream_start_time = ic->streams[pkt->stream_index]->start_time;
pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
// 播放范围内
pkt_in_play_range = duration == AV_NOPTS_VALUE ||
(pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
av_q2d(ic->streams[pkt->stream_index]->time_base) -
(double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
<= ((double)duration / 1000000);
if (pkt->stream_index == is->audioStreamIdx && pkt_in_play_range) {
packet_queue_put(&is->audioQueue, pkt);
} else if (pkt->stream_index == is->videoStreamIdx && pkt_in_play_range
&& !(is->videoStream->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
packet_queue_put(&is->videoQueue, pkt);
} else {
av_packet_unref(pkt);
}
}
ret = 0;
fail:
if (ic && !is->ic) {
avformat_close_input(&ic);
}
// 如果结果不为0,则发送退出指令
if (ret != 0) {
SDL_Event event;
event.type = FF_QUIT_EVENT;
event.user.data1 = is;
SDL_PushEvent(&event);
}
MutexDestroy(wait_mutex);
return 0;
}
/**
* 音频解码
* @return
*/
int CainPlayer::audioDecode() {
AVFrame *frame = av_frame_alloc();
Frame *af;
int got_frame = 0;
AVRational tb;
int ret = 0;
if (!frame) {
return AVERROR(ENOMEM);
}
do {
if ((got_frame = decoder_decode_frame(&is->audioDecoder, frame, NULL)) < 0) {
goto the_end;
}
if (got_frame) {
tb = (AVRational){1, frame->sample_rate};
if (!(af = frame_queue_peek_writable(&is->audioFrameQueue))){
goto the_end;
}
af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
af->pos = av_frame_get_pkt_pos(frame);
af->serial = is->audioDecoder.pkt_serial;
af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
av_frame_move_ref(af->frame, frame);
frame_queue_push(&is->audioFrameQueue);
}
} while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
the_end:
av_frame_free(&frame);
return ret;
}
/**
* 视频解码
* @return
*/
int CainPlayer::videoDecode() {
AVFrame *frame = av_frame_alloc();
double pts;
double duration;
int ret;
AVRational tb = is->videoStream->time_base;
AVRational frame_rate = av_guess_frame_rate(is->ic, is->videoStream, NULL);
if (!frame) {
return AVERROR(ENOMEM);
}
for (;;) {
ret = get_video_frame(is, frame);
if (ret < 0)
goto the_end;
if (!ret) {
continue;
}
duration = (frame_rate.num && frame_rate.den
? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
ret = queue_picture(is, frame, pts, duration,
av_frame_get_pkt_pos(frame), is->videoDecoder.pkt_serial);
av_frame_unref(frame);
if (ret < 0) {
goto the_end;
}
}
the_end:
av_frame_free(&frame);
return 0;
}
/**
 * Audio synchronization: when audio is not the master clock, adjust
 * the number of samples to output so the audio clock drifts toward
 * the master clock.
 * @param is         player state
 * @param nb_samples number of samples in the decoded frame
 * @return corrected number of samples to output
 */
int CainPlayer::synchronize_audio(VideoState *is, int nb_samples) {
    // No correction needed when audio itself is the master clock
    if (get_master_sync_type(is) == AV_SYNC_AUDIO_MASTER) {
        return nb_samples;
    }
    int wanted_nb_samples = nb_samples;
    double diff = get_clock(&is->audioClock) - get_master_clock(is);
    if (isnan(diff) || fabs(diff) >= AV_NOSYNC_THRESHOLD) {
        /* too big difference : may be initial PTS errors, so
           reset A-V filter */
        is->audio_diff_avg_count = 0;
        is->audio_diff_cum = 0;
        return wanted_nb_samples;
    }
    // Exponentially weighted accumulation of the clock difference
    is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
    if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
        /* not enough measures to have a correct estimate */
        is->audio_diff_avg_count++;
        return wanted_nb_samples;
    }
    /* estimate the A-V difference */
    double avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
    if (fabs(avg_diff) >= is->audio_diff_threshold) {
        wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
        // Clamp the correction to at most SAMPLE_CORRECTION_PERCENT_MAX percent
        int lowBound = nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
        int highBound = nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
        wanted_nb_samples = av_clip(wanted_nb_samples, lowBound, highBound);
    }
    return wanted_nb_samples;
}
/**
 * Decode one audio frame from the frame queue and, when the source
 * format differs from the hardware output format, resample it.
 * On success is->audio_buf points to the playable data.
 * @param is player state
 * @return size in bytes of the data in is->audio_buf, or -1 when
 *         paused, aborted, or on a resampling error
 */
int CainPlayer::audio_decode_frame(VideoState *is) {
int data_size, resampled_data_size;
int64_t dec_channel_layout;
av_unused double audio_clock0;
int wanted_nb_samples;
Frame *af;
// While paused the callback outputs silence instead of consuming frames
if (is->paused) {
return -1;
}
// Drop frames whose serial is stale (e.g. queued before a seek)
do {
if (!(af = frame_queue_peek_readable(&is->audioFrameQueue))) {
return -1;
}
frame_queue_next(&is->audioFrameQueue);
} while (af->serial != is->audioQueue.serial);
// Byte size of the decoded frame in its native format
data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
af->frame->nb_samples,
(AVSampleFormat)af->frame->format, 1);
// Trust the frame's channel layout only if it matches its channel count
dec_channel_layout =
(af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout))
? af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
// Sample count after clock-based correction (see synchronize_audio)
wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
// (Re)create the resampler when the source parameters changed, or when
// sample compensation is needed but no swr context exists yet
if (af->frame->format != is->audio_src.fmt ||
dec_channel_layout != is->audio_src.channel_layout ||
af->frame->sample_rate != is->audio_src.freq ||
(wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
swr_free(&is->swr_ctx);
is->swr_ctx = swr_alloc_set_opts(NULL, is->audio_tgt.channel_layout, is->audio_tgt.fmt,
is->audio_tgt.freq, dec_channel_layout,
(AVSampleFormat)af->frame->format, af->frame->sample_rate,
0, NULL);
if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
av_log(NULL, AV_LOG_ERROR,
"Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
af->frame->sample_rate, av_get_sample_fmt_name((AVSampleFormat)af->frame->format), av_frame_get_channels(af->frame),
is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
swr_free(&is->swr_ctx);
return -1;
}
// Remember the source parameters the resampler was built for
is->audio_src.channel_layout = dec_channel_layout;
is->audio_src.channels = av_frame_get_channels(af->frame);
is->audio_src.freq = af->frame->sample_rate;
is->audio_src.fmt = (AVSampleFormat)af->frame->format;
}
if (is->swr_ctx) {
// Resample into audio_buf1; out_count adds 256 samples of headroom
const uint8_t **in = (const uint8_t **)af->frame->extended_data;
uint8_t **out = &is->audio_buf1;
int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
int len2;
if (out_size < 0) {
av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
return -1;
}
// Ask the resampler to stretch/shrink output toward the master clock
if (wanted_nb_samples != af->frame->nb_samples) {
if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
return -1;
}
}
av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
if (!is->audio_buf1)
return AVERROR(ENOMEM);
len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
if (len2 < 0) {
av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
return -1;
}
// Output filled completely: the buffer was probably too small; reinit
// the resampler to drop any internally buffered samples
if (len2 == out_count) {
av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
if (swr_init(is->swr_ctx) < 0)
swr_free(&is->swr_ctx);
}
is->audio_buf = is->audio_buf1;
resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
} else {
// Formats already match: hand out the decoded data directly
is->audio_buf = af->frame->data[0];
resampled_data_size = data_size;
}
/* update the audio clock with the pts */
if (!isnan(af->pts)) {
is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
} else {
is->audio_clock = NAN;
}
is->audio_clock_serial = af->serial;
return resampled_data_size;
}
/**
* 音频设备回调
* @param opaque
* @param stream
* @param len
*/
void CainPlayer::sdl_audio_callback(void *opaque, Uint8 *stream, int len) {
CainPlayer *player = (CainPlayer *) opaque;
player->audio_callback(stream, len);
}
/**
 * SDL audio device callback: fills `stream` with `len` bytes of
 * decoded audio (applying mute/volume), then updates the audio clock
 * to account for data still sitting in the buffers.
 * @param stream buffer to fill
 * @param len    number of bytes SDL requests
 */
void CainPlayer::audio_callback(Uint8 *stream, int len) {
int audio_size, len1;
// Timestamp of this callback; used below when setting the audio clock
audio_callback_time = av_gettime_relative();
while (len > 0) {
// Current decoded chunk exhausted: decode the next one
if (is->audio_buf_index >= is->audio_buf_size) {
audio_size = audio_decode_frame(is);
if (audio_size < 0) {
/* if error, just output silence */
is->audio_buf = NULL;
// Silence length rounded down to a whole number of frames
is->audio_buf_size = (unsigned int) (SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size
* is->audio_tgt.frame_size);
} else {
is->audio_buf_size = audio_size;
}
is->audio_buf_index = 0;
}
// Bytes to hand to SDL in this iteration
len1 = is->audio_buf_size - is->audio_buf_index;
if (len1 > len) {
len1 = len;
}
// At full volume copy directly; otherwise write silence and mix in
// the (possibly attenuated) audio
if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME) {
memcpy(stream, (uint8_t *) is->audio_buf + is->audio_buf_index, len1);
} else {
memset(stream, 0, len1);
if (!is->muted && is->audio_buf) {
SDL_MixAudio(stream, (uint8_t *) is->audio_buf + is->audio_buf_index, len1,
is->audio_volume);
}
}
len -= len1;
stream += len1;
is->audio_buf_index += len1;
}
// Bytes decoded but not yet delivered to SDL
is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
/* Let's assume the audio driver that is used by SDL has two periods. */
if (!isnan(is->audio_clock)) {
set_clock_at(&is->audioClock,
is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec,
is->audio_clock_serial,
audio_callback_time / 1000000.0);
sync_clock_to_slave(&is->extClock, &is->audioClock);
}
}
/**
 * Open the SDL audio device, negotiating channel count and sample
 * rate: when the requested combination is refused, fall back to
 * fewer channels, then to lower sample rates.
 * @param opaque                callback userdata (this object is passed to SDL below)
 * @param wanted_channel_layout preferred channel layout
 * @param wanted_nb_channels    preferred channel count
 * @param wanted_sample_rate    preferred sample rate
 * @param audio_hw_params       [out] parameters actually obtained
 * @return SDL audio buffer size in bytes on success, -1 on failure
 */
int CainPlayer::audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels,
int wanted_sample_rate, struct AudioParams *audio_hw_params) {
SDL_AudioSpec wanted_spec, spec;
const char *env;
// Fallback tables: next channel count (indexed by current count) and
// the ladder of sample rates to try
static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
// Allow overriding the channel count via the environment
env = SDL_getenv("SDL_AUDIO_CHANNELS");
if (env) {
wanted_nb_channels = atoi(env);
wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
}
// Keep layout and channel count consistent with each other
if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
}
wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
wanted_spec.channels = wanted_nb_channels;
wanted_spec.freq = wanted_sample_rate;
if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
return -1;
}
// Position the fallback index at the largest rate below the wanted one
while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq) {
next_sample_rate_idx--;
}
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.silence = 0;
// Power-of-two buffer size, bounded below, limiting the callback rate
wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
wanted_spec.callback = sdl_audio_callback;
wanted_spec.userdata = this;
// Retry with fewer channels, then lower sample rates, until open succeeds
// or all combinations are exhausted
while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
wanted_spec.channels, wanted_spec.freq, SDL_GetError());
wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
if (!wanted_spec.channels) {
wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
wanted_spec.channels = wanted_nb_channels;
if (!wanted_spec.freq) {
av_log(NULL, AV_LOG_ERROR,
"No more combinations to try, audio open failed\n");
return -1;
}
}
wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
}
// The playback path only supports signed 16-bit output
if (spec.format != AUDIO_S16SYS) {
av_log(NULL, AV_LOG_ERROR,
"SDL advised audio format %d is not supported!\n", spec.format);
return -1;
}
// SDL may have chosen a different channel count than requested
if (spec.channels != wanted_spec.channels) {
wanted_channel_layout = av_get_default_channel_layout(spec.channels);
if (!wanted_channel_layout) {
av_log(NULL, AV_LOG_ERROR,
"SDL advised channel count %d is not supported!\n", spec.channels);
return -1;
}
}
// Record the hardware parameters that were actually obtained
audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
audio_hw_params->freq = spec.freq;
audio_hw_params->channel_layout = wanted_channel_layout;
audio_hw_params->channels = spec.channels;
audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
return -1;
}
return spec.size;
}
/**
* 解码中断回调
* @param ctx
* @return
*/
int CainPlayer::decode_interrupt_cb(void *ctx) {
VideoState *is = (VideoState *)ctx;
return is->abort_request;
}
/**
 * Check whether a stream's packet queue already holds enough data,
 * i.e. reading more packets for it can be skipped for now.
 * @param st        the media stream
 * @param stream_id stream index; negative means the stream is absent
 * @param queue     the stream's packet queue
 * @return non-zero when no further reading is needed
 */
int CainPlayer::stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
    // Absent stream, aborted queue, or an attached picture all count as
    // "enough". Otherwise require more than MIN_FRAMES packets AND more
    // than one second of buffered duration (or an unknown duration).
    // Parentheses added around the &&-term: the original relied on
    // operator precedence and triggered -Wlogical-op-parentheses.
    return (stream_id < 0) || (queue->abort_request)
           || (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
           || ((queue->nb_packets > MIN_FRAMES)
               && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0));
}
/**
 * Decide whether the input is a realtime stream: either a realtime
 * demuxer (rtp/rtsp/sdp) or an rtp:/udp: protocol URL.
 * @param s demuxer context
 * @return 1 for realtime input, 0 otherwise
 */
int CainPlayer::is_realtime(AVFormatContext *s) {
    const char *fmtName = s->iformat->name;
    if (!strcmp(fmtName, "rtp") || !strcmp(fmtName, "rtsp") || !strcmp(fmtName, "sdp")) {
        return 1;
    }
    if (s->pb) {
        // Only URL-based inputs have an I/O context to inspect
        if (!strncmp(s->filename, "rtp:", 4) || !strncmp(s->filename, "udp:", 4)) {
            return 1;
        }
    }
    return 0;
}
/**
* 视频刷新线程句柄
* @param arg
* @return
*/
int CainPlayer::video_refresh_thread(void *arg) {
CainPlayer *player = (CainPlayer *) arg;
player->refreshVideo();
return 0;
}
/**
 * Video refresh loop: repeatedly sleeps until the next refresh time
 * and redraws the picture, until an abort is requested.
 */
void CainPlayer::refreshVideo() {
    double remaining_time = 0.0;
    while (!is->abort_request) {
        // Sleep off whatever time is left until the next refresh
        if (remaining_time > 0.0) {
            av_usleep((int) (int64_t) (remaining_time * 1000000.0));
        }
        remaining_time = REFRESH_RATE;
        // Refresh while playing, or once when a forced refresh is pending
        bool shouldDraw = !is->paused || is->force_refresh;
        if (is->show_mode != SHOW_MODE_NONE && shouldDraw) {
            video_refresh(is, &remaining_time);
        }
    }
}
/**
 * Main event loop: waits for SDL events and dispatches keyboard and
 * quit handling until a quit event arrives.
 * @param cur_stream player state
 */
void CainPlayer::event_loop(VideoState *cur_stream) {
    SDL_Event event;
    double incr, pos;
    bool exitLooper = 0;
    for (;;) {
        // FIX: the original never fetched an event, so `event` was read
        // uninitialized (undefined behavior) while busy-spinning on the
        // CPU. Block here until SDL delivers the next event.
        if (!SDL_WaitEvent(&event)) {
            continue;
        }
        switch (event.type) {
            case SDL_KEYDOWN:
                switch (event.key.keysym.sym) {
                    case SDLK_ESCAPE:
                    // quit
                    case SDLK_q:
                        do_exit(cur_stream);
                        break;
                    // pause / resume
                    case SDLK_p:
                    case SDLK_SPACE:
                        toggle_pause(cur_stream);
                        break;
                    // toggle mute
                    case SDLK_m:
                        toggle_mute(cur_stream);
                        break;
                    // volume up
                    case SDLK_KP_MULTIPLY:
                    case SDLK_0:
                        update_volume(cur_stream, 1, SDL_VOLUME_STEP);
                        break;
                    // volume down
                    case SDLK_KP_DIVIDE:
                    case SDLK_9:
                        update_volume(cur_stream, -1, SDL_VOLUME_STEP);
                        break;
                    // step to next frame
                    case SDLK_s:
                        step_to_next_frame(cur_stream);
                        break;
                    // seek backward 10 seconds
                    case SDLK_LEFT:
                        incr = -10.0;
                        goto do_seek;
                    // seek forward 10 seconds
                    case SDLK_RIGHT:
                        incr = 10.0;
                        goto do_seek;
                    // seek forward 60 seconds
                    case SDLK_UP:
                        incr = 60.0;
                        goto do_seek;
                    // seek backward 60 seconds
                    case SDLK_DOWN:
                        incr = -60.0;
                    do_seek:
                        if (seek_by_bytes) {
                            // byte-based seek: estimate the current byte position
                            pos = -1;
                            if (pos < 0 && cur_stream->videoStreamIdx >= 0)
                                pos = frame_queue_last_pos(&cur_stream->videoFrameQueue);
                            if (pos < 0 && cur_stream->audioStreamIdx >= 0)
                                pos = frame_queue_last_pos(&cur_stream->audioFrameQueue);
                            if (pos < 0)
                                pos = avio_tell(cur_stream->ic->pb);
                            // convert the time increment into bytes via the bitrate
                            if (cur_stream->ic->bit_rate)
                                incr *= cur_stream->ic->bit_rate / 8.0;
                            else
                                incr *= 180000.0;
                            pos += incr;
                            stream_seek(cur_stream, pos, incr, 1);
                        } else {
                            // time-based seek relative to the master clock
                            pos = get_master_clock(cur_stream);
                            if (isnan(pos))
                                pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
                            pos += incr;
                            if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
                                pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
                            stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                        }
                        break;
                    default:
                        break;
                }
                break;
            case SDL_QUIT:
            case FF_QUIT_EVENT:
                do_exit(cur_stream);
                exitLooper = 1;
                break;
            default:
                break;
        }
        // leave the loop once a quit event has been handled
        if (exitLooper) {
            break;
        }
    }
}
/**
 * FFmpeg lock manager callback, backed by the project's Mutex
 * wrappers.
 * @param mtx in/out mutex handle
 * @param op  requested lock operation
 * @return 0 on success, non-zero on failure
 */
int CainPlayer::lockmgr(void **mtx, enum AVLockOp op) {
    if (op == AV_LOCK_CREATE) {
        *mtx = MutexCreate();
        if (!*mtx) {
            av_log(NULL, AV_LOG_FATAL, "MutexCreate(): %s\n", SDL_GetError());
            return 1;
        }
        return 0;
    }
    if (op == AV_LOCK_OBTAIN) {
        // MutexLock returns 0 on success; normalize to 0/1
        return !!MutexLock((Mutex *) *mtx);
    }
    if (op == AV_LOCK_RELEASE) {
        return !!MutexUnlock((Mutex *) *mtx);
    }
    if (op == AV_LOCK_DESTROY) {
        MutexDestroy((Mutex *) *mtx);
        return 0;
    }
    // unknown operation
    return 1;
}
/**
 * Player entry point: initializes FFmpeg and SDL, opens the media
 * stream named in argv[1], and runs the event loop.
 * @param argc argument count (argv[1] must be the input file)
 * @param argv argument vector
 * @return 0 on success; calls exit() on fatal initialization errors
 */
int CainPlayer::play(int argc, char **argv) {
    int flags;
    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    // register all codecs, demuxers and protocols
    av_register_all();
    avformat_network_init();
    av_dict_set(&sws_dict, "flags", "bicubic", 0);
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
    // FIX: validate argc before touching argv[1] — the original read
    // argv[1] unconditionally, which is out of bounds when no argument
    // vector entry exists
    if (argc < 2 || !argv[1]) {
        av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
        exit(1);
    }
    char *input_filename = argv[1];
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
    if (audio_disable) {
        flags &= ~SDL_INIT_AUDIO;
    } else {
        /* Try to work around an occasional ALSA buffer underflow issue when the
         * period size is NPOT due to ALSA resampling by forcing the buffer size. */
        if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
            SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
    }
    if (display_disable) {
        flags &= ~SDL_INIT_VIDEO;
    }
    if (SDL_Init (flags)) {
        av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
        av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
        exit(1);
    }
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
    // register the FFmpeg lock manager
    if (av_lockmgr_register(lockmgr)) {
        av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
        do_exit(NULL);
    }
    // initialize the special flush packet inserted on seeks
    av_init_packet(&flush_pkt);
    flush_pkt.data = (uint8_t *)&flush_pkt;
    // open the media stream
    is = stream_open(input_filename);
    if (!is) {
        av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
        do_exit(NULL);
    }
    // run the event loop until quit
    event_loop(is);
    return 0;
}
到这里,还不行,我们还缺少了 NDK的Log方法,里面的ALOG开头的方法都是我封装的,如下:
native_log.h:
#ifndef CAINCAMERA_NATIVE_LOG_H
#define CAINCAMERA_NATIVE_LOG_H
#include <android/log.h>
#define JNI_DEBUG 1
#define JNI_TAG "CainJni_ffmpeg"
/* FIX: wrap each macro in do { } while (0) so it behaves as a single
 * statement. The original bare `if (JNI_DEBUG) { ... }` form breaks
 * when used as `if (cond) ALOGE(...); else ...` (dangling else). */
#define ALOGE(format, ...) do { if (JNI_DEBUG) { __android_log_print(ANDROID_LOG_ERROR, JNI_TAG, format, ##__VA_ARGS__); } } while (0)
#define ALOGI(format, ...) do { if (JNI_DEBUG) { __android_log_print(ANDROID_LOG_INFO, JNI_TAG, format, ##__VA_ARGS__); } } while (0)
#define ALOGD(format, ...) do { if (JNI_DEBUG) { __android_log_print(ANDROID_LOG_DEBUG, JNI_TAG, format, ##__VA_ARGS__); } } while (0)
#define ALOGW(format, ...) do { if (JNI_DEBUG) { __android_log_print(ANDROID_LOG_WARN, JNI_TAG, format, ##__VA_ARGS__); } } while (0)
#endif //CAINCAMERA_NATIVE_LOG_H
然后,我们还发现,缺少几个方法,这几个方法是在ffmpeg源码中的cmdutils.c文件中的,我们不需要cmdutils.c整个文件,只需要少数几个方法而已,因此,这里我们直接将那几个方法移植过来就好:
CmdUtils.h
#ifndef CAINPLAYER_CMDUTILS_H
#define CAINPLAYER_CMDUTILS_H
#include "ffplay_def.h"
#ifdef __cplusplus
extern "C" {
#endif
AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
AVFormatContext *s, AVStream *st, AVCodec *codec);
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec);
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts);
void print_error(const char *filename, int err);
#ifdef __cplusplus
};
#endif
#endif //CAINPLAYER_CMDUTILS_H
CmdUtils.cpp:
#include "CmdUtils.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Filter an option dictionary down to the entries applicable to one
 * codec/stream: honors per-stream specifiers ("key:spec") and
 * media-type prefixes ('v', 'a', 's').
 * @param opts     options to filter
 * @param codec_id codec to match
 * @param s        format context
 * @param st       stream the codec belongs to
 * @param codec    codec; looked up from codec_id when NULL
 * @return newly allocated dictionary of applicable options (caller frees)
 */
AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
                                AVFormatContext *s, AVStream *st, AVCodec *codec) {
    AVDictionary *ret = NULL;
    AVDictionaryEntry *t = NULL;
    int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM
                           : AV_OPT_FLAG_DECODING_PARAM;
    char prefix = 0;
    const AVClass *cc = avcodec_get_class();
    if (!codec)
        codec = s->oformat ? avcodec_find_encoder(codec_id)
                           : avcodec_find_decoder(codec_id);
    switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        prefix = 'v';
        flags |= AV_OPT_FLAG_VIDEO_PARAM;
        break;
    case AVMEDIA_TYPE_AUDIO:
        prefix = 'a';
        flags |= AV_OPT_FLAG_AUDIO_PARAM;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        prefix = 's';
        flags |= AV_OPT_FLAG_SUBTITLE_PARAM;
        break;
    default:
        // other media types: no prefix and no extra option flags
        break;
    }
    // Extra parentheses around the assignment make the intent explicit
    // and silence the assignment-as-condition warning
    while ((t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX))) {
        char *p = strchr(t->key, ':');
        /* check stream specification in opt name */
        if (p)
            switch (check_stream_specifier(s, st, p + 1)) {
            case 1: *p = 0; break;
            case 0: continue;
            default: break;
            }
        if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
            !codec ||
            (codec->priv_class &&
             av_opt_find(&codec->priv_class, t->key, NULL, flags,
                         AV_OPT_SEARCH_FAKE_OBJ)))
            av_dict_set(&ret, t->key, t->value, 0);
        else if (t->key[0] == prefix &&
                 av_opt_find(&cc, t->key + 1, NULL, flags,
                             AV_OPT_SEARCH_FAKE_OBJ))
            av_dict_set(&ret, t->key + 1, t->value, 0);
        // restore the key we temporarily truncated above
        if (p)
            *p = ':';
    }
    return ret;
}
/**
 * Validate a stream specifier against a stream, logging when it is
 * malformed.
 * @param s    format context
 * @param st   stream to match
 * @param spec stream specifier string
 * @return >0 on match, 0 on no match, <0 on an invalid specifier
 */
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec) {
    int matched = avformat_match_stream_specifier(s, st, spec);
    if (matched < 0) {
        av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
    }
    return matched;
}
/**
 * Build one per-stream option dictionary for avformat_find_stream_info,
 * filtering the global codec options for each stream.
 * @param s          format context
 * @param codec_opts global codec options
 * @return array of s->nb_streams dictionaries (caller frees), or NULL
 *         when there are no streams or allocation fails
 */
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts) {
    // nb_streams is unsigned in FFmpeg; use an unsigned index to avoid
    // a signed/unsigned comparison warning (original used int)
    unsigned int i;
    AVDictionary **opts;
    if (!s->nb_streams) {
        return NULL;
    }
    opts = (AVDictionary **) av_mallocz_array(s->nb_streams, sizeof(*opts));
    if (!opts) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not alloc memory for stream options.\n");
        return NULL;
    }
    for (i = 0; i < s->nb_streams; i++) {
        opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codecpar->codec_id,
                                    s, s->streams[i], NULL);
    }
    return opts;
}
/**
 * Log an FFmpeg error code together with the file it relates to,
 * falling back to the C library message when FFmpeg has none.
 * @param filename file the error occurred on
 * @param err      FFmpeg error code (negative)
 */
void print_error(const char *filename, int err) {
    char errbuf[128];
    const char *message;
    if (av_strerror(err, errbuf, sizeof(errbuf)) >= 0) {
        message = errbuf;
    } else {
        // FFmpeg has no text for this code; use the errno description
        message = strerror(AVUNERROR(err));
    }
    av_log(NULL, AV_LOG_ERROR, "%s: %s\n", filename, message);
}
#ifdef __cplusplus
};
#endif
OK,到了这里,我们就将整个ffplay的播放器核心功能就移植过来了。但这样还不行,SDL还要用到main方法,因此我们需要添加一个这样的方法:
CainPlayerController.cpp:
#include "CainPlayer.h"
/**
 * SDL-required main entry: creates the player, runs it, and
 * propagates its result.
 */
int main(int argc, char **argv) {
    CainPlayer *player = new CainPlayer();
    int ret = player->play(argc, argv);
    delete player;
    // FIX: return the actual play() result instead of discarding it
    // and returning a hard-coded 0
    return ret;
}
到这里,我们就将整个ffplay的核心功能移植过来了。编译运行,我们就可以看到,播放器成功运行了。
缺点:
这里只是将ffplay的播放器核心功能移植过来,去掉了字幕和AVFilter滤镜的方法。如果需要添加字幕和滤镜的话,你也可以在移植过程中,不去掉这些功能。
ffplay在播放4K视频的时候,发现画面卡死了,这个估计是SDL的问题。因此,若要支持4K视频的话,肯定是要将整个SDL替换掉的,渲染效率不够。
另外一个问题就是,这里仅仅是把播放器的核心功能移植过来,并没有考虑到控制逻辑的问题,因此,你无法使用点击返回、定位、调整音量大小等逻辑。这个其实是SDL的问题。
本人并不太喜欢SDL的逻辑,在定制操作时,还要对SDL中src/core/android/SDL_android.c的源码动刀,个人觉得这个架构思路对移动端来说并不好,so能独立就独立。在移植过程中,本人就已经将SDL的锁和线程用自己实现的方法替换掉了。只有音频播放和视频帧渲染的方法没有替换掉,这里其实可以将整个SDL替换掉的,这样你就可以脱离SDL的束缚,根据实际的需求定制了,ijkplayer就是这样的一种思路,自己实现一个ijksdl的代码,将SDL的代码完全替换掉。
不过个人觉得ijkplayer的文件太散乱了,而且每个版本的方法有可能发生变化。比如说,本人想要对视频添加滤镜,那么需修改的地方太多了,而且一旦升级版本之后,以前的代码就可能失效了。感觉ijkplayer 还是可以做得更好的。后续,有机会的话,本人将会介绍如何替换掉播放器中的SDL相关方法。
网友评论