在音视频-SDL播放YUV(下)成功播放yuv裸流数据之后, 下载的是对应格式的yuv420p的数据.
如果自己通过Mac, 或者是其他摄像头录制的视频, 支持的格式不一定是yuv420p, 我自己通过 ffmpeg -f avfoundation -framerate 30 -i 0 out.yuv
在Mac上录制的yuv视频格式默认就是uyvy422的像素格式, 所以通过音视频-SDL播放YUV(下)中直接播放, 会因为像素格式不对, 导致播放异常.
Mac主要看输出信息 Output #0
-pixel_format : 像素格式 uyvy422
-framerate : 帧率 30
-video_size : 视频大小1280x720
所以这个时候需要把uyvy422的像素格式转换为420p的像素格式.
需要用到的ffmpeg的库 #include <libswscale/swscale.h>
核心函数sws_scale
/**
* Scale the image slice in srcSlice and put the resulting scaled
* slice in the image in dst. A slice is a sequence of consecutive
* rows in an image.
*
* Slices have to be provided in sequential order, either in
* top-bottom or bottom-top order. If slices are provided in
* non-sequential order the behavior of the function is undefined.
*
* @param c the scaling context previously created with
* sws_getContext()
* @param srcSlice the array containing the pointers to the planes of
* the source slice
* @param srcStride the array containing the strides for each plane of
* the source image
* @param srcSliceY the position in the source image of the slice to
* process, that is the number (counted starting from
* zero) in the image of the first row of the slice
* @param srcSliceH the height of the source slice, that is the number
* of rows in the slice
* @param dst the array containing the pointers to the planes of
* the destination image
* @param dstStride the array containing the strides for each plane of
* the destination image
* @return the height of the output slice
*/
int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *const dst[], const int dstStride[]);
思路跟播放yuv视频差不多
- 把 uyvy422 转为 yuv420p
- uyvy422 的数据格式 uyvy uyvy uyvy uyvy 占2个字节
- yuv420p 的数据格式 yyyy yyyy uu vv 占1.5个字节
- 整体思路, 一帧的uyvy422 转为 一帧的yuv420p
- 简单粗暴 : 读取输入源文件, 每次读取一帧的数据, 读取传入buffer缓冲区, 通过核心函数sws_scale, 转换到输出缓冲区, 最后写入目标文件中
核心代码
#include "swsscalethread.h"
#include <QFile>
#include <QDebug>
extern "C" {
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libswresample/swresample.h>
}
#define SWS_ERROR_BUF(ret) \
char errbuf[1024]; \
av_strerror(ret, errbuf, sizeof (errbuf));
#define CHECK_END(ret, funcStr) \
if (ret) { \
SWS_ERROR_BUF(ret); \
qDebug() << #funcStr << " error :" << errbuf; \
goto end; \
}
SwsScaleThread::SwsScaleThread(QObject *parent) : QThread(parent) {
// 当监听到线程结束时(finished),就调用deleteLater回收内存
connect(this, &SwsScaleThread::finished,
this, &SwsScaleThread::deleteLater);
}
SwsScaleThread::~SwsScaleThread() {
// 断开所有的连接
disconnect();
// 内存回收之前,正常结束线程
requestInterruption();
// 安全退出
quit();
wait();
qDebug() << this << "析构(内存被回收)";
}
void SwsScaleThread::run() {
// ffplay -video_size 1280X720 -pixel_format uyvy422 -framerate 30 /Users/liliguang/Desktop/record_to_yuv.yuv
// 把 uyvy422 转为 yuv420p
// uyvy422 的数据格式 uyvy uyvy uyvy uyvy 占2个字节
// yuv420p 的数据格式 yyyy yyyy uu vv 占1.5个字节
// 整体思路, 一帧的uyvy422 转为 一帧的yuv420p
// 简单粗暴 : 读取输入源文件, 每次读取一帧的数据, 读取传入buffer缓冲区, 通过核心函数sws_scale, 转换到输出缓冲区, 最后写入目标文件中
// 输入源
int srcW = 1280;
int srcH = 720;
AVPixelFormat srcFormat = AV_PIX_FMT_UYVY422;
int srcImageSize = srcW * srcH * 2;
// 输入源buffer缓冲区
// 这里的4根据av_image_alloc()中的参数来决定的.
uint8_t *srcData[4];
int srclinesizes[4];
// 创建srcData缓冲区Ret
int srcDataRet;
// 输入源文件
const char *srcFilePath = "/Users/liliguang/Desktop/srcYuv.yuv";
// 输出源
int dstW = 640;
int dstH = 360;
AVPixelFormat dstFormat = AV_PIX_FMT_YUV420P;
int dsrImageSize = dstW * dstH * 1.5;
// 输出源buffer缓冲区
uint8_t *dstData[4];
int dstlinesizes[4];
int dstDataRet;
const char *dstFilePath = "/Users/liliguang/Desktop/dstYuv.yuv";
QFile srcFile(srcFilePath);
QFile dstFile(dstFilePath);
int openSrcFileRet;
int openDstFileRet;
struct SwsContext *context;
int swsScaleRet;
// 第一步 : 文件操作
openSrcFileRet = srcFile.open(QFile::ReadOnly);
CHECK_END(!openSrcFileRet, "srcFile open");
openDstFileRet = dstFile.open(QFile::WriteOnly);
CHECK_END(!openDstFileRet, "dstFile open");
// 第二步 : 创建上下文, 后面四个参数参照官方Demo
context = sws_getContext(
srcW, srcH, srcFormat,
dstW, dstH, dstFormat,
SWS_BILINEAR, NULL,NULL,NULL
);
CHECK_END(!context, "sws_getContext");
// 第三步 : 创建buffer缓冲区
srcDataRet = av_image_alloc(srcData, srclinesizes, srcW, srcH, srcFormat, 16);
CHECK_END(!srcDataRet, "srcData av_image_alloc");
qDebug() <<"srcDataRet : " << srcDataRet ;
dstDataRet = av_image_alloc(dstData, dstlinesizes, dstW, dstH, dstFormat, 16);
CHECK_END(!dstDataRet, "dstData av_image_alloc");
qDebug() <<"dstDataRet : " << dstDataRet ;
qDebug() <<"开始读取文件" ;
int readRet;
int writeRet;
while( (readRet = srcFile.read((char *)srcData[0], srcImageSize)) > 0) {
qDebug() << "readRet : " << readRet;
// 转换
swsScaleRet = sws_scale(context, srcData, srclinesizes, 0, srcH, dstData, dstlinesizes );
qDebug() << "swsScaleRet : " << swsScaleRet;
// 写入文件
writeRet = dstFile.write((char *)dstData[0], dsrImageSize);
CHECK_END(!writeRet, "dstFile write");
}
qDebug() << "readRet : " << readRet;
qDebug() <<"结束读取文件" ;
end:
// 关闭文件,释放资源
srcFile.close();
dstFile.close();
// 释放输入缓冲区
av_freep(&srcData);
av_freep(&dstData);
// 释放重采样上下文
sws_freeContext(context);
}
转换结果
粗略计算
输入源的总大小
Src : 248832000
输入源的一帧的大小
imageSize : 1280 * 720 * 2 = 1843200
输入源的总共有多少帧
Frame : 248832000 / 1843200 = 135
输出源的总大小
Dst : 46656000
输出源的一帧的大小
imageSize : 640 * 360 * 1.5 = 345600
输出源的总共有多少帧
Frame : 46656000 / 345600 = 135
通过ffmpeg命令行播放srcYuv.yuv
输入文件也可以通过 Stream #0:0: Video: rawvideo (UYVY / 0x59565955), uyvy422, 1280x720, 442368 kb/s, 30 tbr, 30 tbn 看到相关的输出参数
网友评论