#include "VideoCoder.h"
|
||
#include "Debuger.h"
|
||
FILE *p = nullptr;
|
||
int VideoCoder::OnBuffer(double dblSampleTime, BYTE * pBuffer, long lBufferSize)
{
    this->Encode(pBuffer, lBufferSize, AV_PIX_FMT_YUV420P);
    return 0;
}

int VideoCoder::OnCameraData(uint8_t * dat, uint32_t size)
{
    //std::cout << "capture data and push into coder" << std::endl;
    this->Encode(dat, size, AV_PIX_FMT_YUV420P);
    return 0;
}

// NOTE: uint8_t parameters cannot hold typical resolutions (e.g. 640x480);
// values above 255 are truncated. The declaration in VideoCoder.h would need
// a wider type for this setter to be usable.
int VideoCoder::SetDestPix(uint8_t width, uint8_t height) {
    this->mDestHeight = height;
    this->mDestWidth = width;
    return 0;
}

///
/// @brief Vertically flip (part of) the image data by building an FFmpeg filter graph
/// @param[in] in_width     - width of the input image
/// @param[in] in_height    - height of the input image
/// @param[in] rgbData      - pointer to the input image data
/// @param[in] bitsPerPixel - bytes per pixel, usually the length of rgbData / (width*height)
///

int VideoCoder::Filter(int in_width, int in_height, unsigned char* rgbData, int bitsPerPixel)
{
    //1. Register all filters
    avfilter_register_all();

    //2. Create a filter graph that owns and manages all the filters
    AVFilterGraph* filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        printf("Fail to create filter graph!\n");
        return -1;
    }

    //3. Get the source filter that feeds frames into the AVFilterGraph
    //   ("buffer" filter); its arguments describe the input image format
    char args[512];
    sprintf(args,
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
        in_width, in_height, AV_PIX_FMT_YUV420P, 1, 25, 1, 1);
//bufferSrc->description = Buffer video frames, and make them accessible to the filterchain.
|
||
const AVFilter* bufferSrc = avfilter_get_by_name("buffer"); // AVFilterGraph的输入源
|
||
|
||
AVFilterContext* bufferSrc_ctx;
|
||
int ret = avfilter_graph_create_filter(&bufferSrc_ctx, bufferSrc, "in", args, NULL, filter_graph);
|
||
if (ret < 0) {
|
||
printf("Fail to create filter bufferSrc\n");
|
||
return -1;
|
||
}
|
||
|
||
    //4. Get the sink filter that terminates the AVFilterGraph output ("buffersink" filter)
    AVBufferSinkParams *bufferSink_params;
    AVFilterContext* bufferSink_ctx;
    const AVFilter* bufferSink = avfilter_get_by_name("buffersink");
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };

    bufferSink_params = av_buffersink_params_alloc();
    bufferSink_params->pixel_fmts = pix_fmts;
    ret = avfilter_graph_create_filter(&bufferSink_ctx, bufferSink, "out", NULL,
        bufferSink_params, filter_graph);
    av_free(bufferSink_params); // the params struct is only needed during creation
    if (ret < 0) {
        printf("Fail to create filter sink filter\n");
        return -1;
    }
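    // Note: av_buffersink_params_alloc() is deprecated in newer FFmpeg releases.
    // A sketch of the commonly used alternative, which sets the accepted pixel
    // formats directly on the already-created sink context via the opt API:
    //
    //   ret = av_opt_set_int_list(bufferSink_ctx, "pix_fmts", pix_fmts,
    //                             AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);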

    //5. split filter: duplicates the stream; "outputs=2" produces two output streams
    const AVFilter *splitFilter = avfilter_get_by_name("split");
    AVFilterContext *splitFilter_ctx;
    ret = avfilter_graph_create_filter(&splitFilter_ctx, splitFilter, "split", "outputs=2",
        NULL, filter_graph);
    if (ret < 0) {
        printf("Fail to create split filter\n");
        return -1;
    }

    //6. crop filter: "out_w=iw:out_h=ih/2:x=0:y=0" selects the region to crop (the top half here)
    const AVFilter *cropFilter = avfilter_get_by_name("crop");
    AVFilterContext *cropFilter_ctx;
    ret = avfilter_graph_create_filter(&cropFilter_ctx, cropFilter, "crop",
        "out_w=iw:out_h=ih/2:x=0:y=0", NULL, filter_graph);
    if (ret < 0) {
        printf("Fail to create crop filter\n");
        return -1;
    }

    //7. vflip filter: vertical flip. Where the flipped region ends up is decided by the
    //   overlay filter below; as verified, the cropped region is flipped and composited onto
    //   the lower half of the frame while the upper half stays unchanged (unchanged because
    //   split produced two streams and these filters only operate on the secondary one).
    //   Use crop "out_w=iw:out_h=ih/2:x=0:y=ih/2" to cross-check this behaviour.
    const AVFilter *vflipFilter = avfilter_get_by_name("vflip");
    AVFilterContext *vflipFilter_ctx;
    ret = avfilter_graph_create_filter(&vflipFilter_ctx, vflipFilter, "vflip", NULL, NULL, filter_graph);
    if (ret < 0) {
        printf("Fail to create vflip filter\n");
        return -1;
    }

    //8. overlay filter: composites the two streams; "0:H/2" places the secondary stream
    //   at x=0, y=H/2, i.e. onto the lower half of the main stream
    const AVFilter *overlayFilter = avfilter_get_by_name("overlay");
    AVFilterContext *overlayFilter_ctx;
    ret = avfilter_graph_create_filter(&overlayFilter_ctx, overlayFilter, "overlay",
        "0:H/2", NULL, filter_graph);
    if (ret < 0) {
        printf("Fail to create overlay filter\n");
        return -1;
    }
    // split duplicates the input into two streams
    // crop selects the region to cut out
    // vflip only flips the image vertically
    // overlay decides where the secondary video is composited

    // [main]
    // input ----> split ---------------------> overlay --> output
    //               |                             ^
    //               |[tmp]                 [flip] |
    //               |                             |
    //               v                             |
    //               +-----> crop --> vflip -------+

    //9. Link the filters together
    // src filter to split filter
    ret = avfilter_link(bufferSrc_ctx, 0, splitFilter_ctx, 0);
    if (ret != 0) {
        printf("Fail to link src filter and split filter\n");
        return -1;
    }
    // split filter's first pad to overlay filter's main pad
    ret = avfilter_link(splitFilter_ctx, 0, overlayFilter_ctx, 0);
    if (ret != 0) {
        printf("Fail to link split filter and overlay filter main pad\n");
        return -1;
    }
    // split filter's second pad to crop filter
    ret = avfilter_link(splitFilter_ctx, 1, cropFilter_ctx, 0);
    if (ret != 0) {
        printf("Fail to link split filter's second pad and crop filter\n");
        return -1;
    }
    // crop filter to vflip filter
    ret = avfilter_link(cropFilter_ctx, 0, vflipFilter_ctx, 0);
    if (ret != 0) {
        printf("Fail to link crop filter and vflip filter\n");
        return -1;
    }
    // vflip filter to overlay filter's second pad
    ret = avfilter_link(vflipFilter_ctx, 0, overlayFilter_ctx, 1);
    if (ret != 0) {
        printf("Fail to link vflip filter and overlay filter's second pad\n");
        return -1;
    }
    // overlay filter to sink filter
    ret = avfilter_link(overlayFilter_ctx, 0, bufferSink_ctx, 0);
    if (ret != 0) {
        printf("Fail to link overlay filter and sink filter\n");
        return -1;
    }

    //10. Check the validity of the filter graph
    ret = avfilter_graph_config(filter_graph, NULL);
    if (ret < 0) {
        printf("Fail to configure filter graph\n");
        return -1;
    }

    //11. Dump the filter graph description to a file for inspection
    char *graph_str = avfilter_graph_dump(filter_graph, NULL);
    FILE* graphFile = NULL;
    fopen_s(&graphFile, "graphFile.txt", "w");
    if (graphFile) {
        fprintf(graphFile, "%s", graph_str);
        fclose(graphFile);
    }
    av_free(graph_str);

    // Per-frame processing: allocate input/output frames for pushing data through the graph
    AVFrame *frame_in = av_frame_alloc();
    unsigned char *frame_buffer_in = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
    av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in,
        AV_PIX_FMT_YUV420P, in_width, in_height, 1);

    AVFrame *frame_out = av_frame_alloc();
    unsigned char *frame_buffer_out = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
    av_image_fill_arrays(frame_out->data, frame_out->linesize, frame_buffer_out,
        AV_PIX_FMT_YUV420P, in_width, in_height, 1);

    frame_in->width = in_width;
    frame_in->height = in_height;
    frame_in->format = AV_PIX_FMT_YUV420P;
    uint32_t frame_count = 0;

    // No frames are pushed through the graph here yet; release everything before
    // returning so the function does not leak and actually returns a value.
    av_freep(&frame_buffer_in);
    av_freep(&frame_buffer_out);
    av_frame_free(&frame_in);
    av_frame_free(&frame_out);
    avfilter_graph_free(&filter_graph);
    return 0;
}
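
// A minimal sketch (not called anywhere in this file) of how the graph built in
// Filter() could be driven once per input frame, assuming frame_in has been filled
// with YUV420P data and bufferSrc_ctx / bufferSink_ctx are kept accessible:
//
//   if (av_buffersrc_add_frame(bufferSrc_ctx, frame_in) < 0) {
//       printf("Error while feeding the filtergraph\n");
//   } else if (av_buffersink_get_frame(bufferSink_ctx, frame_out) >= 0) {
//       // frame_out now holds the split/crop/vflip/overlay result
//       av_frame_unref(frame_out);
//   }
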
VideoCoder::VideoCoder(int width, int height, AVPixelFormat formt):
    mObserver(nullptr),
    mFrame(nullptr),
    mPitureBuffer(nullptr),
    mFormatCtx(nullptr),
    mOutputFmt(nullptr),
    mVideoStream(nullptr),
    mCodecCtx(nullptr),
    mCodec(nullptr) {
    // Register codecs/formats before looking up the encoder
    av_register_all();

    AVCodecID codec_id = AV_CODEC_ID_H264;
    mCodec = avcodec_find_encoder(codec_id);

    if (nullptr == p) {
        p = fopen("shit.h264", "wb");
    }
    this->mWidth = width;
    this->mHeight = height;
    this->mInformat = formt;
    if (!mCodec) {
        printf("Codec not found\n");
    }
    this->mFormatCtx = avformat_alloc_context();

    // Reference: https://blog.csdn.net/leixiaohua1020/article/details/25430425 (adapted from Lei Xiaohua's article)
    this->mOutputFmt = av_guess_format(NULL, "shit.h264", NULL);
    this->mFormatCtx->oformat = mOutputFmt;
    mCodecCtx = avcodec_alloc_context3(mCodec);
    if (!mCodecCtx) {
        printf("Could not allocate video codec context\n");
    }
    mCodecCtx->bit_rate = 1000;
    this->mDestHeight = 480;
    this->mDestWidth = 640;
    mCodecCtx->width = this->mDestWidth;
    mCodecCtx->height = this->mDestHeight;
    mCodecCtx->time_base.num = 1;
    mCodecCtx->time_base.den = 10;
    mCodecCtx->max_b_frames = 0;
    mCodecCtx->qmin = 10;
    mCodecCtx->qmax = 25;
    //mCodecCtx->flags |= AV_CODEC_FLAG_LOW_DELAY;
    mCodecCtx->gop_size = 25;
    mCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    av_opt_set(mCodecCtx->priv_data, "preset", "superfast", 0);
    av_opt_set(mCodecCtx->priv_data, "tune", "zerolatency", 0);
    if (avcodec_open2(mCodecCtx, mCodec, NULL) < 0) {
        printf("Could not open codec\n");
    }
    mFrame = av_frame_alloc();
    if (!mFrame) {
        printf("Could not allocate video frame\n");
    }
    mFrame->format = mCodecCtx->pix_fmt;
    mFrame->width = mCodecCtx->width;
    mFrame->height = mCodecCtx->height;
    mFrame->pts = 0;
    int ret = av_image_alloc(mFrame->data, mFrame->linesize, mCodecCtx->width, mCodecCtx->height,
        mCodecCtx->pix_fmt, 8);
    if (ret < 0) {
        printf("Could not allocate raw picture buffer\n");
    }

    // Assume the resolution never changes, so the AVPacket can be reused
    avformat_write_header(mFormatCtx, NULL);
    int picture_size = avpicture_get_size(AV_PIX_FMT_YUV420P, mCodecCtx->width, mCodecCtx->height);
}
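
// A rough usage sketch (assumptions: the capture source delivers YUV420P frames of the
// constructor's width/height, and an observer implementing OnGetCodeFrame has been
// attached to receive the encoded H.264 packets):
//
//   VideoCoder coder(640, 480, AV_PIX_FMT_YUV420P);
//   // for every captured frame:
//   coder.OnCameraData(yuvBuffer, yuvBufferSize);
//   // encoded data is delivered through mObserver->OnGetCodeFrame(data, size)
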
VideoCoder::~VideoCoder()
{
    if (p) {
        fclose(p);
        p = nullptr;
    }
}

void VideoCoder::Encode(uint8_t * src, int size, enum AVPixelFormat format) {
    uint8_t *pFrame[4];
    int lineSize[4];
    static int debugs = 1;
    // If the input is not YUV420P (or the size differs), convert it first
    int iFramesize;

    av_init_packet(&mAVPack);
    mAVPack.data = NULL; // packet data will be allocated by the encoder
    mAVPack.size = 0;

    int ret = av_image_alloc(pFrame, lineSize, mWidth, mHeight, AV_PIX_FMT_YUV420P, 1);
    if (ret < 0) {
        Debuger::Debug(L"Could not allocate destination image\n");
    }

    if (this->mInformat != AV_PIX_FMT_YUV420P || (this->mDestHeight != mHeight)) {
        int size = avpicture_get_size(this->mInformat, mWidth, mHeight);
        this->forceYUV420P(src, size, mInformat, (uint8_t ***)&pFrame, &iFramesize);
        // Only YUV420P output is supported
        mFrame->data[0] = pFrame[0]; //Y
        mFrame->data[1] = pFrame[1]; //U
        mFrame->data[2] = pFrame[2]; //V
    }
    else {
        mFrame->data[0] = src;                                       //Y
        mFrame->data[1] = src + mWidth*mHeight;                      //U
        mFrame->data[2] = src + mWidth*mHeight + mWidth*mHeight/4;   //V
    }
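    // For reference, the planar YUV420P layout assumed by the offsets above:
    // Y plane: mWidth*mHeight bytes, then U plane: (mWidth/2)*(mHeight/2) = mWidth*mHeight/4 bytes,
    // then V plane: mWidth*mHeight/4 bytes, i.e. 3/2 * mWidth*mHeight bytes in total.
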
    //PTS
    mFrame->pts++;
    int got_picture = 0;
    //Encode
    ret = avcodec_encode_video2(mCodecCtx, &mAVPack, mFrame, &got_picture);

    if (got_picture > 0) {
        // fwrite(mAVPack.data, 1, mAVPack.size, p);
        // fflush(p);
        if (nullptr != this->mObserver)
            this->mObserver->OnGetCodeFrame(mAVPack.data, mAVPack.size);
    }
    Debuger::Debug(L"Succeed to encode frame: %5d\tsize:%5d\n", 1, mAVPack.size);

    // Release the packet and the temporary image so packets do not pile up
    av_packet_unref(&mAVPack);
    av_freep(&pFrame[0]);

    //av_freep(&mFrame->data[0]);
    //av_freep(&mFrame->data[0]);
}
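
// avcodec_encode_video2() is deprecated since FFmpeg 3.1. A sketch of the equivalent
// send/receive loop, assuming the same mCodecCtx/mFrame/mAVPack members as above:
//
//   if (avcodec_send_frame(mCodecCtx, mFrame) == 0) {
//       while (avcodec_receive_packet(mCodecCtx, &mAVPack) == 0) {
//           if (mObserver) mObserver->OnGetCodeFrame(mAVPack.data, mAVPack.size);
//           av_packet_unref(&mAVPack);
//       }
//   }
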
void VideoCoder::SetOutPutPixel(unsigned int width, unsigned int height)
{
    this->mHeight = height;
    this->mWidth = width;
}

int VideoCoder::flushCoder(AVFormatContext *fmt_ctx, unsigned int stream_index) {
    int ret;
    int got_frame;
    AVPacket enc_pkt;
    // Nothing to flush if the encoder has no delayed frames
    if (!(this->mFormatCtx->streams[stream_index]->codec->codec->capabilities & AV_CODEC_CAP_DELAY))
        return 0;
    while (1) {
        enc_pkt.data = NULL;
        enc_pkt.size = 0;
        av_init_packet(&enc_pkt);
        ret = avcodec_encode_video2(fmt_ctx->streams[stream_index]->codec, &enc_pkt,
            NULL, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame) {
            ret = 0;
            break;
        }
        Debuger::Debug(L"Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_pkt.size);
        /* mux encoded frame */
        ret = av_write_frame(fmt_ctx, &enc_pkt);
        if (ret < 0)
            break;
    }
    return ret;
}
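
// With the newer send/receive API, flushing is done by sending a NULL frame
// (avcodec_send_frame(ctx, NULL)) and then draining avcodec_receive_packet()
// until it returns AVERROR_EOF; the loop above is the avcodec_encode_video2 equivalent.
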
// Force-convert input data in other pixel formats into YUV420P data that libav can consume
int VideoCoder::forceYUV420P(uint8_t * src, int size,
    AVPixelFormat format, uint8_t **dst[4], int *len)
{
    uint8_t *src_data[4];
    int src_linesize[4];
    uint8_t *dst_data[4];
    int dst_linesize[4];
    struct SwsContext *img_convert_ctx;
    int ret = 0;

    if (nullptr == dst || nullptr == len) {
        return -2;
    }

    int src_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(format));
    AVPixelFormat dst_pixfmt = AV_PIX_FMT_YUV420P;
    int dst_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(dst_pixfmt));

    ret = av_image_alloc(src_data, src_linesize, mWidth, mHeight, format, 1);
    if (ret < 0) {
        Debuger::Debug(L"Could not allocate source image\n");
        return -1;
    }
    ret = av_image_alloc(dst_data, dst_linesize, mDestWidth, mDestHeight, AV_PIX_FMT_YUV420P, 1);
    if (ret < 0) {
        Debuger::Debug(L"Could not allocate destination image\n");
        av_freep(&src_data[0]); // do not leak the source image on this error path
        return -1;
    }

    img_convert_ctx = sws_alloc_context();
    //Show AVOption
    //av_opt_show2(img_convert_ctx, stdout, AV_OPT_FLAG_VIDEO_PARAM, 0);
    //Set Value
    av_opt_set_int(img_convert_ctx, "sws_flags", SWS_BICUBIC | SWS_PRINT_INFO, 0);
    av_opt_set_int(img_convert_ctx, "srcw", mWidth, 0);
    av_opt_set_int(img_convert_ctx, "srch", mHeight, 0);
    av_opt_set_int(img_convert_ctx, "src_format", format, 0);
    av_opt_set_int(img_convert_ctx, "src_range", 1, 0);

    av_opt_set_int(img_convert_ctx, "dstw", mDestWidth, 0);
    av_opt_set_int(img_convert_ctx, "dsth", mDestHeight, 0);
    av_opt_set_int(img_convert_ctx, "dst_format", dst_pixfmt, 0);
    av_opt_set_int(img_convert_ctx, "dst_range", 1, 0);
    sws_init_context(img_convert_ctx, NULL, NULL);
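    // The option-by-option setup above can also be expressed with the single
    // convenience call sws_getContext(); a sketch under the same parameters:
    //
    //   img_convert_ctx = sws_getContext(mWidth, mHeight, format,
    //                                    mDestWidth, mDestHeight, AV_PIX_FMT_YUV420P,
    //                                    SWS_BICUBIC, NULL, NULL, NULL);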

    // Fill the input planes according to the source pixel format
    switch (format) {
    case AV_PIX_FMT_GRAY8: {
        memcpy(src_data[0], src, mWidth*mHeight);
        break;
    }
    case AV_PIX_FMT_YUV420P: {
        memcpy(src_data[0], src, mWidth*mHeight);                              //Y
        memcpy(src_data[1], src + mWidth*mHeight, mWidth*mHeight / 4);         //U
        memcpy(src_data[2], src + mWidth*mHeight * 5 / 4, mWidth*mHeight / 4); //V
        break;
    }
    case AV_PIX_FMT_YUV422P: {
        memcpy(src_data[0], src, mWidth*mHeight);                              //Y
        memcpy(src_data[1], src + mWidth*mHeight, mWidth*mHeight / 2);         //U
        memcpy(src_data[2], src + mWidth*mHeight * 3 / 2, mWidth*mHeight / 2); //V
        break;
    }
    case AV_PIX_FMT_YUV444P: {
        memcpy(src_data[0], src, mWidth*mHeight);                              //Y
        memcpy(src_data[1], src + mWidth*mHeight, mWidth*mHeight);             //U
        memcpy(src_data[2], src + mWidth*mHeight * 2, mWidth*mHeight);         //V
        break;
    }
    case AV_PIX_FMT_YUYV422: {
        memcpy(src_data[0], src, mWidth*mHeight * 2); //Packed
        break;
    }
    case AV_PIX_FMT_RGB24: {
        memcpy(src_data[0], src, mWidth*mHeight * 3); //Packed
        break;
    }
    case AV_PIX_FMT_BGRA: {
        memcpy(src_data[0], src, mWidth*mHeight * 4); //Packed
        break;
    }
    default: {
        Debuger::Debug(L"Not Support Input Pixel Format.\n");
        break;
    }
    }
    // Convert the data
    ret = sws_scale(img_convert_ctx, src_data, src_linesize, 0, mHeight, dst_data, dst_linesize);
    if (ret < 0) {
        av_freep(&src_data[0]);
        av_freep(&dst_data[0]);
        sws_freeContext(img_convert_ctx);
        return ret;
    }
    memcpy(dst[0], dst_data[0], mDestWidth*mDestHeight);         //Y
    memcpy(dst[1], dst_data[1], mDestWidth*mDestHeight / 4);     //U
    memcpy(dst[2], dst_data[2], mDestWidth*mDestHeight / 4);     //V

    *len = mDestWidth*mDestHeight + mDestWidth*mDestHeight / 2;
    // The temporary source/destination images are no longer needed here;
    // the buffers behind dst are owned and freed by the caller.
    av_freep(&src_data[0]);
    av_freep(&dst_data[0]);

    sws_freeContext(img_convert_ctx);
    return 0;
}