Train_Identify/nvidia_ascend_engine/nvidia_engine/DecodeEngine/HardH264FFmpegDecode.cpp

#include "HardH264FFmpegDecode.h"
using namespace std;
HardH264FFmpegDecode::HardH264FFmpegDecode()
{
}

HardH264FFmpegDecode::~HardH264FFmpegDecode()
{
}
int HardH264FFmpegDecode::HardH264FFmpegDecoderInit(unsigned int uiWidth, unsigned int uiHeight, unsigned int uiFrameRate)
{
    uiWidth_ = uiWidth;
    uiHeight_ = uiHeight;
    uiFrameRate_ = uiFrameRate;
    iFrameFinished_ = 0;
    av_log_set_level(AV_LOG_ERROR);
    // AVCodecID codec_id = AV_CODEC_ID_H264;    // decode H.264
    // pCodec_ = avcodec_find_decoder(codec_id); // look up the decoder
    pCodec_ = avcodec_find_decoder_by_name(NVIDIA_H264_DECODER);
    if (!pCodec_) {
        fprintf(stderr, "Codec '%s' not found\n", NVIDIA_H264_DECODER);
        exit(1);
    }
    printf("Codec found with name %d(%s)\n", pCodec_->id, pCodec_->long_name);
    // Allocate the codec context
    pCodecCtx_ = avcodec_alloc_context3(pCodec_);
    if (!pCodecCtx_) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }
    // Create the bitstream parser
    pCodecParserCtx_ = av_parser_init(pCodec_->id);
    if (!pCodecParserCtx_) {
        fprintf(stderr, "parser not found\n");
        exit(1);
    }
    //if (pCodec_->capabilities & CODEC_CAP_TRUNCATED)
    //    pCodecCtx_->flags |= CODEC_FLAG_TRUNCATED;
    // Open the decoder
    int ret = avcodec_open2(pCodecCtx_, pCodec_, nullptr);
    if (ret < 0) {
        fprintf(stderr, "Could not open codec\n");
        printf("avcodec_open2 ret is: %d\n", ret);
        exit(1);
    }
    // Allocate the packet
    pPacket_ = av_packet_alloc();
    if (!pPacket_) {
        fprintf(stderr, "Could not allocate video packet\n");
        exit(1);
    }
    // av_init_packet(pPacket_);
    // Allocate the source and destination frames
    pSrcFrame_ = av_frame_alloc();
    if (!pSrcFrame_) {
        fprintf(stderr, "Could not allocate video src pFrame\n");
        exit(1);
    }
    pDstFrame_ = av_frame_alloc();
    if (!pDstFrame_) {
        fprintf(stderr, "Could not allocate video dst pFrame\n");
        exit(1);
    }
    printf("after align down video_width: %d, video_height: %d\n", uiWidth_, uiHeight_);
    // Initialize the decoder context parameters
    pCodecCtx_->time_base.num = 1;
    pCodecCtx_->frame_number = 1;              // one video frame per packet
    pCodecCtx_->codec_type = AVMEDIA_TYPE_VIDEO;
    pCodecCtx_->bit_rate = 0;
    pCodecCtx_->time_base.den = uiFrameRate_;  // frame rate
    pCodecCtx_->width = uiWidth_;              // video width
    pCodecCtx_->height = uiHeight_;            // video height
    // pCodecCtx_->pix_fmt = AV_PIX_FMT_YUV420P;
    int bufferSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
                                              pCodecCtx_->width,
                                              pCodecCtx_->height, 1);
    pu8OutBuffer_ = (unsigned char *)av_malloc(bufferSize);
    av_image_fill_arrays(pDstFrame_->data,
                         pDstFrame_->linesize,
                         pu8OutBuffer_,
                         AV_PIX_FMT_YUV420P,
                         pCodecCtx_->width,
                         pCodecCtx_->height, 1);
    printf("pDstFrame_->linesize[0]: %d, bufferSize: %d\n", pDstFrame_->linesize[0], bufferSize);
    pSwsContext_ = sws_getContext(pCodecCtx_->width, pCodecCtx_->height, pCodecCtx_->pix_fmt,
                                  pCodecCtx_->width, pCodecCtx_->height, AV_PIX_FMT_YUV420P,
                                  SWS_BICUBIC, nullptr, nullptr, nullptr);
    if (!pSwsContext_) {
        fprintf(stderr, "Could not create swscale context (source pix_fmt %d)\n", pCodecCtx_->pix_fmt);
        exit(1);
    }
    printf("pCodecCtx_->width: %d, pCodecCtx_->height: %d, pCodecCtx_->pix_fmt: %d\n",
           pCodecCtx_->width, pCodecCtx_->height, pCodecCtx_->pix_fmt);
    return 0;
}
int HardH264FFmpegDecode::HardH264FFmpegDecoderDeInit()
{
    if (pu8OutBuffer_) {
        av_free(pu8OutBuffer_);
        pu8OutBuffer_ = nullptr;
    }
    if (pSrcFrame_) {
        av_frame_free(&pSrcFrame_);
        pSrcFrame_ = nullptr;
    }
    if (pDstFrame_) {
        av_frame_free(&pDstFrame_);
        pDstFrame_ = nullptr;
    }
    if (pPacket_) {
        av_packet_free(&pPacket_);
        pPacket_ = nullptr;
    }
    if (pCodecParserCtx_) {
        av_parser_close(pCodecParserCtx_);
        pCodecParserCtx_ = nullptr;
    }
    if (pCodecCtx_) {
        avcodec_free_context(&pCodecCtx_);  // closes the codec and frees the context
        pCodecCtx_ = nullptr;
    }
    if (pSwsContext_) {
        sws_freeContext(pSwsContext_);
        pSwsContext_ = nullptr;
    }
    return 0;
}
int HardH264FFmpegDecode::HardH264FFmpegDecoderFilterGraph(AVFilterGraph *pGraph, AVFilterContext *pSourceCtx, AVFilterContext *pSinkCtx)
{
    int ret;
    AVFilterInOut *pOutputs = nullptr, *pInputs = nullptr;
    if ((ret = avfilter_link(pSourceCtx, 0, pSinkCtx, 0)) >= 0) {
        ret = avfilter_graph_config(pGraph, nullptr);
    }
    avfilter_inout_free(&pOutputs);
    avfilter_inout_free(&pInputs);
    return ret;
}
int HardH264FFmpegDecode::HardH264FFmpegDecoderConfigureVideoFilters(AVFilterGraph *pGraph, AVFilterContext* &pDecoderFilterIn, AVFilterContext* &pDecoderFilterOut, const int iWidth, const int iHeight, const int iFormat)
{
    int iPixFormats[2] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE};
    char BufferSrcArgs[256] = {0};
    AVFilterContext *pFiltSrc = nullptr, *pFiltDst = nullptr;
    int ret;
    snprintf(BufferSrcArgs, sizeof(BufferSrcArgs),
             "video_size=%dx%d:pix_fmt=%d:time_base=1/1200000",
             iWidth, iHeight, iFormat);
    // Buffer source: feeds decoded frames into the graph
    if ((ret = avfilter_graph_create_filter(&pFiltSrc,
            avfilter_get_by_name("buffer"), "ffplay_buffer", BufferSrcArgs,
            nullptr, pGraph)) < 0) {
        goto fail;
    }
    // Buffer sink: pulls converted frames out of the graph
    ret = avfilter_graph_create_filter(&pFiltDst,
            avfilter_get_by_name("buffersink"),
            "ffplay_buffersink", nullptr, nullptr, pGraph);
    if (ret < 0) {
        goto fail;
    }
    // Constrain the sink output to YUV420P
    if ((ret = av_opt_set_int_list(pFiltDst, "pix_fmts", iPixFormats, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0) {
        goto fail;
    }
    if ((ret = HardH264FFmpegDecoderFilterGraph(pGraph, pFiltSrc, pFiltDst)) < 0) {
        goto fail;
    }
    pDecoderFilterIn = pFiltSrc;
    pDecoderFilterOut = pFiltDst;
fail:
    return ret;
}
int HardH264FFmpegDecode::HardH264FFmpegDecoder(AVCodecContext *pDecCtx, AVFrame *pFrame, AVPacket *pPkt, void* pOutputData, unsigned int* puiOutputDataSize)
{
    int ret;
    AVFilterGraph* pDecoderGraph = nullptr;
    ret = avcodec_send_packet(pDecCtx, pPkt);   // submit the packet to the decoder
    if (ret < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        exit(1);
    }
    while (ret >= 0) {
        ret = avcodec_receive_frame(pDecCtx, pFrame);   // pull a decoded frame
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            fprintf(stderr, "During decoding eof\n");
            return -1;
        }
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            exit(1);
        }
        //printf("saving frame %3d\n", pDecCtx->frame_number);
        fflush(stdout);
        AVFilterContext *pDecoderFilterIn = nullptr, *pDecoderFilterOut = nullptr;
        // pFrame->width = ALIGN_DOWN(pFrame->width, 32);
        // pFrame->height = ALIGN_DOWN(pFrame->height, 32);
        // printf("pFrame->width: %d\tpFrame->height: %d\n", pFrame->width, pFrame->height);
        pDecoderGraph = avfilter_graph_alloc();
        HardH264FFmpegDecoderConfigureVideoFilters(pDecoderGraph, pDecoderFilterIn, pDecoderFilterOut, pFrame->width, pFrame->height, pFrame->format);
        if (pFrame->format != AV_PIX_FMT_YUV420P) {
            // Push the hardware decoder output through the filter graph to get YUV420P
            DUMP_FRAME(pFrame);
            ret = av_buffersrc_add_frame(pDecoderFilterIn, pFrame);
            ret = av_buffersink_get_frame_flags(pDecoderFilterOut, pFrame, 0);
            DUMP_FRAME(pFrame);
            // Copy the planes out, assuming tightly packed lines (linesize == width)
            unsigned char *pu8Out = (unsigned char *)pOutputData;
            int iSize = pFrame->width * pFrame->height;
            memcpy(pu8Out, pFrame->data[0], iSize);                         // Y
            memcpy(pu8Out + iSize, pFrame->data[1], iSize / 4);             // U
            memcpy(pu8Out + iSize + iSize / 4, pFrame->data[2], iSize / 4); // V
            *puiOutputDataSize = iSize * 3 / 2;
            avfilter_graph_free(&pDecoderGraph);    // release the per-frame filter graph
            return iSize * 3 / 2;
        }
        avfilter_graph_free(&pDecoderGraph);        // graph not needed when the frame is already YUV420P
    }
    return 0;
}
int HardH264FFmpegDecode::HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, SwsContext *pSwsCtx, AVFrame *pSrcFrame, AVFrame *pDstFrame, AVPacket *pPkt, void* pOutputData, unsigned int* puiOutputDataSize)
{
    int ret;
    ret = avcodec_send_packet(pDecCtx, pPkt);   // submit the packet to the decoder
    if (ret < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        exit(1);
    }
    while (ret >= 0) {
        ret = avcodec_receive_frame(pDecCtx, pSrcFrame);   // pull a decoded frame
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            fprintf(stderr, "During decoding eof\n");
            return -1;
        }
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            exit(1);
        }
        // pDecCtx->width = ALIGN_DOWN(pDecCtx->width, 32);
        // pDecCtx->height = ALIGN_DOWN(pDecCtx->height, 32);
        // Convert the decoded frame to YUV420P with swscale
        sws_scale(pSwsCtx,
                  (const uint8_t *const *)pSrcFrame->data,
                  pSrcFrame->linesize,
                  0,
                  pDecCtx->height,
                  pDstFrame->data,
                  pDstFrame->linesize);
        //printf("saving frame %3d\n", pDecCtx->frame_number);
        fflush(stdout);
        // Copy the planes out, assuming tightly packed lines (linesize == width)
        unsigned char *pu8Out = (unsigned char *)pOutputData;
        int iSize = pDecCtx->width * pDecCtx->height;
        memcpy(pu8Out, pDstFrame->data[0], iSize);                         // Y
        memcpy(pu8Out + iSize, pDstFrame->data[1], iSize / 4);             // U
        memcpy(pu8Out + iSize + iSize / 4, pDstFrame->data[2], iSize / 4); // V
        *puiOutputDataSize = iSize * 3 / 2;
        return iSize * 3 / 2;
    }
    return 0;
}
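
/*
 * Usage sketch (assumption): the header HardH264FFmpegDecode.h is not shown here, so the
 * member fields referenced below (pCodecCtx_, pSwsContext_, pSrcFrame_, pDstFrame_,
 * pPacket_) are assumed to be accessible to the caller or wrapped by an equivalent
 * public method. The function name ExampleDecodeOnePacket and the buffer sizes are
 * hypothetical; this only illustrates the intended call sequence
 * (Init -> DecoderV2 per packet -> DeInit) and is not code from this project.
 */
#if 0
#include "HardH264FFmpegDecode.h"
#include <vector>

static void ExampleDecodeOnePacket(unsigned char* pu8H264, int iLen)
{
    HardH264FFmpegDecode decoder;
    decoder.HardH264FFmpegDecoderInit(1920, 1080, 25);        // width, height, frame rate

    std::vector<unsigned char> yuv(1920 * 1080 * 3 / 2);      // room for one YUV420P frame
    unsigned int uiYuvSize = 0;

    // Point the packet at one complete H.264 access unit (no ownership transfer)
    decoder.pPacket_->data = pu8H264;
    decoder.pPacket_->size = iLen;
    decoder.HardH264FFmpegDecoderV2(decoder.pCodecCtx_, decoder.pSwsContext_,
                                    decoder.pSrcFrame_, decoder.pDstFrame_,
                                    decoder.pPacket_, yuv.data(), &uiYuvSize);

    decoder.HardH264FFmpegDecoderDeInit();
}
#endif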