1. Add carriage start and end times to the recognition results.
2. After train-arrival detection, the arrival state is now stored in a txt file; model inference reads it to determine the carriage state of each frame.
parent 2e67e97508
commit d72bd78b59
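Note on change 2: the diffs below show the arrival state (nMonitorState) being attached to each frame, but the txt file layout itself is not visible in this commit. Below is a minimal sketch of the idea, assuming a simple one-record-per-line "timestamp state" format and hypothetical helper names (WriteMonitorState / ReadMonitorState); on the inference side, the state whose timestamp most recently precedes the frame's timestamp is taken as that frame's carriage state.

// Hypothetical sketch only: the real file name, record format and helpers are not shown in this commit.
#include <cstdint>
#include <fstream>
#include <iterator>
#include <map>
#include <string>

static const int MONITOR_MODEL_INIT_STATE_SKETCH = 0;   // stand-in for the project's MONITOR_MODEL_INIT_STATE

// Arrival-detection side: append one "timestamp state" record whenever the state changes.
void WriteMonitorState(const std::string &strPath, uint64_t i64TimeStamp, int nMonitorState)
{
    std::ofstream ofs(strPath, std::ios::app);
    ofs << i64TimeStamp << " " << nMonitorState << "\n";
}

// Inference side: pick the latest recorded state that is not newer than the frame timestamp.
int ReadMonitorState(const std::string &strPath, uint64_t i64FrameTimeStamp)
{
    std::ifstream ifs(strPath);
    std::map<uint64_t, int> mapStates;
    uint64_t i64Ts = 0;
    int nState = 0;
    while (ifs >> i64Ts >> nState)
    {
        mapStates[i64Ts] = nState;
    }
    auto it = mapStates.upper_bound(i64FrameTimeStamp);
    if (it == mapStates.begin())
    {
        return MONITOR_MODEL_INIT_STATE_SKETCH;   // no record at or before this frame yet
    }
    return std::prev(it)->second;
}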
@@ -516,6 +516,7 @@ typedef struct
    uint64_t i64TimeStamp = 0;                      //frame data timestamp
    std::shared_ptr<DecodedData> pDecodeData = nullptr;
    int iDirection = 0;                             //travel direction (0 - unknown; 1 - left; 2 - right)
    int nMonitorState = MONITOR_MODEL_INIT_STATE;
} SaveImgData;

//recognition processing data
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long

@@ -113,6 +113,8 @@ void MergerAllEngine::PushData(std::shared_ptr<Train> pTrain)
        << "集装箱2: " << pTrain->container2.strContainerNo << "\n"
        << "集装箱2图片: " << pTrain->container2.strBestImg << "\n"
        << "集装箱2时间戳: " << pTrain->container2.i64TimeStamp << "\n"
        << "车厢开始时间: " << MyUtils::getins()->Stamp2Time(pTrain->i64StartTimeStamp, true) << "\n"
        << "车厢结束时间: " << MyUtils::getins()->Stamp2Time(pTrain->i64EndTimeStamp, true) << "\n"
        << " ---所有信息合并结果 END--- ";
    if (pTrain->bIsEnd)
    {
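The new carriage start/end times above are formatted via MyUtils::getins()->Stamp2Time(ts, true), whose implementation is not part of this diff. Assuming the stored timestamps are milliseconds since the Unix epoch and the boolean asks for millisecond precision, a conversion along the following lines would produce such a string; this is an illustrative stand-in, not the project's actual utility.

// Illustrative stand-in for Stamp2Time; assumes a millisecond Unix-epoch timestamp (POSIX localtime_r).
#include <cstdint>
#include <cstdio>
#include <ctime>
#include <string>

std::string Stamp2TimeSketch(uint64_t i64TimeStampMs, bool bWithMs)
{
    std::time_t tSec = static_cast<std::time_t>(i64TimeStampMs / 1000);
    std::tm tmLocal{};
    localtime_r(&tSec, &tmLocal);                       // thread-safe local time conversion

    char szBuf[32] = {0};
    std::strftime(szBuf, sizeof(szBuf), "%Y-%m-%d %H:%M:%S", &tmLocal);

    std::string strTime(szBuf);
    if (bWithMs)
    {
        char szMs[8] = {0};
        std::snprintf(szMs, sizeof(szMs), ".%03u", static_cast<unsigned>(i64TimeStampMs % 1000));
        strTime += szMs;
    }
    return strTime;
}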
File diff suppressed because one or more lines are too long

@@ -154,6 +154,7 @@ APP_ERROR SaveImgEngine::Process()
    Json::Value jvFrameInfo;
    jvFrameInfo["timeStamp"] = pSaveImgData->i64TimeStamp;
    jvFrameInfo["status"] = iStatus;
    jvFrameInfo["moveType"] = pSaveImgData->nMonitorState;
    jvFrameInfo["direction"] = pSaveImgData->iDirection;
    jvFrameInfo["width"] = iWidth;
    jvFrameInfo["height"] = iHeight;
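If this per-frame record is what gets persisted for change 2, each jvFrameInfo could be appended to the state file as one JSON line. The file name and the exact serialization call are not visible in this diff, so the following is only a plausible jsoncpp sketch of the write side.

// Plausible sketch of appending one frame record as a single JSON line (jsoncpp); the file path is hypothetical.
#include <fstream>
#include <string>
#include <json/json.h>

void AppendFrameInfo(const Json::Value &jvFrameInfo, const std::string &strPath)
{
    Json::StreamWriterBuilder builder;
    builder["indentation"] = "";                        // one record per line, no pretty-printing
    std::ofstream ofs(strPath, std::ios::app);
    ofs << Json::writeString(builder, jvFrameInfo) << "\n";
}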
@@ -301,8 +301,8 @@ void SaveStepOneResultEngine::DealTrainSpaceInfo(std::shared_ptr<ProcessData> pP
                    if (!(bDealCenterFlag_ && !bIntervalFlag && (iCenterCur < (pProcessData->iWidth / 3 + 30))))
                    {
                        vecParationInfo_.push_back(parationInfo);
                    }
                }
            }
        }
    }
    else if (iDirection_ == DIRECTION_RIGHT)
    {
@@ -550,7 +550,7 @@ APP_ERROR SaveStepOneResultEngine::Process()
    {
        //The locomotive has no attributes, so the locomotive number is also added to the attribute list; this guarantees the split of the last 2 carriages when travelling right with the attribute in front.
        //The locomotive is added only once, to avoid one locomotive getting two locomotive numbers. Two locomotives with no detected gap between them still cannot be handled.
        if (!bHaveHeadFlag_)
        {
            bool bIntervalFlag = ((int)(pProcessData->iFrameId - headInfo_.iFrameId) > iSplitSpan_ && headInfo_.iFrameId != 0);
@@ -585,7 +585,7 @@ APP_ERROR SaveStepOneResultEngine::Process()
            }
            else if (postSubData.iTargetType == CONTAINER)
            {
                jvStep1Container.append(jvInfo);
            }
            else if (postSubData.iTargetType == SPACE)
            {
@@ -1,4 +1,4 @@
#include "SocketEngine.h"

File diff suppressed because one or more lines are too long

@@ -2,54 +2,54 @@
using namespace std;

HardH264FFmpegDecode::HardH264FFmpegDecode()
{
    ;
}

HardH264FFmpegDecode::~HardH264FFmpegDecode()
{
    ;
}

int HardH264FFmpegDecode::HardH264FFmpegDecoderInit(unsigned int uiWidth, unsigned int uiHeight, unsigned int uiFrameRate)
{
    uiWidth_ = uiWidth; uiHeight_ = uiHeight;
    uiFrameRate_ = uiFrameRate;
    iFrameFinished_ = 0;

    av_log_set_level(AV_LOG_ERROR);

    // AVCodecID codec_id = AV_CODEC_ID_H264;          //decode H264
    // pCodec_ = avcodec_find_decoder(codec_id);       //look up the decoder

    pCodec_ = avcodec_find_decoder_by_name(NVIDIA_H264_DECODER);
    if (!pCodec_) {
        fprintf(stderr, "Codec '%s' not found\n", NVIDIA_H264_DECODER);
        exit(1);
    }
    printf("Codec found with name %d(%s)\n", pCodec_->id, pCodec_->long_name);

    //create the codec context
    pCodecCtx_ = avcodec_alloc_context3(pCodec_);
    if (!pCodecCtx_){
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    //create the parser
    pCodecParserCtx_ = av_parser_init(pCodec_->id);
    if (!pCodecParserCtx_){
        fprintf(stderr, "parser not found\n");
        exit(1);
    }

    //if(pCodec_->capabilities&CODEC_CAP_TRUNCATED)
    //    pCodecCtx_->flags|= CODEC_FLAG_TRUNCATED;

    //open the decoder
    int ret = avcodec_open2(pCodecCtx_, pCodec_, nullptr);
    if (ret < 0) {
        fprintf(stderr, "Could not open codec\n");
        printf("avcodec_open2 ret is: %d\n",ret);
        exit(1);
@@ -63,7 +63,7 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderInit(unsigned int uiWidth, unsign
    }
    // av_init_packet(pPacket_);

    //allocate the source frame
    pSrcFrame_ = av_frame_alloc();
    if (!pSrcFrame_) {
        fprintf(stderr, "Could not allocate video src pFrame\n");
@@ -78,14 +78,14 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderInit(unsigned int uiWidth, unsign

    printf("after align down video_width: %d, video_height: %d\n", uiWidth_, uiHeight_);

    //initialize decoder context parameters
    pCodecCtx_->time_base.num = 1;
    pCodecCtx_->frame_number = 1;             //one video frame per packet
    pCodecCtx_->codec_type = AVMEDIA_TYPE_VIDEO;
    pCodecCtx_->bit_rate = 0;
    pCodecCtx_->time_base.den = uiFrameRate_; //frame rate
    pCodecCtx_->width = uiWidth_;             //video width
    pCodecCtx_->height = uiHeight_;           //video height
    // pCodecCtx_->pix_fmt = AV_PIX_FMT_YUV420P;

    int bufferSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
@@ -102,46 +102,46 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderInit(unsigned int uiWidth, unsign
    printf("pDstFrame_->linesize: %d, bufferSize: %d\n", pDstFrame_->linesize[0], bufferSize);

    pSwsContext_ = sws_getContext(pCodecCtx_->width, pCodecCtx_->height, pCodecCtx_->pix_fmt,
        pCodecCtx_->width, pCodecCtx_->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, nullptr, nullptr, nullptr);
    printf("pCodecCtx_->width: %d, pCodecCtx_->height: %d, pCodecCtx_->pix_fmt: %d\n", pCodecCtx_->width, pCodecCtx_->height, pCodecCtx_->pix_fmt);

    return 0;
}

int HardH264FFmpegDecode::HardH264FFmpegDecoderDeInit()
{
    if(pu8OutBuffer_){
        av_free(pu8OutBuffer_);
        pu8OutBuffer_ = nullptr;
    }

    if(pSrcFrame_){
        av_frame_free(&pSrcFrame_);
        pSrcFrame_ = nullptr;
    }
    if(pDstFrame_){
        av_frame_free(&pDstFrame_);
        pDstFrame_ = nullptr;
    }
    if(pPacket_){
        av_packet_free(&pPacket_);
        pPacket_ = nullptr;
    }
    if(pCodecParserCtx_){
        av_parser_close(pCodecParserCtx_);
        pCodecParserCtx_ = nullptr;
    }
    if(pCodecCtx_){
        avcodec_close(pCodecCtx_);
        av_free(pCodecCtx_);
        pCodecCtx_ = nullptr;
    }

    if(pSwsContext_){
        sws_freeContext(pSwsContext_);
        pSwsContext_ = nullptr;
    }
    return 0;
}

int HardH264FFmpegDecode::HardH264FFmpegDecoderFilterGraph(AVFilterGraph *pGraph, AVFilterContext *pSourceCtx, AVFilterContext *pSinkCtx)
@@ -149,7 +149,7 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderFilterGraph(AVFilterGraph *pGraph
    int ret;
    AVFilterInOut *pOutputs = nullptr, *pInputs = nullptr;
    if ((ret = avfilter_link(pSourceCtx, 0, pSinkCtx, 0)) >= 0){
        ret = avfilter_graph_config(pGraph, nullptr);
    }

    avfilter_inout_free(&pOutputs);
@@ -168,14 +168,14 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderConfigureVideoFilters(AVFilterGra
        "video_size=%dx%d:pix_fmt=%d:time_base=1/1200000",
        iWidth, iHeight, iFormat);
    if ((ret = avfilter_graph_create_filter(&pFiltSrc,
            avfilter_get_by_name("buffer"), "ffplay_buffer", BufferSrcArgs,
            nullptr, pGraph)) < 0){
        goto fail;
    }

    ret = avfilter_graph_create_filter(&pFiltDst,
            avfilter_get_by_name("buffersink"),
            "ffplay_buffersink", nullptr, nullptr, pGraph);
    if (ret < 0){
        goto fail;
    }
@@ -190,14 +190,14 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderConfigureVideoFilters(AVFilterGra
    pDecoderFilterIn = pFiltSrc;
    pDecoderFilterOut = pFiltDst;

fail:
    return ret;
}

int HardH264FFmpegDecode::HardH264FFmpegDecoder(AVCodecContext *pDecCtx, AVFrame *pFrame, AVPacket *pPkt, void* pOutputData, unsigned int* puiOutputDataSize)
{
    int ret;
    AVFilterGraph* pDecoderGraph = nullptr;

    ret = avcodec_send_packet(pDecCtx, pPkt); //feed the packet to the decoder
    if (ret < 0) {
@@ -208,7 +208,7 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoder(AVCodecContext *pDecCtx, AVFrame
    while (ret >= 0) {
        ret = avcodec_receive_frame(pDecCtx, pFrame); //decode
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
            fprintf(stderr, "During decoding eof\n");
            return -1;
        }
        else if (ret < 0) {
@@ -219,35 +219,35 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoder(AVCodecContext *pDecCtx, AVFrame
        //printf("saving frame %3d\n", pDecCtx->frame_number);
        fflush(stdout);

        AVFilterContext *pDecoderFilterIn = nullptr, *pDecoderFilterOut = nullptr;

        // pFrame->width = ALIGN_DOWN(pFrame->width, 32);
        // pFrame->height = ALIGN_DOWN(pFrame->height, 32);
        // printf("pFrame->width: %d\tpFrame->height: %d\n", pFrame->width, pFrame->height);

        pDecoderGraph = avfilter_graph_alloc();
        HardH264FFmpegDecoderConfigureVideoFilters(pDecoderGraph, pDecoderFilterIn, pDecoderFilterOut, pFrame->width, pFrame->height, pFrame->format);

        if (pFrame->format != AV_PIX_FMT_YUV420P){
            DUMP_FRAME(pFrame);
            ret = av_buffersrc_add_frame(pDecoderFilterIn, pFrame);
            ret = av_buffersink_get_frame_flags(pDecoderFilterOut, pFrame, 0);
            DUMP_FRAME(pFrame);

            int iSize = pFrame->width * pFrame->height;
            memcpy(pOutputData, pFrame->data[0], iSize);                   //Y
            memcpy(pOutputData+iSize, pFrame->data[1], iSize/4);           //U
            memcpy(pOutputData+iSize+iSize/4, pFrame->data[2], iSize/4);   //V
            *puiOutputDataSize = iSize*3/2;
            return iSize*3/2;
        }
    }
    return 0;
}

int HardH264FFmpegDecode::HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, SwsContext *pSwsCtx, AVFrame *pSrcFrame, AVFrame *pDstFrame, AVPacket *pPkt, void* pOutputData, unsigned int* puiOutputDataSize)
{
    int ret;

    ret = avcodec_send_packet(pDecCtx, pPkt); //feed the packet to the decoder
    if (ret < 0) {
@@ -258,7 +258,7 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, SwsCo
    while (ret >= 0) {
        ret = avcodec_receive_frame(pDecCtx, pSrcFrame); //decode
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
            fprintf(stderr, "During decoding eof\n");
            return -1;
        }
        else if (ret < 0) {
@@ -266,7 +266,7 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, SwsCo
            exit(1);
        }

        // pDecCtx->width = ALIGN_DOWN(pDecCtx->width, 32);
        // pDecCtx->height = ALIGN_DOWN(pDecCtx->height, 32);

        sws_scale(pSwsCtx,
@@ -280,13 +280,13 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, SwsCo
        //printf("saving frame %3d\n", pDecCtx->frame_number);
        fflush(stdout);

        int iSize = pDecCtx->width * pDecCtx->height;

        memcpy(pOutputData, pDstFrame->data[0], iSize);                   //Y
        memcpy(pOutputData+iSize, pDstFrame->data[1], iSize/4);           //U
        memcpy(pOutputData+iSize+iSize/4, pDstFrame->data[2], iSize/4);   //V
        *puiOutputDataSize = iSize*3/2;
        return iSize*3/2;
    }
    return 0;
}
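Both decode paths above copy the result out as planar YUV420P: one byte per pixel for the Y plane and quarter-size U and V planes (2x2 chroma subsampling), hence the iSize*3/2 total. Note the copies also assume each plane's linesize equals its width, i.e. no row padding. A small sketch of the arithmetic for a 1920x1080 frame:

// Sketch of the YUV420P buffer-size arithmetic used above (assumes linesize == width, no row padding).
#include <cstdio>

int main()
{
    const int iWidth = 1920, iHeight = 1080;             // example resolution
    const int iSize  = iWidth * iHeight;                  // Y plane: 2,073,600 bytes
    const int iUV    = iSize / 4;                         // U and V planes: 518,400 bytes each
    std::printf("Y=%d U=%d V=%d total=%d\n", iSize, iUV, iUV, iSize + 2 * iUV);   // total = iSize*3/2 = 3,110,400
    return 0;
}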
@@ -56,7 +56,7 @@ extern "C"
    frame->linesize[2] \
    );}

#define NVIDIA_H264_DECODER "h264_cuvid"
// #define NVIDIA_H264_DECODER "h264_v4l2m2m"

class HardH264FFmpegDecode
@@ -69,21 +69,21 @@ public:
    int HardH264FFmpegDecoderDeInit();
    int HardH264FFmpegDecoder(AVCodecContext *pDecCtx, AVFrame *pFrame, AVPacket *pPkt, void* pOutputData, unsigned int* puiOutputDataSize);
    int HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, SwsContext *pSwsCtx, AVFrame *pSrcFrame, AVFrame *pDstFrame, AVPacket *pPkt, void* pOutputData, unsigned int* puiOutputDataSize);

    const AVCodec *pCodec_ = nullptr;                   //decoder
    AVCodecContext *pCodecCtx_ = nullptr;               //codec context
    AVCodecParserContext *pCodecParserCtx_ = nullptr;   //parser context
    AVFrame *pSrcFrame_ = nullptr;
    AVFrame *pDstFrame_ = nullptr;
    AVPacket *pPacket_ = nullptr;
    SwsContext *pSwsContext_ = nullptr;

    uint8_t *pu8OutBuffer_ = nullptr;

private:
    int HardH264FFmpegDecoderFilterGraph(AVFilterGraph *pGraph, AVFilterContext *pSourceCtx, AVFilterContext *pSinkCtx);
    int HardH264FFmpegDecoderConfigureVideoFilters(AVFilterGraph *pGraph, AVFilterContext* &pDecoderFilterIn, AVFilterContext* &pDecoderFilterOut, const int iWidth, const int iHeight, const int iFormat);

    unsigned int uiWidth_, uiHeight_;

    int iFrameFinished_;
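Taken together, the header suggests the call sequence: construct the object, call HardH264FFmpegDecoderInit() once, feed each parsed H.264 packet through HardH264FFmpegDecoderV2() (or HardH264FFmpegDecoder()), and release everything with HardH264FFmpegDecoderDeInit(). The loop below is only a hedged usage sketch under those assumptions; GetNextH264Packet() is hypothetical and not code from this repository.

// Hedged usage sketch; assumes the decoder class header and FFmpeg headers are already included.
#include <cstdint>
#include <vector>

extern bool GetNextH264Packet(AVPacket *pPkt);           // hypothetical source of one parsed H.264 access unit

void DecodeLoopSketch(unsigned int uiWidth, unsigned int uiHeight, unsigned int uiFrameRate)
{
    HardH264FFmpegDecode decoder;
    if (decoder.HardH264FFmpegDecoderInit(uiWidth, uiHeight, uiFrameRate) != 0)
        return;

    std::vector<uint8_t> vecYuv(uiWidth * uiHeight * 3 / 2);   // YUV420P output buffer
    AVPacket *pPkt = av_packet_alloc();
    unsigned int uiOutSize = 0;

    while (GetNextH264Packet(pPkt))
    {
        // The data members used here are public in the class declaration above.
        decoder.HardH264FFmpegDecoderV2(decoder.pCodecCtx_, decoder.pSwsContext_,
                                        decoder.pSrcFrame_, decoder.pDstFrame_,
                                        pPkt, vecYuv.data(), &uiOutSize);
        av_packet_unref(pPkt);
        // uiOutSize bytes of YUV420P data are now available in vecYuv.
    }

    av_packet_free(&pPkt);
    decoder.HardH264FFmpegDecoderDeInit();
}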
File diff suppressed because one or more lines are too long

@@ -211,6 +211,7 @@ void MoveEngine::SingleDeviceProcess(std::shared_ptr<ProcessData> pProcessData,
    pSaveImgData->bIsEnd = pProcessData->bIsEnd;
    pSaveImgData->bSaveToFtp = true;
    pSaveImgData->i64TimeStamp = pProcessData->i64TimeStamp;
    pSaveImgData->nMonitorState = nType;
    outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pSaveImgData));
}