优化倒车后的恢复判断。

增加上传接口的用户名密码配置。
This commit is contained in:
Mr.V 2024-05-22 17:25:51 +08:00
parent 665e6b62a7
commit 2e67e97508
14 changed files with 324 additions and 235 deletions

View File

@ -41,8 +41,10 @@ set(SYS_USR_INCLUDE_DIR "/usr/include")
set(SYS_USR_LIB_DIR "/usr/lib")
set(SYS_USR_LOCAL_INCLUDE_DIR "/usr/local/include")
set(SYS_USR_LOCAL_LIB_DIR "/usr/local/lib")
# -- X86 使用 (for X86 builds) --
set(AARCH64_LINUX_INCLUDE_DIR "/usr/include/x86_64-linux-gnu")
set(AARCH64_LINUX_LIB_DIR "/usr/lib/x86_64-linux-gnu")
# -- ARM 使用 (for ARM builds) --
#set(AARCH64_LINUX_INCLUDE_DIR "/usr/include/aarch64-linux-gnu")
#set(AARCH64_LINUX_LIB_DIR "/usr/lib/aarch64-linux-gnu")

View File

@ -129,37 +129,37 @@ namespace ai_matrix
return std::string(tmp);
}
std::string MyUtils::get_date()
{
time_t timep = time(NULL);
struct tm *p = localtime(&timep);
std::string MyUtils::get_date()
{
time_t timep = time(NULL);
struct tm *p = localtime(&timep);
struct timeval tv;
gettimeofday(&tv, NULL);
struct timeval tv;
gettimeofday(&tv, NULL);
int msec = tv.tv_usec / 1000;
int msec = tv.tv_usec / 1000;
char tmp[12] = { 0 };
sprintf(tmp, "%04d-%02d-%02d", 1900 + p->tm_year, 1 + p->tm_mon, p->tm_mday);
char tmp[12] = { 0 };
sprintf(tmp, "%04d-%02d-%02d", 1900 + p->tm_year, 1 + p->tm_mon, p->tm_mday);
return std::string(tmp);
}
return std::string(tmp);
}
std::string MyUtils::get_time()
{
time_t timep = time(NULL);
struct tm *p = localtime(&timep);
std::string MyUtils::get_time()
{
time_t timep = time(NULL);
struct tm *p = localtime(&timep);
struct timeval tv;
gettimeofday(&tv, NULL);
struct timeval tv;
gettimeofday(&tv, NULL);
int msec = tv.tv_usec / 1000;
int msec = tv.tv_usec / 1000;
char tmp[10] = { 0 };
sprintf(tmp, "%02d-%02d-%02d", p->tm_hour, p->tm_min, p->tm_sec);
char tmp[10] = { 0 };
sprintf(tmp, "%02d-%02d-%02d", p->tm_hour, p->tm_min, p->tm_sec);
return std::string(tmp);
}
return std::string(tmp);
}
std::string MyUtils::get_timestamp_log()
{
@ -310,12 +310,12 @@ namespace ai_matrix
return buffer.str();
}
/**
*
* @param filePath
* @param savePath
* @return
*/
/**
 * Copy a file from a source path to a destination path.
 * @param filePath source file path
 * @param savePath destination file path
 * @return true on success, false otherwise
 */
bool MyUtils::copyFile(std::string filePath, std::string savePath)
{
FILE *fp, *sp;
@ -338,14 +338,14 @@ namespace ai_matrix
}
std::string& MyUtils::replace_all_distinct(std::string &str, const std::string &old_value, const std::string &new_value)
{
for (std::string::size_type pos(0); pos != std::string::npos; pos += new_value.length()) {
if ((pos = str.find(old_value, pos)) != std::string::npos)
str.replace(pos, old_value.length(), new_value);
else break;
}
return str;
}
{
for (std::string::size_type pos(0); pos != std::string::npos; pos += new_value.length()) {
if ((pos = str.find(old_value, pos)) != std::string::npos)
str.replace(pos, old_value.length(), new_value);
else break;
}
return str;
}
/**
*
@ -386,6 +386,23 @@ namespace ai_matrix
return std::string(szTmp);
}
/**
 * Convert a millisecond-resolution epoch timestamp to a local-time string.
 * @param timestamp milliseconds since the Unix epoch
 * @param has_msec  when true, append the millisecond part as ".mmm"
 * @return "YYYY-MM-DD HH:MM:SS" (local time), optionally with ".mmm" suffix
 */
std::string MyUtils::Stamp2Time(long long timestamp, bool has_msec)
{
    int ms = static_cast<int>(timestamp % 1000);          // millisecond remainder
    time_t tick = static_cast<time_t>(timestamp / 1000);  // whole seconds
    struct tm tmbuf;
    // localtime_r instead of localtime: reentrant, safe if called from several engine threads
    localtime_r(&tick, &tmbuf);
    char s[40] = { 0 };
    strftime(s, sizeof(s), "%Y-%m-%d %H:%M:%S", &tmbuf);
    std::string str(s);
    if (has_msec)
    {
        // Zero-pad to three digits: std::to_string(7) would yield ".7" instead of ".007"
        char msec[8] = { 0 };
        snprintf(msec, sizeof(msec), ".%03d", ms);
        str += msec;
    }
    return str;
}
/**
* 1970
* inParam : N/A
@ -507,7 +524,7 @@ namespace ai_matrix
return true;
}
#ifdef ASCEND
#ifdef ASCEND
/**
* Device数据到Host
* inParam : void *pDeviceBuffer device内存地址
@ -589,8 +606,8 @@ namespace ai_matrix
}
return true;
}
#endif
#endif
/**
*
* inParam : uint64_t i64MilliSeconds
@ -735,7 +752,7 @@ namespace ai_matrix
}
//清空之前的结果
vecResult.clear();
vecResult.clear();
// 每个类别中,获取得分最高的框
for (auto iter = mapResult.begin(); iter != mapResult.end(); iter++)
{

View File

@ -51,10 +51,13 @@ namespace ai_matrix
//获取时间戳
std::string get_timestamp_file();
std::string get_timestamp_log();
//获取日期
std::string get_date();
//获取时间
std::string get_time();
//获取日期
std::string get_date();
//获取时间
std::string get_time();
//时间戳转化为时间 毫秒级
std::string Stamp2Time(long long timestamp, bool has_msec = false);
//创建文件夹
std::string create_dir_name(std::string root, std::string name);
@ -73,16 +76,16 @@ namespace ai_matrix
//bool 转 string
std::string getStringFromBool(bool b);
/**
*
* @param filePath
* @param savePath
* @return
*/
bool copyFile(std::string filePath, std::string savePath);
/**
*
* @param filePath
* @param savePath
* @return
*/
bool copyFile(std::string filePath, std::string savePath);
//替换string中所有指定字符串
std::string& replace_all_distinct(std::string &str, const std::string &old_value, const std::string &new_value);
//替换string中所有指定字符串
std::string& replace_all_distinct(std::string &str, const std::string &old_value, const std::string &new_value);
//获取北京当前日期
std::string GetDate();
@ -99,13 +102,13 @@ namespace ai_matrix
//创建文件夹路径
bool CreateDirPath(std::string strDirPath);
#ifdef ASCEND
#ifdef ASCEND
//拷贝Device数据到Host
bool MemcpyDeviceToHost(std::shared_ptr<void> *pHostData, const void *pDeviceBuffer, uint32_t iBufferSize);
//拷贝Host数据到Device
bool MemcpyHostToDevice(std::shared_ptr<void> *pDeviceData, const void *pHostBuffer, uint32_t iBufferSize, bool bDvppFlag = true);
#endif
#endif
//获取指定毫秒数的对应的日期时间
std::string GetDateTimeByMilliSeconds(uint64_t i64MilliSeconds, bool bFormatFlag = false);

View File

@ -113,6 +113,8 @@ model:
nms_threshold: 0.3
gc_http_open: 1
username: "guest_01"
password: "d55b0f642e817eea24725d2f2a31dd08" # 神东
gc_http_url: "http://192.168.2.211:20004/api/train-carriage/identification/video-save"
gc_gettoken_url: "http://192.168.2.211:20004/api/blade-auth/oauth/token"
gc_image_srv: "http://192.168.2.211:9010/"

View File

@ -118,7 +118,7 @@ bool DataDealEngine::ReadFileInfo(Json::Value &jvFrameInfo, RawData &rawData, st
// LogError << "Failed to read image:" << strImgName;
// return false;
// }
return true;
}
@ -190,14 +190,14 @@ void DataDealEngine::MakeProcessData()
iFrameId = iReRunFrameId;
}
// LogInfo << "sourceid:" << iSourceId << " MakeProcessData origtime:" << moveData_.strTrainName << " iOrigFrameId:" << iOrigFrameId
// << " time:" << strTrainName_ << " iFrameId:" << iFrameId << " bIsEndFlag:" << bIsEndFlag;
LogInfo << "sourceid:" << iSourceId << " MakeProcessData origtime:" << moveData_.strTrainName << " iOrigFrameId:" << iOrigFrameId
<< " time:" << strTrainName_ << " iFrameId:" << iFrameId << " bIsEndFlag:" << bIsEndFlag;
std::string strImgName = strDataDir_ + szCameraNo + std::to_string(iOrigFrameId);
strImgName += (iter->second.iRotate != 0) ? "_rotate.jpg" : ".jpg";
std::string strFileName = strDataDir_ + szCameraNo + std::to_string(iOrigFrameId) + ".txt";
// 摄像头读取失败后重试30次。(retry up to 30 times after a camera read failure)
Json::Value jvFrameInfo;
RawData rawData;
bool bRet = false;
@ -228,7 +228,7 @@ void DataDealEngine::MakeProcessData()
pProcessData->bIsEnd = bIsEndFlag;
pProcessData->iDataNO = iDataNO_;
pProcessData->nMonitorState = moveData_.nMonitorState;
if (bRet)
{
i64TimeStampTemp = jvFrameInfo["timeStamp"].asUInt64();
@ -241,7 +241,7 @@ void DataDealEngine::MakeProcessData()
cv::Mat cvframe = cv::imread(pProcessData->strPicFilePath);
int iBufferSize = pProcessData->iWidth * pProcessData->iHeight * 3;
void* pBGRBufferobj = nullptr;
pBGRBufferobj = new uint8_t[iBufferSize];
pBGRBufferobj = new uint8_t[iBufferSize];
memcpy(pBGRBufferobj, cvframe.data, iBufferSize);
pProcessData->pData.reset(pBGRBufferobj, [](void* data){if(data) {delete[] data; data = nullptr;}});
pProcessData->iSize = iBufferSize;
@ -255,15 +255,15 @@ void DataDealEngine::MakeProcessData()
//iRet = outputQueMap_[vecPushPorts[iPort]]->push(std::static_pointer_cast<void>(pProcessData));
PushData(vecPushPorts[iPort], pProcessData);
continue;
}
}
std::shared_ptr<ProcessData> pNewProcessData = std::make_shared<ProcessData>();
*pNewProcessData = *pProcessData;
//iRet = outputQueMap_[vecPushPorts[iPort]]->push(std::static_pointer_cast<void>(pNewProcessData));
PushData(vecPushPorts[iPort], pNewProcessData);
}
}
iOrigDataNO_++;
iDataNO_++;
//每组处理数据需间隔一定时间
@ -291,14 +291,25 @@ APP_ERROR DataDealEngine::Process()
//获取主摄像头检测的状态
std::shared_ptr<void> pVoidData0 = nullptr;
iRet = inputQueMap_[strPort0_]->pop(pVoidData0);
if (nullptr != pVoidData0)
{
std::shared_ptr<MoveData> pMoveData = std::static_pointer_cast<MoveData>(pVoidData0);
// queuwMoveData_.push(*pMoveData);
moveData_ = *pMoveData;
LogDebug << "traindate:" << moveData_.strTrainDate << " trainname:" << moveData_.strTrainName
<< " MoveData frameid:" << moveData_.iFrameId << " IsEnd:" << moveData_.bIsEnd;
}
// LogDebug << "【帧号】" << (iDataNO_ * dataSourceConfig_.iSkipInterval);
// if (queuwMoveData_.size() > 0 && (iDataNO_ * dataSourceConfig_.iSkipInterval) >= queuwMoveData_.front().iFrameId)
// {
// moveData_ = queuwMoveData_.front();
// queuwMoveData_.pop();
// LogDebug << "!!!--- moveDate 更新";
// }
if (!moveData_.bHasTrain)
{
usleep(1000); //1ms
@ -308,7 +319,7 @@ APP_ERROR DataDealEngine::Process()
//第一个数据休眠1s等待图片存入本地
if (iOrigDataNO_ == 1)
{
usleep(1000000); //1s
usleep(1000 * 1000); //1s
}
if (strDataDir_.empty())

View File

@ -8,6 +8,8 @@ ResultToHttpSrvEngine::~ResultToHttpSrvEngine() {}
APP_ERROR ResultToHttpSrvEngine::Init()
{
strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0";
strUsername_ = MyYaml::GetIns()->GetStringValue("username");
strPassword_ = MyYaml::GetIns()->GetStringValue("password");
strURL_ = MyYaml::GetIns()->GetStringValue("gc_http_url");
strGetTokenURL_ = MyYaml::GetIns()->GetStringValue("gc_gettoken_url");
strImageSrv_ = MyYaml::GetIns()->GetPathValue("gc_image_srv");
@ -76,11 +78,11 @@ bool ResultToHttpSrvEngine::GetToken(std::string &strBladeAuth)
curl_mime *pMultipart = curl_mime_init(pCurl_);
curl_mimepart *pPart = curl_mime_addpart(pMultipart);
curl_mime_name(pPart, "username");
curl_mime_data(pPart, "guest_01", CURL_ZERO_TERMINATED);
curl_mime_data(pPart, strUsername_.c_str(), CURL_ZERO_TERMINATED);
pPart = curl_mime_addpart(pMultipart);
curl_mime_name(pPart, "password");
curl_mime_data(pPart, "d55b0f642e817eea24725d2f2a31dd08", CURL_ZERO_TERMINATED);
pPart = curl_mime_addpart(pMultipart);
curl_mime_data(pPart, strPassword_.c_str(), CURL_ZERO_TERMINATED);
pPart = curl_mime_addpart(pMultipart);
curl_mime_name(pPart, "tenantId");
curl_mime_data(pPart, "000000", CURL_ZERO_TERMINATED);
pPart = curl_mime_addpart(pMultipart);
@ -426,7 +428,8 @@ APP_ERROR ResultToHttpSrvEngine::Process()
jvRequest["isTheLast"] = pTrain->bIsEnd ? 1 : 0; // 是否最后一节: 0:否,1:是
jvRequest["startFrame"] = pTrain->iStartFrameId; //车厢开始帧
jvRequest["endFrame"] = pTrain->iEndFrameId; //车厢结束帧
jvRequest["skipFrame"] = dataSourceConfig.iSkipInterval; //跳帧
jvRequest["skipFrame"] = dataSourceConfig.iSkipInterval;
jvRequest["collectTime"] = MyUtils::getins()->Stamp2Time(pTrain->i64EndTimeStamp, true);//车厢切分的时间 //跳帧
if (!ResultToHttpSrv(jvRequest))
{
// SaveHttpFailInfo(jvRequest, strFailSavePath_);

View File

@ -40,6 +40,8 @@ private:
bool SaveHttpFailInfo(Json::Value &jvRequest, std::string &strFilePath);
std::string strPort0_;
std::string strUsername_;
std::string strPassword_;
std::string strURL_;
std::string strGetTokenURL_;
std::string strImageSrv_;

View File

@ -30,6 +30,7 @@ APP_ERROR FilterTrainStepOneEngine::Init()
iChkStopCount_ = MyYaml::GetIns()->GetIntValue("gc_chkstop_count");
iPartitionFrameNum_ = MyYaml::GetIns()->GetIntValue("partition_frame_span");
iPlitFrameSpanPX_ = MyYaml::GetIns()->GetIntValue("gc_split_frame_span_px");
iPushDirection_ = MyYaml::GetIns()->GetIntValue("gc_push_direction");
//获取主摄像头信息
mainCfg_ = MyYaml::GetIns()->GetDataSourceConfigById(0);
@ -205,6 +206,13 @@ void FilterTrainStepOneEngine::AddBackInfo(std::shared_ptr<ProcessData> pProcess
{
return;
}
if (iDirection_ == DIRECTION_RIGHT
&& trainBackInfo.strAllClassType == "SPACE"
&& (trainBackInfoTop.strAllClassType == "NUMSPACE" || trainBackInfoTop.strAllClassType == "SPACENUM"))
{
return;
}
if (iDirection_ == DIRECTION_LEFT
&& trainBackInfo.strAllClassType == "SPACE"
&& (trainBackInfoTop.strAllClassType == "NUMSPACE" || trainBackInfoTop.strAllClassType == "SPACENUM"))
@ -262,20 +270,34 @@ bool FilterTrainStepOneEngine::IsEndDealBackInfo(std::shared_ptr<ProcessData> pP
for (size_t i = 0; i < pPostDataBack->vecPostSubData.size(); i++)
{
bool bFlag = (pPostDataBack->vecPostSubData[i].step1Location.fLTX <= pPostData->vecPostSubData[i].step1Location.fLTX);
LogDebug << "帧:" << pProcessData->iFrameId << " 倒车前帧:" << pPostDataBack->iFrameId << " 恢复到原位:" << bFlag
<< " 当前框位置:" << pPostData->vecPostSubData[i].step1Location.fLTX
<< " 倒车前位置:" << pPostDataBack->vecPostSubData[i].step1Location.fLTX;
if ((iDirection_ == DIRECTION_LEFT && !bFlag) ||
(iDirection_ == DIRECTION_RIGHT && bFlag))
int bFlag = -1;
for (size_t j = 0; j < pPostData->vecPostSubData.size(); j++)
{
if (pPostDataBack->vecPostSubData[i].iBigClassId == pPostData->vecPostSubData[j].iBigClassId)
{
if (pPostData->vecPostSubData[j].step1Location.fLTX < 1 || pPostDataBack->vecPostSubData[i].step1Location.fLTX < 1)
{
LogDebug << "大框X坐标小于1判定为异常大框。过滤";
break;
}
bFlag = (pPostDataBack->vecPostSubData[i].step1Location.fLTX <= pPostData->vecPostSubData[j].step1Location.fLTX) ? 1 : 0;
LogDebug << "帧:" << pProcessData->iFrameId << " 倒车前帧:" << pPostDataBack->iFrameId << " 恢复到原位:" << bFlag
<< " 当前框位置:" << pPostData->vecPostSubData[i].step1Location.fLTX
<< " 倒车前位置:" << pPostDataBack->vecPostSubData[i].step1Location.fLTX << "方向:" << iDirection_;
}
}
if ((iDirection_ == DIRECTION_LEFT && bFlag == 0) ||
(iDirection_ == DIRECTION_RIGHT && bFlag == 1))
{
bPopFlag = true;
break;
}
}
if (bPopFlag)
{
LogDebug << "frameId:" << pProcessData->iFrameId << " last one bPopFlag:" << bPopFlag;
LogDebug << "frameId:" << pProcessData->iFrameId << " 恢复倒车前的位置:" << bPopFlag;
stackBackInfo_.pop();
}
}
@ -767,7 +789,8 @@ void FilterTrainStepOneEngine::CalculateDirection(std::shared_ptr<ProcessData> p
}
// Publish a "train arrived" JSON message on output port 1.
// Direction is normalized against the configured push direction:
// 1 when it matches iPushDirection_, -1 otherwise (emitted as a JSON number).
void FilterTrainStepOneEngine::sendComeTrain(const std::string strTrainDate, const std::string strTrainName, const int iDirection) {
    std::string message = "{\"cometime\":\"" + strTrainDate + " " + strTrainName + "\",\"type\":\"1\",\"direction\":" + to_string(iDirection == iPushDirection_ ? 1 : -1) + "}";
    LogWarn << message;
    outputQueMap_[engineName_ + "_" + std::to_string(engineId_) + "_1"]->push(std::static_pointer_cast<void>(std::make_shared<std::string>(message)));
}
@ -915,6 +938,7 @@ void FilterTrainStepOneEngine::DealProcessDataPre(std::shared_ptr<ProcessData> p
{
//CalculateDirection(iterProcessData->second);
CalculateDirectionNew(iterProcessData->second);
if (iDirection_ != DIRECTION_UNKNOWN) this->sendComeTrain(pProcessData->strTrainDate, pProcessData->strTrainName, iDirection_);
}
if (iDirection_ != DIRECTION_UNKNOWN)

View File

@ -58,6 +58,7 @@ private:
int iChkStopPX_;
int iChkStopCount_;
int iDirection_; //方向
int iPushDirection_; //需要识别的方向
int rightFirst_; // 向右行驶的在前大框类型
int leftFirst_; // 向左行驶的在前大框类型
int iPartitionFrameNum_; //满足跨车厢的帧间隔

View File

@ -83,7 +83,10 @@ bool SaveCsvEngine::SaveMergerCsv(std::shared_ptr<Train> pTrain)
<< "inspection" << ','
<< "inspectionImg" << ','
<< "containerImg_1" << ','
<< "containerImg_2" << std::endl;
<< "containerImg_2" << ','
<< "startTime" << ','
<< "endTime"
<< std::endl;
}
std::string strTime = pTrain->strTrainName;
@ -147,7 +150,10 @@ bool SaveCsvEngine::SaveMergerCsv(std::shared_ptr<Train> pTrain)
<< pTrain->chkDate.strChkDate1DeadLine << ','
<< szChkDateImgPath << ','
<< szContainer1ImgPath << ','
<< szContainer2ImgPath << std::endl;
<< szContainer2ImgPath << ','
<< MyUtils::getins()->Stamp2Time(pTrain->i64StartTimeStamp, true) << ','
<< MyUtils::getins()->Stamp2Time(pTrain->i64EndTimeStamp, true)
<< std::endl;
outFile.close();
}
@ -427,18 +433,18 @@ bool SaveCsvEngine::SaveContainerCsv(std::shared_ptr<TrainContainer> pTrainConta
catch (const std::exception &)
{
LogError << "strCsvPath:" << strCsvPath << " container savecsv fail!";
continue;
continue;
}
}
return true;
}
APP_ERROR SaveCsvEngine::Process()
{
{
int iRet = APP_ERR_OK;
while (!isStop_)
{
bool bPopFlag = false;
//pop端口0 车厢信息
std::shared_ptr<void> pVoidData0 = nullptr;

View File

@ -2,54 +2,54 @@
using namespace std;
// Default constructor: no work at construction time; all FFmpeg state is
// set up explicitly via HardH264FFmpegDecoderInit().
HardH264FFmpegDecode::HardH264FFmpegDecode()
{
}

// Destructor: intentionally empty; callers are expected to release FFmpeg
// resources via HardH264FFmpegDecoderDeInit() before destruction.
HardH264FFmpegDecode::~HardH264FFmpegDecode()
{
}
int HardH264FFmpegDecode::HardH264FFmpegDecoderInit(unsigned int uiWidth, unsigned int uiHeight, unsigned int uiFrameRate)
{
uiWidth_ = uiWidth; uiHeight_ = uiHeight;
uiFrameRate_ = uiFrameRate;
iFrameFinished_ = 0;
uiWidth_ = uiWidth; uiHeight_ = uiHeight;
uiFrameRate_ = uiFrameRate;
iFrameFinished_ = 0;
av_log_set_level(AV_LOG_ERROR);
// AVCodecID codec_id = AV_CODEC_ID_H264; //解码H264
// pCodec_ = avcodec_find_decoder(codec_id); //获取解码器
av_log_set_level(AV_LOG_ERROR);
pCodec_ = avcodec_find_decoder_by_name(NVIDIA_H264_DECODER);
// AVCodecID codec_id = AV_CODEC_ID_H264; //解码H264
// pCodec_ = avcodec_find_decoder(codec_id); //获取解码器
pCodec_ = avcodec_find_decoder_by_name(NVIDIA_H264_DECODER);
if (!pCodec_) {
fprintf(stderr, "Codec '%s' not found\n", pCodec_->long_name);
exit(1);
}
printf("Codec found with name %d(%s)\n", pCodec_->id, pCodec_->long_name);
printf("Codec found with name %d(%s)\n", pCodec_->id, pCodec_->long_name);
//创建上下文
pCodecCtx_ = avcodec_alloc_context3(pCodec_);
//创建上下文
pCodecCtx_ = avcodec_alloc_context3(pCodec_);
if (!pCodecCtx_){
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
//创建解析器
pCodecParserCtx_ = av_parser_init(pCodec_->id);
if (!pCodecParserCtx_){
//创建解析器
pCodecParserCtx_ = av_parser_init(pCodec_->id);
if (!pCodecParserCtx_){
fprintf(stderr, "parser not found\n");
exit(1);
}
}
//if(pCodec_->capabilities&CODEC_CAP_TRUNCATED)
// pCodecCtx_->flags|= CODEC_FLAG_TRUNCATED;
//打开解码器
// pCodecCtx_->flags|= CODEC_FLAG_TRUNCATED;
//打开解码器
int ret = avcodec_open2(pCodecCtx_, pCodec_, nullptr);
if (ret < 0) {
if (ret < 0) {
fprintf(stderr, "Could not open codec\n");
printf("avcodec_open2 ret is: %d\n",ret);
exit(1);
@ -63,7 +63,7 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderInit(unsigned int uiWidth, unsign
}
// av_init_packet(pPacket_);
//分配frame
//分配frame
pSrcFrame_ = av_frame_alloc();
if (!pSrcFrame_) {
fprintf(stderr, "Could not allocate video src pFrame\n");
@ -78,14 +78,14 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderInit(unsigned int uiWidth, unsign
printf("after align down video_width: %d, video_height: %d\n", uiWidth_, uiHeight_);
//初始化解析器参数
pCodecCtx_->time_base.num = 1;
pCodecCtx_->frame_number = 1; //每包一个视频帧
pCodecCtx_->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCtx_->bit_rate = 0;
pCodecCtx_->time_base.den = uiFrameRate_;//帧率
pCodecCtx_->width = uiWidth_; //视频宽
pCodecCtx_->height = uiHeight_; //视频高
//初始化解析器参数
pCodecCtx_->time_base.num = 1;
pCodecCtx_->frame_number = 1; //每包一个视频帧
pCodecCtx_->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCtx_->bit_rate = 0;
pCodecCtx_->time_base.den = uiFrameRate_;//帧率
pCodecCtx_->width = uiWidth_; //视频宽
pCodecCtx_->height = uiHeight_; //视频高
// pCodecCtx_->pix_fmt = AV_PIX_FMT_YUV420P;
int bufferSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
@ -102,46 +102,46 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderInit(unsigned int uiWidth, unsign
printf("pDstFrame_->linesize: %d, bufferSize: %d\n", pDstFrame_->linesize, bufferSize);
pSwsContext_ = sws_getContext(pCodecCtx_->width, pCodecCtx_->height, pCodecCtx_->pix_fmt,
pCodecCtx_->width, pCodecCtx_->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, nullptr, nullptr, nullptr);
pCodecCtx_->width, pCodecCtx_->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, nullptr, nullptr, nullptr);
printf("pCodecCtx_->width: %d, pCodecCtx_->height: %d, pCodecCtx_->pix_fmt: %d\n", pCodecCtx_->width, pCodecCtx_->height, pCodecCtx_->pix_fmt);
return 0;
return 0;
}
int HardH264FFmpegDecode::HardH264FFmpegDecoderDeInit()
{
if(pu8OutBuffer_){
if(pu8OutBuffer_){
av_free(pu8OutBuffer_);
pu8OutBuffer_ = nullptr;
}
if(pSrcFrame_){
av_frame_free(&pSrcFrame_);
pSrcFrame_ = nullptr;
}
if(pDstFrame_){
av_frame_free(&pDstFrame_);
pDstFrame_ = nullptr;
}
if(pPacket_){
av_packet_free(&pPacket_);
if(pSrcFrame_){
av_frame_free(&pSrcFrame_);
pSrcFrame_ = nullptr;
}
if(pDstFrame_){
av_frame_free(&pDstFrame_);
pDstFrame_ = nullptr;
}
if(pPacket_){
av_packet_free(&pPacket_);
pPacket_ = nullptr;
}
if(pCodecParserCtx_){
av_parser_close(pCodecParserCtx_);
pCodecParserCtx_ = nullptr;
}
if(pCodecCtx_){
avcodec_close(pCodecCtx_);
av_free(pCodecCtx_);
pCodecCtx_ = nullptr;
}
if(pCodecParserCtx_){
av_parser_close(pCodecParserCtx_);
pCodecParserCtx_ = nullptr;
}
if(pCodecCtx_){
avcodec_close(pCodecCtx_);
av_free(pCodecCtx_);
pCodecCtx_ = nullptr;
}
if(pSwsContext_){
sws_freeContext(pSwsContext_);
pSwsContext_ = nullptr;
}
if(pSwsContext_){
sws_freeContext(pSwsContext_);
pSwsContext_ = nullptr;
}
}
int HardH264FFmpegDecode::HardH264FFmpegDecoderFilterGraph(AVFilterGraph *pGraph, AVFilterContext *pSourceCtx, AVFilterContext *pSinkCtx)
@ -149,7 +149,7 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderFilterGraph(AVFilterGraph *pGraph
int ret;
AVFilterInOut *pOutputs = nullptr, *pInputs = nullptr;
if ((ret = avfilter_link(pSourceCtx, 0, pSinkCtx, 0)) >= 0){
ret = avfilter_graph_config(pGraph, nullptr);
ret = avfilter_graph_config(pGraph, nullptr);
}
avfilter_inout_free(&pOutputs);
@ -168,14 +168,14 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderConfigureVideoFilters(AVFilterGra
"video_size=%dx%d:pix_fmt=%d:time_base=1/1200000",
iWidth, iHeight, iFormat);
if ((ret = avfilter_graph_create_filter(&pFiltSrc,
avfilter_get_by_name("buffer"), "ffplay_buffer", BufferSrcArgs,
nullptr, pGraph)) < 0){
avfilter_get_by_name("buffer"), "ffplay_buffer", BufferSrcArgs,
nullptr, pGraph)) < 0){
goto fail;
}
ret = avfilter_graph_create_filter(&pFiltDst,
avfilter_get_by_name("buffersink"),
"ffplay_buffersink", nullptr, nullptr, pGraph);
avfilter_get_by_name("buffersink"),
"ffplay_buffersink", nullptr, nullptr, pGraph);
if (ret < 0){
goto fail;
}
@ -190,14 +190,14 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderConfigureVideoFilters(AVFilterGra
pDecoderFilterIn = pFiltSrc;
pDecoderFilterOut = pFiltDst;
fail:
fail:
return ret;
}
int HardH264FFmpegDecode::HardH264FFmpegDecoder(AVCodecContext *pDecCtx, AVFrame *pFrame, AVPacket *pPkt, void* pOutputData, unsigned int* puiOutputDataSize)
{
int ret;
AVFilterGraph* pDecoderGraph = nullptr;
int ret;
AVFilterGraph* pDecoderGraph = nullptr;
ret = avcodec_send_packet(pDecCtx, pPkt); //接收packet解码
if (ret < 0) {
@ -208,7 +208,7 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoder(AVCodecContext *pDecCtx, AVFrame
while (ret >= 0) {
ret = avcodec_receive_frame(pDecCtx, pFrame); //解码
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
fprintf(stderr, "During decoding eof\n");
fprintf(stderr, "During decoding eof\n");
return -1;
}
else if (ret < 0) {
@ -219,35 +219,35 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoder(AVCodecContext *pDecCtx, AVFrame
//printf("saving frame %3d\n", pDecCtx->frame_number);
fflush(stdout);
AVFilterContext *pDecoderFilterIn = nullptr, *pDecoderFilterOut = nullptr;
AVFilterContext *pDecoderFilterIn = nullptr, *pDecoderFilterOut = nullptr;
// pFrame->width = ALIGN_DOWN(pFrame->width, 32);
// pFrame->height = ALIGN_DOWN(pFrame->height, 32);
// printf("pFrame->width: %d\tpFrame->height: %d\n", pFrame->width, pFrame->height);
pDecoderGraph = avfilter_graph_alloc();
HardH264FFmpegDecoderConfigureVideoFilters(pDecoderGraph, pDecoderFilterIn, pDecoderFilterOut, pFrame->width, pFrame->height, pFrame->format);
if (pFrame->format != AV_PIX_FMT_YUV420P){
if (pFrame->format != AV_PIX_FMT_YUV420P){
DUMP_FRAME(pFrame);
ret = av_buffersrc_add_frame(pDecoderFilterIn, pFrame);
ret = av_buffersrc_add_frame(pDecoderFilterIn, pFrame);
ret = av_buffersink_get_frame_flags(pDecoderFilterOut, pFrame, 0);
DUMP_FRAME(pFrame);
int iSize = pFrame->width * pFrame->height;
memcpy(pOutputData, pFrame->data[0], iSize); //Y
memcpy(pOutputData+iSize, pFrame->data[1], iSize/4); //U
memcpy(pOutputData+iSize+iSize/4, pFrame->data[2], iSize/4); //V
*puiOutputDataSize = iSize*3/2;
return iSize*3/2;
}
}
return 0;
int iSize = pFrame->width * pFrame->height;
memcpy(pOutputData, pFrame->data[0], iSize); //Y
memcpy(pOutputData+iSize, pFrame->data[1], iSize/4); //U
memcpy(pOutputData+iSize+iSize/4, pFrame->data[2], iSize/4); //V
*puiOutputDataSize = iSize*3/2;
return iSize*3/2;
}
}
return 0;
}
int HardH264FFmpegDecode::HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, AVFrame *pSrcFrame, AVFrame *pDstFrame, AVPacket *pPkt, void* pOutputData,unsigned int* puiOutputDataSize)
int HardH264FFmpegDecode::HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, SwsContext *pSwsCtx, AVFrame *pSrcFrame, AVFrame *pDstFrame, AVPacket *pPkt, void* pOutputData,unsigned int* puiOutputDataSize)
{
int ret;
int ret;
ret = avcodec_send_packet(pDecCtx, pPkt); //接收packet解码
if (ret < 0) {
@ -258,7 +258,7 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, AVFra
while (ret >= 0) {
ret = avcodec_receive_frame(pDecCtx, pSrcFrame); //解码
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
fprintf(stderr, "During decoding eof\n");
fprintf(stderr, "During decoding eof\n");
return -1;
}
else if (ret < 0) {
@ -266,32 +266,27 @@ int HardH264FFmpegDecode::HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, AVFra
exit(1);
}
// pDecCtx->width = ALIGN_DOWN(pDecCtx->width, 32);
// pDecCtx->width = ALIGN_DOWN(pDecCtx->width, 32);
// pDecCtx->height = ALIGN_DOWN(pDecCtx->height, 32);
// sws_scale(pSwsCtx,
// (const uint8_t *const *)pSrcFrame->data,
// pSrcFrame->linesize,
// 0,
// pDecCtx->height,
// pDstFrame->data,
// pDstFrame->linesize);
sws_scale(pSwsCtx,
(const uint8_t *const *)pSrcFrame->data,
pSrcFrame->linesize,
0,
pDecCtx->height,
pDstFrame->data,
pDstFrame->linesize);
//printf("saving frame %3d\n", pDecCtx->frame_number);
// fflush(stdout);
fflush(stdout);
// int iSize = pDecCtx->width * pDecCtx->height;
//
// memcpy(pOutputData, pDstFrame->data[0], iSize); //Y
// memcpy(pOutputData+iSize, pDstFrame->data[1], iSize/4); //U
// memcpy(pOutputData+iSize+iSize/4, pDstFrame->data[2], iSize/4); //V
// *puiOutputDataSize = iSize*3/2;
// return iSize*3/2;
memcpy(pOutputData, pSrcFrame->data[0], pSrcFrame->width * pSrcFrame->height); // Y
memcpy(pOutputData + pSrcFrame->width * pSrcFrame->height, pSrcFrame->data[1], pSrcFrame->width * pSrcFrame->height / 4); // U
memcpy(pOutputData + pSrcFrame->width * pSrcFrame->height + pSrcFrame->width * pSrcFrame->height / 4, pSrcFrame->data[2], pSrcFrame->width * pSrcFrame->height / 4); // V
*puiOutputDataSize = pSrcFrame->width * pSrcFrame->height * 3 / 2;
return pSrcFrame->width * pSrcFrame->height * 3 / 2;
}
return 0;
int iSize = pDecCtx->width * pDecCtx->height;
memcpy(pOutputData, pDstFrame->data[0], iSize); //Y
memcpy(pOutputData+iSize, pDstFrame->data[1], iSize/4); //U
memcpy(pOutputData+iSize+iSize/4, pDstFrame->data[2], iSize/4); //V
*puiOutputDataSize = iSize*3/2;
return iSize*3/2;
}
return 0;
}

View File

@ -56,7 +56,7 @@ extern "C"
frame->linesize[2] \
);}
#define NVIDIA_H264_DECODER "h264_cuvid"
#define NVIDIA_H264_DECODER "h264_cuvid"
// #define NVIDIA_H264_DECODER "h264_v4l2m2m"
class HardH264FFmpegDecode
@ -68,22 +68,22 @@ public:
int HardH264FFmpegDecoderInit(unsigned int uiWidth, unsigned int uiHeight, unsigned int uiFrameRate = 30);
int HardH264FFmpegDecoderDeInit();
int HardH264FFmpegDecoder(AVCodecContext *pDecCtx, AVFrame *pFrame, AVPacket *pPkt, void* pOutputData, unsigned int* puiOutputDataSize);
int HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, AVFrame *pSrcFrame, AVFrame *pDstFrame, AVPacket *pPkt, void* pOutputData, unsigned int* puiOutputDataSize);
int HardH264FFmpegDecoderV2(AVCodecContext *pDecCtx, SwsContext *pSwsCtx, AVFrame *pSrcFrame, AVFrame *pDstFrame, AVPacket *pPkt, void* pOutputData, unsigned int* puiOutputDataSize);
const AVCodec *pCodec_ = nullptr; //解码器
AVCodecContext *pCodecCtx_ = nullptr; //上下文
AVCodecParserContext *pCodecParserCtx_ = nullptr; //解析器上下文
AVFrame *pSrcFrame_ = nullptr;
AVFrame *pDstFrame_ = nullptr;
AVPacket *pPacket_ = nullptr;
SwsContext *pSwsContext_ = nullptr;
AVCodecParserContext *pCodecParserCtx_ = nullptr; //解析器上下文
AVFrame *pSrcFrame_ = nullptr;
AVFrame *pDstFrame_ = nullptr;
AVPacket *pPacket_ = nullptr;
SwsContext *pSwsContext_ = nullptr;
uint8_t *pu8OutBuffer_ = nullptr;
uint8_t *pu8OutBuffer_ = nullptr;
private:
int HardH264FFmpegDecoderFilterGraph(AVFilterGraph *pGraph, AVFilterContext *pSourceCtx, AVFilterContext *pSinkCtx);
int HardH264FFmpegDecoderConfigureVideoFilters(AVFilterGraph *pGraph, AVFilterContext* &pDecoderFilterIn, AVFilterContext* &pDecoderFilterOut, const int iWidth, const int iHeight, const int iFormat);
unsigned int uiWidth_, uiHeight_;
int iFrameFinished_;

View File

@ -56,7 +56,7 @@ APP_ERROR VideoDecodeEngine::Process()
}
int iRet = APP_ERR_OK;
int iSkipCount = 1;
int iSkipCount = 1;
int iNoCameraDataCnt = 0;
while (!isStop_)
{
@ -67,19 +67,19 @@ APP_ERROR VideoDecodeEngine::Process()
{
usleep(10*1000); //10ms
// iNoCameraDataCnt++;
// if (iNoCameraDataCnt >= 1000) //10秒内收不到认为相机断开
// {
// LogError << "engineId:" << engineId_ << " 超过10秒获取到摄像头数据疑似摄像头断开。计数" << iNoCameraDataCnt;
// iNoCameraDataCnt = 0;
// //camera异常时构造空的解码数据push确保一直有数据流转到后面Engine
// std::shared_ptr<ProcessData> pProcessData = std::make_shared<ProcessData>();
// pProcessData->iDataSource = engineId_;
// pProcessData->i64TimeStamp = MyUtils::getins()->GetCurrentTimeMillis();
// pProcessData->iSize = 0;
// pProcessData->pData = nullptr;
// iRet = outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pProcessData));
// }
// iNoCameraDataCnt++;
// if (iNoCameraDataCnt >= 1000) //10秒内收不到认为相机断开
// {
// LogError << "engineId:" << engineId_ << " 超过10秒获取到摄像头数据疑似摄像头断开。计数" << iNoCameraDataCnt;
// iNoCameraDataCnt = 0;
// //camera异常时构造空的解码数据push确保一直有数据流转到后面Engine
// std::shared_ptr<ProcessData> pProcessData = std::make_shared<ProcessData>();
// pProcessData->iDataSource = engineId_;
// pProcessData->i64TimeStamp = MyUtils::getins()->GetCurrentTimeMillis();
// pProcessData->iSize = 0;
// pProcessData->pData = nullptr;
// iRet = outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pProcessData));
// }
continue;
}
@ -111,14 +111,15 @@ APP_ERROR VideoDecodeEngine::Process()
std::shared_ptr<void> pYUVData;
pYUVData.reset(pYUV420MBuffer, [](void *data){if(data) {delete[] data; data = nullptr;}}); //智能指针管理内存
hard_h264_ffmpeg_decoder_->pPacket_->data = static_cast<uint8_t *>(pProcessData->pData.get()); //这里填入一个指向完整H264数据帧的指针
hard_h264_ffmpeg_decoder_->pPacket_->size = pProcessData->iSize; //这个填入H264数据帧的大小
hard_h264_ffmpeg_decoder_->pPacket_->data = static_cast<uint8_t *>(pProcessData->pData.get()); //这里填入一个指向完整H264数据帧的指针
hard_h264_ffmpeg_decoder_->pPacket_->size = pProcessData->iSize; //这个填入H264数据帧的大小
// H264硬件解码
// int iDecodeRet= hard_h264_ffmpeg_decoder_->HardH264FFmpegDecoderV2(hard_h264_ffmpeg_decoder_->pCodecCtx_, hard_h264_ffmpeg_decoder_->pFrame_,
// hard_h264_ffmpeg_decoder_->pPacket_, pYUV420MBuffer, &pYUV420MBuffer_Size);
int iDecodeRet = hard_h264_ffmpeg_decoder_->HardH264FFmpegDecoderV2(hard_h264_ffmpeg_decoder_->pCodecCtx_,
hard_h264_ffmpeg_decoder_->pSwsContext_,
hard_h264_ffmpeg_decoder_->pSrcFrame_,
hard_h264_ffmpeg_decoder_->pDstFrame_,
hard_h264_ffmpeg_decoder_->pPacket_,

View File

@ -14,7 +14,7 @@ APP_ERROR TrainStepOneEngine::Init()
bUseEngine_ = MyUtils::getins()->ChkIsHaveTarget("NUM");
if (!bUseEngine_)
{
LogWarn << "engineId_:" << engineId_ << " not use engine";
LogInfo << "engineId_:" << engineId_ << " not use engine";
return APP_ERR_OK;
}
@ -98,7 +98,7 @@ APP_ERROR TrainStepOneEngine::InitModel()
int nRet = yolov5model.YoloV5ClearityInferenceInit(&modelinfo, strModelName, modelConfig_.strOmPath);
if (nRet != 0)
{
LogInfo << "YoloV5ClassifyInferenceInit nRet:" << nRet;
LogError << "YoloV5ClassifyInferenceInit nRet:" << nRet;
return APP_ERR_COMM_READ_FAIL;
}
return APP_ERR_OK;
@ -147,7 +147,7 @@ APP_ERROR TrainStepOneEngine::DeInit()
{
if (!bUseEngine_)
{
LogWarn << "engineId_:" << engineId_ << " not use engine";
LogInfo << "engineId_:" << engineId_ << " not use engine";
return APP_ERR_OK;
}
@ -170,7 +170,7 @@ void TrainStepOneEngine::PushData(const std::string &strPort, const std::shared_
int iRet = outputQueMap_[strPort]->push(std::static_pointer_cast<void>(pProcessData));
if (iRet != 0)
{
LogDebug << "sourceid:" << pProcessData->iDataSource << " frameid:" << pProcessData->iFrameId << " push fail iRet:" << iRet;
LogError << " frameid:" << pProcessData->iFrameId << " push fail iRet:" << iRet;
if (iRet == 2)
{
usleep(10000); // 10ms
@ -204,9 +204,9 @@ void TrainStepOneEngine::FilterInvalidInfo(std::vector<stDetection> &vecRet, std
it->bbox[3] <= dataSourceCfg.fIdentifyAreasRBY))
{
LogDebug << "frameId:" << pProcessData->iFrameId
<< " bigclassid:" << it->class_id << " 超出识别区域-识别区域:("
<< dataSourceCfg.fIdentifyAreasLTX << "," << dataSourceCfg.fIdentifyAreasLTY << "),("
<< dataSourceCfg.fIdentifyAreasRBX << "," << dataSourceCfg.fIdentifyAreasRBY << ")";
<< " bigclassid:" << it->class_id << " 超出识别区域-识别区域:("
<< dataSourceCfg.fIdentifyAreasLTX << "," << dataSourceCfg.fIdentifyAreasLTY << "),("
<< dataSourceCfg.fIdentifyAreasRBX << "," << dataSourceCfg.fIdentifyAreasRBY << ")";
it = vecRet.erase(it);
continue;
}
@ -219,19 +219,33 @@ void TrainStepOneEngine::FilterInvalidInfo(std::vector<stDetection> &vecRet, std
continue;
}
// 去除车头车尾的间隔信息
// 去除车头时的非车头编号信息
if(pProcessData->nMonitorState == MONITOR_MODEL_TRAIN_HEAD )
{
if(it->class_id != TRAIN_HEAD)
{
LogDebug << " 帧号:" << pProcessData->iFrameId
<< " 大类:" << it->class_id << " 识别于车头位置,无效!";
<< " 大类:" << it->class_id << " 识别于车头位置,无效!";
it = vecRet.erase(it);
continue;
}
}
// 去除车尾的车头编号信息
if (pProcessData->nMonitorState != MONITOR_MODEL_TRAIN_HEAD)
{
if (it->class_id == TRAIN_HEAD)
{
LogDebug << " 帧号:" << pProcessData->iFrameId
<< " 大类:" << it->class_id << " 识别于非车头位置,无效!";
it = vecRet.erase(it);
continue;
}
}
// 去除车尾的间隔信息
if (pProcessData->nMonitorState == MONITOR_MODEL_TRAIN_TAIL
&& ((it->class_id >= 9 && it->class_id <= 17 && it->class_id != 15) || it->class_id == 18))
&& ((it->class_id >= 9 && it->class_id <= 17 && it->class_id != 15) || it->class_id == 18))
{
LogDebug << " frameId:" << pProcessData->iFrameId
<< " bigclassid:" << it->class_id
@ -239,6 +253,17 @@ void TrainStepOneEngine::FilterInvalidInfo(std::vector<stDetection> &vecRet, std
it = vecRet.erase(it);
continue;
}
// 过滤掉识别于模型反馈无车状态下的所有大框信息
if (pProcessData->nMonitorState == MONITOR_MODEL_NO_TRAIN)
{
LogDebug << " frameId:" << pProcessData->iFrameId
<< " bigclassid:" << it->class_id
<<" 识别于模型反馈的无车状态下,无效!";
it = vecRet.erase(it);
continue;
}
// 按大框高度剔除远股道识别的信息
int iClassHeight = it->bbox[3] - it->bbox[1];
if (dataSourceCfg.mapClassMinH.find(it->class_id) != dataSourceCfg.mapClassMinH.end() &&
@ -273,7 +298,7 @@ void TrainStepOneEngine::FilterInvalidInfo(std::vector<stDetection> &vecRet, std
continue;
}
if (((it->class_id >= 2 && it->class_id <= 6) || it->class_id == J_TRAIN_NUM || it->class_id == W_TRAIN_NUM) &&
(it->bbox[3] - it->bbox[1]) < MyYaml::GetIns()->GetIntValue("gc_num_frame_height"))
(it->bbox[3] - it->bbox[1]) < MyYaml::GetIns()->GetIntValue("gc_num_frame_height"))
{
LogWarn << "疑似误识别到远股道车号,帧号:" << pProcessData->iFrameId
<< "大框高度:" << (it->bbox[3] - it->bbox[1]);
@ -282,7 +307,7 @@ void TrainStepOneEngine::FilterInvalidInfo(std::vector<stDetection> &vecRet, std
}
if ((it->class_id == 1 || it->class_id == TRAIN_PRO)
&& (it->bbox[3] - it->bbox[1]) < MyYaml::GetIns()->GetIntValue("gc_pro_frame_height")) {
&& (it->bbox[3] - it->bbox[1]) < MyYaml::GetIns()->GetIntValue("gc_pro_frame_height")) {
LogWarn << "疑似误识别到远股道属性,帧号:" << pProcessData->iFrameId
<< "大框高度:" << (it->bbox[3] - it->bbox[1]);
it = vecRet.erase(it);
@ -321,7 +346,7 @@ void TrainStepOneEngine::FilterInvalidInfo(std::vector<stDetection> &vecRet, std
int iCenterY = pProcessData->iHeight / 2;
if (iHeight0 < iCenterY && iHeight1 < iCenterY) //非平车
{
if (!((vecRet[0].class_id >= 9 && vecRet[0].class_id <= 17 && vecRet[0].class_id != 15) || vecRet[0].class_id == U_TRAIN_SPACE) &&
if (!((vecRet[0].class_id >= 9 && vecRet[0].class_id <= 17 && vecRet[0].class_id != 15) || vecRet[0].class_id == U_TRAIN_SPACE) &&
!((vecRet[1].class_id >= 9 && vecRet[1].class_id <= 17 && vecRet[1].class_id != 15) || vecRet[1].class_id == U_TRAIN_SPACE))
{
LogDebug << " frameId:" << pProcessData->iFrameId << " no space";
@ -388,7 +413,7 @@ APP_ERROR TrainStepOneEngine::Process()
{
if (!bUseEngine_)
{
LogWarn << "engineId_:" << engineId_ << " not use engine";
LogInfo << "engineId_:" << engineId_ << " not use engine";
return APP_ERR_OK;
}
int iRet = APP_ERR_OK;
@ -409,7 +434,7 @@ APP_ERROR TrainStepOneEngine::Process()
pPostData->iModelType = MODELTYPE_NUM;
pPostData->nMonitorState = pProcessData->nMonitorState; //来车检测的四个分类
//获取图片
//获取图片
if (pProcessData->iStatus == TRAINSTATUS_RUN || pProcessData->bIsEnd)
{
if (pProcessData->pData != nullptr && pProcessData->iSize != 0)
@ -418,10 +443,7 @@ APP_ERROR TrainStepOneEngine::Process()
//进行推理
std::vector<stDetection> res;
//auto start = std::chrono::system_clock::now(); //计时开始
yolov5model.YoloV5ClearityInferenceModel(img, res);
//auto end = std::chrono::system_clock::now();
//LogInfo << "nopr1 inference time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms";
//过滤无效信息
FilterInvalidInfo(res, pProcessData);