VCarContainer/engine/Step1InferenceEngine/ContainerStep1InferenceEngi...

337 lines
12 KiB
C++
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

#include "ContainerStep1InferenceEngine.h"

#include <limits>

//#include "myqueue.h"
using namespace ai_matrix;
namespace
{
// Comparator for std::sort: orders detections by ascending left-edge
// x coordinate (bbox[0]), i.e. left-to-right across the frame.
bool CompareX(const stDetection &a, const stDetection &b)
{
return a.bbox[0] < b.bbox[0];
}
}
// Construction/destruction is intentionally empty: all resource setup and
// teardown happens in Init()/DeInit() (engine lifecycle is framework-driven).
ContainerStep1InferenceEngine::ContainerStep1InferenceEngine() {}
ContainerStep1InferenceEngine::~ContainerStep1InferenceEngine() {}
/**
 * Initialise the engine: bind the input port name, load data-source / model /
 * identify configuration, verify the model file is readable, then load it.
 *
 * @return APP_ERR_OK on success (or when no data source is configured for
 *         this engineId_, which is treated as a benign no-op);
 *         APP_ERR_COMM_READ_FAIL when the model file is missing/unreadable;
 *         otherwise the error code propagated from initModel().
 */
APP_ERROR ContainerStep1InferenceEngine::Init()
{
    // Queue port naming convention: "<engineName>_<engineId>_0".
    strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0";
    std::vector<DataSourceConfig> vecDataSourceConfig = Config::getins()->getAllDataSourceConfig();
    if (vecDataSourceConfig.size() <= this->engineId_)
    {
        // No data source configured for this engine instance: warn and exit
        // cleanly so the framework can deinitialise this engine.
        LogWarn << " -- " << engineName_ << "_" << engineId_ << " dataSource no set, Engine DeInit";
        return APP_ERR_OK;
    }
    this->dataSourceConfig_ = vecDataSourceConfig.at(engineId_);
    this->modelConfig_ = Config::getins()->getModelByContainerStep1Config();
    this->identifyConfig_ = Config::getins()->getIdentifyConfig();
    // Check the model file exists and is readable before attempting to load it.
    int iFolderExist = access(modelConfig_.strModelPath.c_str(), R_OK);
    if (iFolderExist == -1)
    {
        LogError << "模型:" << modelConfig_.strModelPath << " 不存在!";
        // BUG FIX: was `return false;` — in an APP_ERROR-returning function that
        // converts to 0 (== APP_ERR_OK), so a missing model reported success.
        return APP_ERR_COMM_READ_FAIL;
    }
    // Derive model-shape parameters from configuration.
    class_num = this->modelConfig_.vecClass.size();
    score_threshold = this->modelConfig_.fScoreThreshold;
    input_size = GET_INPUT_SIZE(model_width, model_height);
    output_size = GET_OUTPUT_SIZE(model_width, model_height, clear_num, class_num);
    det_size = clear_num + class_num + 5;
    int ret = initModel();
    if (ret != APP_ERR_OK)
    {
        LogError << "Failed to read model info, ret = " << ret;
        return ret;
    }
    LogInfo << "Step1InferenceEngine Init ok";
    return APP_ERR_OK;
}
/**
 * Populate the model descriptor from the engine's configured parameters and
 * initialise the YoloV5 clearity inference model.
 *
 * @return APP_ERR_OK on success; APP_ERR_COMM_READ_FAIL when the underlying
 *         YoloV5ClearityInferenceInit call fails.
 */
APP_ERROR ContainerStep1InferenceEngine::initModel()
{
    // Clearity-specific parameters (class count, clarity bins, per-detection
    // record size, score/NMS thresholds).
    modelinfo.yolov5ClearityModelParam.uiClassNum = class_num;
    modelinfo.yolov5ClearityModelParam.uiClearNum = clear_num;
    modelinfo.yolov5ClearityModelParam.uiDetSize = det_size;
    modelinfo.yolov5ClearityModelParam.fScoreThreshold = score_threshold;
    modelinfo.yolov5ClearityModelParam.fNmsThreshold = nms_threshold;
    // Common model I/O geometry and blob names.
    modelinfo.modelCommonInfo.uiModelWidth = model_width;
    modelinfo.modelCommonInfo.uiModelHeight = model_height;
    modelinfo.modelCommonInfo.uiInputSize = input_size;
    modelinfo.modelCommonInfo.uiOutputSize = output_size;
    modelinfo.modelCommonInfo.uiChannel = INPUT_CHANNEL;
    modelinfo.modelCommonInfo.uiBatchSize = batch_size;
    modelinfo.modelCommonInfo.strInputBlobName = INPUT_BLOB_NAME;
    modelinfo.modelCommonInfo.strOutputBlobName = OUTPUT_BLOB_NAME;
    string strModelName;
    int nRet = yolov5model.YoloV5ClearityInferenceInit(&modelinfo,
                                                       strModelName,
                                                       this->modelConfig_.strModelPath);
    if (nRet != 0)
    {
        // FIX: log message previously said "YoloV5ClassifyInferenceInit",
        // which is not the function that failed.
        LogError << "YoloV5ClearityInferenceInit nRet:" << nRet;
        return APP_ERR_COMM_READ_FAIL;
    }
    return APP_ERR_OK;
}
// Release the resources held by the YoloV5 clearity model. Counterpart of
// Init(); always reports success.
APP_ERROR ContainerStep1InferenceEngine::DeInit()
{
yolov5model.YoloV5ClearityInferenceDeinit();
LogInfo << "Step1InferenceEngine DeInit ok";
return APP_ERR_OK;
}
/**
* 获取第1步得分最高框
* inParam : std::vector<stDetection> &vecResult 推理符合结果
* outParam: std::vector<stDetection> &vecResult 每个类别得分最高结果
* return : N/A
*/
void ContainerStep1InferenceEngine::getMaxScoreResult(std::vector<stDetection> &vecResult)
{
if (vecResult.size() < 2)
{
return;
}
std::map<ModelTarget, std::vector<stDetection>> mapResult;
for (auto stDTemp : vecResult)
{
switch (stDTemp.class_id) {
case T_CONTAINER:
mapResult[T_CONTAINER].emplace_back(stDTemp);
break;
case MIRROR_CONTAINER:
mapResult[MIRROR_CONTAINER].emplace_back(stDTemp);
break;
default:
break;
}
}
//清空之前的结果
vecResult.clear();
// 每个类别中,获取得分最高的框
for (auto & iter : mapResult)
{
int iMaxPos = -1;
for (int i = 0; i < iter.second.size(); i++)
{
if (iMaxPos == -1)
{
iMaxPos = i;
}
else if (iter.second.at(i).class_conf > iter.second.at(iMaxPos).class_conf)
{
iMaxPos = i;
}
}
if (iMaxPos >= 0)
{
vecResult.emplace_back(iter.second.at(iMaxPos));
}
}
}
/**
 * Set the target type on the detection result.
 * NOTE(review): currently an empty stub — it performs no work. The original
 * header referred to a `PostSubData` parameter, which does not match the
 * actual `SingleData` signature; presumably left over from an earlier
 * revision — confirm whether this hook is still needed.
 * inParam : SingleData &singleData : inference result
 * outParam: SingleData &singleData : inference result
 * return  : N/A
 */
void ContainerStep1InferenceEngine::getTargetType(SingleData &singleData)
{
}
/**
 * Filter invalid detections out of the inference result, in place.
 *
 * Drops, in order:
 *   1. boxes outside the configured identify area (logged);
 *   2. mirrored (reverse-direction) container boxes;
 *   3. boxes shorter than iTargetMinHeight (non data-source-0 only);
 *   4. boxes narrower than iTargetMinWidth;
 *   5. boxes whose top edge lies below iTargetMinY (non data-source-0 only).
 *
 * inParam : std::vector<stDetection> &vecInferenceResult : detection results
 *         : std::shared_ptr<VDetectInfo> &pVDetectInfo   : frame metadata
 * outParam: N/A (vecInferenceResult is modified in place)
 * return  : N/A
 */
void ContainerStep1InferenceEngine::filterInvalidInfo(std::vector<stDetection> &vecInferenceResult,
                                                      std::shared_ptr<VDetectInfo> &pVDetectInfo)
{
    // FIX: removed unused local `vecSpaceInfo`; condensed dead commented-out
    // log statements into the per-rule comments below.
    for (auto it = vecInferenceResult.begin(); it != vecInferenceResult.end();)
    {
        // Rule 1: box must lie entirely inside the configured identify
        // rectangle [x1, y1, x2, y2]; out-of-area boxes are logged and dropped.
        if (!(it->bbox[0] >= this->dataSourceConfig_.vecIdentifyAreas[0] &&
              it->bbox[1] >= this->dataSourceConfig_.vecIdentifyAreas[1] &&
              it->bbox[2] <= this->dataSourceConfig_.vecIdentifyAreas[2] &&
              it->bbox[3] <= this->dataSourceConfig_.vecIdentifyAreas[3]))
        {
            LogDebug << " 数据源:" << pVDetectInfo->iDataSource
                     << " frameId:" << pVDetectInfo->iFrameId
                     << " 类别:" << it->class_id
                     << " 坐标:[" << it->bbox[0]
                     << "," << it->bbox[1]
                     << "],[" << it->bbox[2]
                     << "," << it->bbox[3]
                     << "]"
                     << " 超出识别区域-识别区域:("
                     << this->dataSourceConfig_.vecIdentifyAreas[0] << ","
                     << this->dataSourceConfig_.vecIdentifyAreas[1] << "),("
                     << this->dataSourceConfig_.vecIdentifyAreas[2] << ","
                     << this->dataSourceConfig_.vecIdentifyAreas[3] << ")";
            it = vecInferenceResult.erase(it);
            continue;
        }
        // Rule 2: discard reverse-direction (mirrored) containers.
        if (it->class_id == MIRROR_CONTAINER)
        {
            it = vecInferenceResult.erase(it);
            continue;
        }
        // Rule 3: discard boxes below the minimum height — likely not the
        // target region. Data source 0 is exempt.
        if ((it->bbox[3] - it->bbox[1]) < this->identifyConfig_.iTargetMinHeight && pVDetectInfo->iDataSource != 0)
        {
            it = vecInferenceResult.erase(it);
            continue;
        }
        // Rule 4: discard boxes below the minimum width.
        if ((it->bbox[2] - it->bbox[0]) < this->identifyConfig_.iTargetMinWidth)
        {
            it = vecInferenceResult.erase(it);
            continue;
        }
        // Rule 5: discard boxes whose top-left Y sits below the configured
        // limit — likely a misdetection. Data source 0 is exempt.
        if (pVDetectInfo->iDataSource != 0 && it->bbox[1] > this->identifyConfig_.iTargetMinY)
        {
            it = vecInferenceResult.erase(it);
            continue;
        }
        ++it;
    }
}
/**
 * Main engine loop: pop frames from the input queue, run YoloV5 clearity
 * inference, filter/sort the detections, pick the detection whose box centre
 * is nearest the image centre, and push the result downstream.
 *
 * Runs until isStop_ is set. End-of-stream frames (bIsEnd) are forwarded
 * immediately; empty images are skipped.
 *
 * @return APP_ERR_OK when the loop exits.
 */
APP_ERROR ContainerStep1InferenceEngine::Process()
{
    // FIX: removed unused local `iRet`.
    while (!isStop_)
    {
        std::shared_ptr<void> pVoidData0 = nullptr;
        inputQueMap_[strPort0_]->pop(pVoidData0);
        if (nullptr == pVoidData0)
        {
            usleep(1000); // queue empty: back off 1 ms
            continue;
        }
        std::shared_ptr<VDetectInfo> pVDetectInfo = std::static_pointer_cast<VDetectInfo>(pVoidData0);
        std::shared_ptr<InferenceResultData> pInferenceResultData = std::make_shared<InferenceResultData>();
        pInferenceResultData->iDataSource = pVDetectInfo->iDataSource;
        pInferenceResultData->bIsEnd = pVDetectInfo->bIsEnd;
        pInferenceResultData->strDetectDate = pVDetectInfo->strDetectDate;
        pInferenceResultData->strDetectTime = pVDetectInfo->strDetectTime;
        if (pVDetectInfo->bIsEnd)
        {
            // End-of-stream marker: forward as-is without inference.
            outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pInferenceResultData), true);
            continue;
        }
        if (pVDetectInfo->cvImage.empty())
        {
            usleep(1000); // no image payload: back off 1 ms
            continue;
        }
        // Run inference on the frame.
        std::vector<stDetection> vecInferenceResult;
        yolov5model.YoloV5ClearityInferenceModel(pVDetectInfo->cvImage, vecInferenceResult);
        // Drop invalid detections, then order the rest left-to-right.
        this->filterInvalidInfo(vecInferenceResult, pVDetectInfo);
        std::sort(vecInferenceResult.begin(),
                  vecInferenceResult.end(),
                  CompareX);
        pInferenceResultData->iFrameId = pVDetectInfo->iFrameId;
        // FIX: removed redundant re-assignment of strDetectDate/strDetectTime
        // (already copied above, before the bIsEnd check).
        pInferenceResultData->cvImage = pVDetectInfo->cvImage;
        // Select the detection whose box centre is closest to the image
        // centre, comparing squared Euclidean distance.
        // NOTE(review): the original comment claimed ties prefer the rightmost
        // box, but the strict `<` comparison keeps the first (leftmost after
        // the sort) candidate — confirm which behaviour is intended.
        float fCenterX = IMAGE_WIDTH / 2;
        float fCenterY = IMAGE_HEIGHT / 2;
        // FIX: sentinel was 9999999.0f, which valid squared distances on large
        // frames could exceed; use the float maximum instead.
        float fMinDistSq = std::numeric_limits<float>::max();
        for (auto &inferenceResult : vecInferenceResult)
        {
            float fBoxCenterX = (inferenceResult.bbox[0] + inferenceResult.bbox[2]) / 2;
            float fBoxCenterY = (inferenceResult.bbox[1] + inferenceResult.bbox[3]) / 2;
            float fDx = fBoxCenterX - fCenterX;
            float fDy = fBoxCenterY - fCenterY;
            // FIX: compute the squared distance once with plain multiplies;
            // the original evaluated std::pow(...,2) twice per candidate.
            float fDistSq = fDx * fDx + fDy * fDy;
            if (fDistSq < fMinDistSq)
            {
                SingleData singledata;
                singledata.iTargetType = CONTAINER;
                singledata.iClassId = inferenceResult.class_id;
                singledata.fScore = inferenceResult.class_conf;
                singledata.fLTX = inferenceResult.bbox[0];
                singledata.fLTY = inferenceResult.bbox[1];
                singledata.fRBX = inferenceResult.bbox[2];
                singledata.fRBY = inferenceResult.bbox[3];
                // NOTE(review): stores clear_id into fClear while the (removed)
                // debug log printed clear_conf — confirm whether the clarity
                // *score* (clear_conf) was intended here.
                singledata.fClear = inferenceResult.clear_id;
                pInferenceResultData->singleData = singledata;
                fMinDistSq = fDistSq;
            }
        }
        outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pInferenceResultData), true);
    }
    return APP_ERR_OK;
}