#include "ContainerStep1InferenceEngine.h"
#include <unistd.h>  // access(), usleep() — NOTE(review): original include line was mangled; confirm header
//#include "myqueue.h"

using namespace ai_matrix;

ContainerStep1InferenceEngine::ContainerStep1InferenceEngine() {}

ContainerStep1InferenceEngine::~ContainerStep1InferenceEngine() {}

/**
 * @brief Load configuration, verify the model file exists and initialize the model.
 * @return APP_ERR_OK on success, otherwise an error code.
 */
APP_ERROR ContainerStep1InferenceEngine::Init()
{
    this->modelConfig_ = Config::getins()->getModelByContainerStep1Config();
    this->dataSourceConfig_ = Config::getins()->getDataSourceConfig();
    this->identifyConfig_ = Config::getins()->getIdentifyConfig();

    strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0";

    // Model file must exist and be readable before we try to load it.
    int iFolderExist = access(modelConfig_.strModelPath.c_str(), R_OK);
    if (iFolderExist == -1)
    {
        LogError << "模型:" << modelConfig_.strModelPath << " 不存在!";
        // BUG FIX: was `return false;` — `false` converts to 0 (== APP_ERR_OK),
        // so a missing model file was silently reported as success.
        return APP_ERR_COMM_READ_FAIL;
    }

    class_num = this->modelConfig_.vecClass.size();
    score_threshold = this->modelConfig_.fScoreThreshold;

    int ret = initModel();
    if (ret != APP_ERR_OK)
    {
        LogError << "Failed to read model info, ret = " << ret;
        return ret;
    }
    LogInfo << "ContainerStep1InferenceEngine Init ok";
    return APP_ERR_OK;
}

/**
 * @brief Fill the model parameter struct and initialize the YoloV5 clearity model.
 * @return APP_ERR_OK on success, APP_ERR_COMM_READ_FAIL if model init fails.
 */
APP_ERROR ContainerStep1InferenceEngine::initModel()
{
    modelinfo.yolov5ClearityModelParam.uiClassNum = class_num;
    modelinfo.yolov5ClearityModelParam.uiClearNum = clear_num;
    modelinfo.yolov5ClearityModelParam.uiDetSize = det_size;
    modelinfo.yolov5ClearityModelParam.fScoreThreshold = score_threshold;
    modelinfo.yolov5ClearityModelParam.fNmsThreshold = nms_threshold;
    modelinfo.modelCommonInfo.uiModelWidth = model_width;
    modelinfo.modelCommonInfo.uiModelHeight = model_height;
    modelinfo.modelCommonInfo.uiInputSize = input_size;
    modelinfo.modelCommonInfo.uiOutputSize = output_size;
    modelinfo.modelCommonInfo.uiChannel = INPUT_CHANNEL;
    modelinfo.modelCommonInfo.uiBatchSize = batch_size;
    modelinfo.modelCommonInfo.strInputBlobName = INPUT_BLOB_NAME;
    modelinfo.modelCommonInfo.strOutputBlobName = OUTPUT_BLOB_NAME;

    std::string strModelName = "";
    int nRet = yolov5model.YoloV5ClearityInferenceInit(&modelinfo, strModelName, this->modelConfig_.strModelPath);
    if (nRet != 0)
    {
        // BUG FIX: message previously named the wrong function ("YoloV5ClassifyInferenceInit").
        LogError << "YoloV5ClearityInferenceInit nRet:" << nRet;
        return APP_ERR_COMM_READ_FAIL;
    }
    return APP_ERR_OK;
}

/**
 * @brief Release model resources.
 * @return APP_ERR_OK always.
 */
APP_ERROR ContainerStep1InferenceEngine::DeInit()
{
    yolov5model.YoloV5ClearityInferenceDeinit();
    LogInfo << "ContainerStep1InferenceEngine DeInit ok";
    return APP_ERR_OK;
}

/**
 * @brief Filter invalid detections (out of the configured identify area, or
 *        detected while the train stage is "head" or "no train").
 * @param vecInferenceResult  in/out: detections; invalid entries are erased.
 * @param pVTrainStep1Data    frame data carrying frame id and train stage.
 * NOTE(review): template arguments were lost in the original paste; the element
 * type stDetection and the frame type TrainStep1Data are reconstructed from the
 * member accesses below — confirm against the header declaration.
 */
void ContainerStep1InferenceEngine::filterInvalidInfo(std::vector<stDetection> &vecInferenceResult,
                                                      std::shared_ptr<TrainStep1Data> &pVTrainStep1Data)
{
    for (auto it = vecInferenceResult.begin(); it != vecInferenceResult.end();)
    {
        // Drop boxes outside the identify area configured in the data source config.
        if (!(it->bbox[0] >= this->dataSourceConfig_.vecIdentifyAreas[0] &&
              it->bbox[1] >= this->dataSourceConfig_.vecIdentifyAreas[1] &&
              it->bbox[2] <= this->dataSourceConfig_.vecIdentifyAreas[2] &&
              it->bbox[3] <= this->dataSourceConfig_.vecIdentifyAreas[3]))
        {
            // BUG FIX: the last coordinate logged was vecIdentifyAreas[2] twice;
            // the bottom-right corner is ([2],[3]).
            LogDebug << "frameId:" << pVTrainStep1Data->iFrameId << " 类别:" << it->class_id
                     << " 超出识别区域-识别区域:(" << this->dataSourceConfig_.vecIdentifyAreas[0] << ","
                     << this->dataSourceConfig_.vecIdentifyAreas[1] << "),("
                     << this->dataSourceConfig_.vecIdentifyAreas[2] << ","
                     << this->dataSourceConfig_.vecIdentifyAreas[3] << ")";
            it = vecInferenceResult.erase(it);
            continue;
        }
        // Drop everything detected while the model reports the train head stage.
        if (pVTrainStep1Data->iTrainStage == MONITOR_MODEL_TRAIN_HEAD)
        {
            LogDebug << " 帧号:" << pVTrainStep1Data->iFrameId << " 大类:" << it->class_id
                     << " 识别于车头位置,无效!";
            it = vecInferenceResult.erase(it);
            continue;
        }
        // Drop everything detected while the model reports "no train".
        if (pVTrainStep1Data->iTrainStage == MONITOR_MODEL_NO_TRAIN)
        {
            LogDebug << " frameId:" << pVTrainStep1Data->iFrameId << " bigclassid:" << it->class_id
                     << " 识别于模型反馈的无车状态下,无效!";
            it = vecInferenceResult.erase(it);
            continue;
        }
        ++it;
    }
}

/**
 * @brief Keep only the highest-scoring box per category.
 * @param vecResult in: all accepted detections; out: at most one box per category.
 */
void ContainerStep1InferenceEngine::getMaxScoreResult(std::vector<stDetection> &vecResult)
{
    if (vecResult.size() < 2)
        return;

    // Bucket container boxes by category key.
    std::map<int, std::vector<stDetection>> mapResult;
    for (size_t i = 0; i < vecResult.size(); i++)
    {
        stDetection stDTemp = vecResult.at(i);
        if (stDTemp.class_id == T_CONTAINER)
        {
            mapResult[CONTAINER].emplace_back(stDTemp);
        }
    }

    // Clear the previous results.
    vecResult.clear();

    // For each category keep the box with the highest class confidence.
    for (auto iter = mapResult.begin(); iter != mapResult.end(); iter++)
    {
        int iMaxPos = -1;
        for (size_t i = 0; i < iter->second.size(); i++)
        {
            if (iMaxPos == -1)
            {
                iMaxPos = static_cast<int>(i);
            }
            else if (iter->second.at(i).class_conf > iter->second.at(iMaxPos).class_conf)
            {
                iMaxPos = static_cast<int>(i);
            }
        }
        if (iMaxPos >= 0)
        {
            vecResult.emplace_back(iter->second.at(iMaxPos));
        }
    }
}

/**
 * @brief Map the detected class id to a target type.
 * @param singleData in/out: iTargetType is set from iClassId.
 */
void ContainerStep1InferenceEngine::getTargetType(SingleData &singleData)
{
    if (singleData.iClassId == T_CONTAINER)
    {
        singleData.iTargetType = CONTAINER;
    }
}

/**
 * @brief Main loop: pop frames from the input queue, run inference, filter and
 *        reduce the detections, then push the result to the output queue.
 * @return APP_ERR_OK when the engine is stopped.
 * NOTE(review): the pointer-cast / make_shared template arguments were lost in
 * the original paste; TrainStep1Data and InferenceResultData are reconstructed
 * from the member accesses — confirm against the header declaration.
 */
APP_ERROR ContainerStep1InferenceEngine::Process()
{
    int iRet = APP_ERR_OK;
    while (!isStop_)
    {
        std::shared_ptr<void> pVoidData0 = nullptr;
        inputQueMap_[strPort0_]->pop(pVoidData0);
        if (nullptr == pVoidData0)
        {
            usleep(1000); // 1ms
            continue;
        }
        // Container detection disabled by configuration — skip the frame.
        if (!this->identifyConfig_.bContainerDetect)
        {
            usleep(1000); // 1ms
            continue;
        }

        std::shared_ptr<TrainStep1Data> pVTrainStep1Data =
            std::static_pointer_cast<TrainStep1Data>(pVoidData0);
        if (pVTrainStep1Data->cvImage.empty())
        {
            usleep(1000); // 1ms
            continue;
        }

        // Run inference on the frame image.
        std::vector<stDetection> vecInferenceResult;
        yolov5model.YoloV5ClearityInferenceModel(pVTrainStep1Data->cvImage, vecInferenceResult);

        // Filter invalid detections, then keep the best box per category.
        this->filterInvalidInfo(vecInferenceResult, pVTrainStep1Data);
        this->getMaxScoreResult(vecInferenceResult);

        std::shared_ptr<InferenceResultData> pInferenceResultData = std::make_shared<InferenceResultData>();
        pInferenceResultData->iFrameId = pVTrainStep1Data->iFrameId;
        pInferenceResultData->bIsEnd = pVTrainStep1Data->bIsEnd;
        pInferenceResultData->strTrainDate = pVTrainStep1Data->strTrainDate;
        pInferenceResultData->strTrainTime = pVTrainStep1Data->strTrainTime;

        for (size_t j = 0; j < vecInferenceResult.size(); j++)
        {
            // Only container big-boxes are forwarded.
            if (vecInferenceResult[j].class_id != T_CONTAINER)
            {
                continue;
            }
            SingleData singledata;
            singledata.iClassId = vecInferenceResult[j].class_id;
            singledata.fScore = vecInferenceResult[j].class_conf;
            singledata.fLTX = vecInferenceResult[j].bbox[0];
            singledata.fLTY = vecInferenceResult[j].bbox[1];
            singledata.fRBX = vecInferenceResult[j].bbox[2];
            singledata.fRBY = vecInferenceResult[j].bbox[3];
            singledata.fClear = vecInferenceResult[j].clear_id;
            this->getTargetType(singledata);
            pInferenceResultData->vecSingleData.emplace_back(singledata);
        }

        outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pInferenceResultData), true);
    }
    return iRet;
}