//
// Created by matrixai on 11/9/24.
//

#include "CornerInferenceEngine.h"
#include <opencv2/opencv.hpp>

using namespace ai_matrix;

CornerInferenceEngine::CornerInferenceEngine() {}

CornerInferenceEngine::~CornerInferenceEngine() {}
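
// Initialise the corner inference engine: bind the input/output port name, load the
// data-source, corner-model and identify configurations, verify the model file, derive
// the YOLOv5 tensor dimensions and initialise the model.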
APP_ERROR CornerInferenceEngine::Init()
{
    strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0";
    std::vector<DataSourceConfig> vecDataSourceConfig = Config::getins()->getAllDataSourceConfig();
    if (vecDataSourceConfig.size() <= this->engineId_)
    {
        LogWarn << " -- " << engineName_ << "_" << engineId_ << " dataSource not set, Engine DeInit";
        return APP_ERR_OK;
    }

    this->dataSourceConfig_ = vecDataSourceConfig.at(engineId_);
    this->modelConfig_ = Config::getins()->getModelByCornerConfig();
    this->identifyConfig_ = Config::getins()->getIdentifyConfig();
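    // Verify that the model file exists and is readable before loading it.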
    int iFolderExist = access(modelConfig_.strModelPath.c_str(), R_OK);
    if (iFolderExist == -1)
    {
        LogError << "Model: " << modelConfig_.strModelPath << " does not exist!";
        return APP_ERR_COMM_READ_FAIL;
    }
    class_num = this->modelConfig_.vecClass.size();
    score_threshold = this->modelConfig_.fScoreThreshold;
    input_size = GET_INPUT_SIZE(model_width, model_height);
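    // det_size follows the standard YOLOv5 per-detection layout:
    // 4 bbox values + 1 objectness score + class_num class scores.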
    det_size = class_num + 5;
    LogWarn << "input_size:" << input_size << "\n"
            << "output_size:" << output_size << "\n"
            << "class_num:" << class_num << "\n"
            << "det_size:" << det_size;

    int ret = initModel();
    if (ret != APP_ERR_OK)
    {
        LogError << "Failed to read model info, ret = " << ret;
        return ret;
    }

    LogInfo << "CornerInferenceEngine Init ok";
    return APP_ERR_OK;
}
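
// Fill the YOLOv5 model descriptor (class count, detection size, thresholds,
// input/output tensor geometry and blob names) and initialise the inference model.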
APP_ERROR CornerInferenceEngine::initModel()
{
    modelinfo.yolov5ModelParam.uiClassNum = class_num;
    modelinfo.yolov5ModelParam.uiDetSize = det_size;
    modelinfo.yolov5ModelParam.fScoreThreshold = score_threshold;
    modelinfo.yolov5ModelParam.fNmsThreshold = nms_threshold;

    modelinfo.modelCommonInfo.uiModelWidth = model_width;
    modelinfo.modelCommonInfo.uiModelHeight = model_height;
    modelinfo.modelCommonInfo.uiInputSize = input_size;
    modelinfo.modelCommonInfo.uiOutputSize = output_size;
    modelinfo.modelCommonInfo.uiChannel = INPUT_CHANNEL;
    modelinfo.modelCommonInfo.uiBatchSize = batch_size;
    modelinfo.modelCommonInfo.strInputBlobName = INPUT_BLOB_NAME;
    modelinfo.modelCommonInfo.strOutputBlobName = OUTPUT_BLOB_NAME;

    string strModelName;

    int nRet = yolov5model.YoloV5InferenceInit(&modelinfo,
                                               strModelName,
                                               this->modelConfig_.strModelPath);
    if (nRet != 0)
    {
        LogError << "YoloV5InferenceInit nRet:" << nRet;
        return APP_ERR_COMM_READ_FAIL;
    }
    return APP_ERR_OK;
}

APP_ERROR CornerInferenceEngine::DeInit()
{
    // yolov5model.YoloV5ClearityInferenceDeinit();
    yolov5model.YoloV5InferenceDeinit();
    LogInfo << "CornerInferenceEngine DeInit ok";
    return APP_ERR_OK;
}
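
// Worker loop: pop VDetectInfo frames from the input queue, run YOLOv5 corner
// inference on each image, and push the detections downstream as InferenceResultData.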
APP_ERROR CornerInferenceEngine::Process()
{
    int iRet = APP_ERR_OK;

    while (!isStop_)
    {
        std::shared_ptr<void> pVoidData0 = nullptr;
        inputQueMap_[strPort0_]->pop(pVoidData0);
        if (nullptr == pVoidData0)
        {
            usleep(1000); // 1ms
            continue;
        }

        std::shared_ptr<VDetectInfo> pVDetectInfo = std::static_pointer_cast<VDetectInfo>(pVoidData0);

        std::shared_ptr<InferenceResultData> pInferenceResultData = std::make_shared<InferenceResultData>();
        pInferenceResultData->iDataSource = pVDetectInfo->iDataSource;
        pInferenceResultData->bIsEnd = pVDetectInfo->bIsEnd;
        pInferenceResultData->strDetectDate = pVDetectInfo->strDetectDate;
        pInferenceResultData->strDetectTime = pVDetectInfo->strDetectTime;
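        // Forward end-of-stream markers downstream without running inference.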
        if (pVDetectInfo->bIsEnd)
        {
            outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pInferenceResultData), true);
            continue;
        }
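
        // Skip frames that carry no image data.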
        if (pVDetectInfo->cvImage.empty())
        {
            usleep(1000); // 1ms
            continue;
        }

        // Run inference on the frame.
        std::vector<stDetection> vecInferenceResult;
        // yolov5model.YoloV5ClearityInferenceModel(pVDetectInfo->cvImage, vecInferenceResult);
        yolov5model.YoloV5InferenceModel(pVDetectInfo->cvImage, vecInferenceResult);

        // Filter out invalid detections.
        // this->filterInvalidInfo(vecInferenceResult, pVDetectInfo);

        // this->getMaxScoreResult(vecInferenceResult);

        pInferenceResultData->iFrameId = pVDetectInfo->iFrameId;
        pInferenceResultData->strDetectDate = pVDetectInfo->strDetectDate;
        pInferenceResultData->strDetectTime = pVDetectInfo->strDetectTime;
        pInferenceResultData->cvImage = pVDetectInfo->cvImage;
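
        // Keep only class ids 0 and 1; map them to corner class ids (offset by 10)
        // and record the bounding box, confidence and clarity for each detection.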
        for (size_t j = 0; j < vecInferenceResult.size(); j++)
        {
            if (vecInferenceResult[j].class_id < 0 || vecInferenceResult[j].class_id > 1)
            {
                continue;
            }
            SingleData singledata;
            singledata.iTargetType = CORNER;
            singledata.iClassId = vecInferenceResult[j].class_id + 10;
            singledata.fScore = vecInferenceResult[j].class_conf;
            singledata.fLTX = vecInferenceResult[j].bbox[0];
            singledata.fLTY = vecInferenceResult[j].bbox[1];
            singledata.fRBX = vecInferenceResult[j].bbox[2];
            singledata.fRBY = vecInferenceResult[j].bbox[3];
            singledata.fClear = vecInferenceResult[j].clear_id;

            pInferenceResultData->vecSingleData.emplace_back(singledata);

            // LogDebug << " frame:" << pInferenceResultData->iFrameId
            //          << " data source:" << pInferenceResultData->iDataSource
            //          << " --iClassId:" << singledata.iClassId
            //          << " iLine:" << singledata.iLine
            //          << " confidence=" << singledata.fScore
            //          << " lx=" << singledata.fLTX
            //          << " ly=" << singledata.fLTY
            //          << " rx=" << singledata.fRBX
            //          << " ry=" << singledata.fRBY
            //          << " clear:" << singledata.fClear;
        }
        outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pInferenceResultData), true);
    }
    return APP_ERR_OK;
}