VTrain/engine/Step2InferenceEngine/ContainerStep2InferenceEngi...

195 lines
8.0 KiB
C++
Raw Permalink Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

#include "ContainerStep2InferenceEngine.h"

#include <unistd.h>

#include <algorithm>
#include <memory>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>
using namespace ai_matrix;
// Construction is trivial; all real setup is deferred to Init().
ContainerStep2InferenceEngine::ContainerStep2InferenceEngine() = default;
// Teardown is performed explicitly via DeInit(); the destructor itself frees nothing.
ContainerStep2InferenceEngine::~ContainerStep2InferenceEngine() = default;
/**
 * @brief Initialize the step-2 container inference engine: load its model and
 *        identify configuration, build the port name, verify the model file
 *        exists, then initialize the YOLOv5 clearity model.
 * @return APP_ERR_OK on success; APP_ERR_COMM_READ_FAIL when the model file is
 *         missing/unreadable; otherwise the error code from initModel().
 */
APP_ERROR ContainerStep2InferenceEngine::Init()
{
    this->modelConfig_ = Config::getins()->getModelByContainerStep2Config();
    this->identifyConfig_ = Config::getins()->getIdentifyConfig();
    strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0";

    // Verify the model file is readable before attempting initialization.
    int iFolderExist = access(modelConfig_.strModelPath.c_str(), R_OK);
    if (iFolderExist == -1)
    {
        LogError << "模型:" << modelConfig_.strModelPath << " 不存在!";
        // BUG FIX: was `return false;`, which converts to 0 (== APP_ERR_OK) and
        // silently reported success even though the model file is missing.
        return APP_ERR_COMM_READ_FAIL;
    }

    class_num = this->modelConfig_.vecClass.size();
    score_threshold = this->modelConfig_.fScoreThreshold;

    int ret = initModel();
    if (ret != APP_ERR_OK)
    {
        LogError << "Failed to read model info, ret = " << ret;
        return ret;
    }
    LogInfo << "ContainerStep2InferenceEngine Init ok";
    return APP_ERR_OK;
}
/**
 * @brief Fill the model descriptor from the engine's configured parameters and
 *        initialize the YOLOv5 clearity inference wrapper with it.
 * @return APP_ERR_OK on success, APP_ERR_COMM_READ_FAIL if the wrapper's init
 *         call reports a non-zero status.
 */
APP_ERROR ContainerStep2InferenceEngine::initModel()
{
    // Clearity-specific parameters (class count comes from config in Init()).
    modelinfo.yolov5ClearityModelParam.uiClassNum = class_num;
    modelinfo.yolov5ClearityModelParam.uiClearNum = clear_num;
    modelinfo.yolov5ClearityModelParam.uiDetSize = det_size;
    modelinfo.yolov5ClearityModelParam.fScoreThreshold = score_threshold;
    modelinfo.yolov5ClearityModelParam.fNmsThreshold = nms_threshold;
    // Common tensor/blob layout parameters.
    modelinfo.modelCommonInfo.uiModelWidth = model_width;
    modelinfo.modelCommonInfo.uiModelHeight = model_height;
    modelinfo.modelCommonInfo.uiInputSize = input_size;
    modelinfo.modelCommonInfo.uiOutputSize = output_size;
    modelinfo.modelCommonInfo.uiChannel = INPUT_CHANNEL;
    modelinfo.modelCommonInfo.uiBatchSize = batch_size;
    modelinfo.modelCommonInfo.strInputBlobName = INPUT_BLOB_NAME;
    modelinfo.modelCommonInfo.strOutputBlobName = OUTPUT_BLOB_NAME;

    string strModelName = "";
    int nRet = yolov5model.YoloV5ClearityInferenceInit(&modelinfo,
                                                       strModelName,
                                                       this->modelConfig_.strModelPath);
    if (nRet != 0)
    {
        // FIX: log previously said "YoloV5ClassifyInferenceInit", which is not
        // the function that failed; corrected to match the actual call.
        LogError << "YoloV5ClearityInferenceInit nRet:" << nRet;
        return APP_ERR_COMM_READ_FAIL;
    }
    return APP_ERR_OK;
}
// Releases the YOLOv5 clearity model resources acquired in initModel().
// Always returns APP_ERR_OK; any status from the underlying deinit call is not
// checked here.
APP_ERROR ContainerStep2InferenceEngine::DeInit()
{
yolov5model.YoloV5ClearityInferenceDeinit();
LogInfo << "ContainerStep2InferenceEngine DeInit ok";
return APP_ERR_OK;
}
/**
 * @brief Map a detection box from crop-local coordinates back into full-frame
 *        coordinates: scale by the resize ratio, shift by the step-1 box's
 *        top-left corner, then clamp to the frame bounds.
 * @param singleData      in/out: detection whose corners are rewritten in place.
 * @param step1SingleData step-1 container box providing the crop origin
 *                        (read-only here; non-const ref kept for interface
 *                        compatibility).
 * @param fResizeRatio    scale factor from crop space to frame space
 *                        (default supplied by the declaration).
 */
void ContainerStep2InferenceEngine::resetLocation(SingleData &singleData, SingleData &step1SingleData, float fResizeRatio)
{
    singleData.fLTX = singleData.fLTX * fResizeRatio + step1SingleData.fLTX;
    singleData.fLTY = singleData.fLTY * fResizeRatio + step1SingleData.fLTY;
    singleData.fRBX = singleData.fRBX * fResizeRatio + step1SingleData.fLTX;
    singleData.fRBY = singleData.fRBY * fResizeRatio + step1SingleData.fLTY;
    // Clamp into [0, IMAGE_WIDTH/HEIGHT]. FIX: the original only clamped the
    // upper bound, so slightly-negative crop-local coordinates (common at the
    // edge of a detection crop) leaked negative frame coordinates downstream.
    singleData.fLTX = std::max(0.0f, std::min(singleData.fLTX, static_cast<float>(IMAGE_WIDTH)));
    singleData.fLTY = std::max(0.0f, std::min(singleData.fLTY, static_cast<float>(IMAGE_HEIGHT)));
    singleData.fRBX = std::max(0.0f, std::min(singleData.fRBX, static_cast<float>(IMAGE_WIDTH)));
    singleData.fRBY = std::max(0.0f, std::min(singleData.fRBY, static_cast<float>(IMAGE_HEIGHT)));
}
/**
 * @brief Engine main loop: pop frame packets from the input queue, run the
 *        step-2 clearity model on each step-1 container box's crop, map the
 *        results back to frame coordinates, and push a VStep2OutputData packet
 *        to the output queue. Runs until isStop_ is set.
 * @return APP_ERR_OK when the loop exits.
 */
APP_ERROR ContainerStep2InferenceEngine::Process()
{
    int iRet = APP_ERR_OK;
    while (!isStop_)
    {
        std::shared_ptr<void> pVoidData0 = nullptr;
        inputQueMap_[strPort0_]->pop(pVoidData0);
        if (nullptr == pVoidData0)
        {
            usleep(1000); // 1ms backoff when the queue yields nothing
            continue;
        }
        // Container detection disabled: drop the frame.
        // NOTE(review): no output packet is pushed for dropped frames (including
        // bIsEnd frames) — confirm downstream consumers tolerate the gap.
        if (!this->identifyConfig_.bContainerDetect)
        {
            usleep(1000); // 1ms
            continue;
        }
        // The queue payload is viewed both as step-2 input (metadata + boxes)
        // and as step-1 data (full-frame image); both casts alias one object.
        std::shared_ptr<VStep2InputData> pVStep2InputData = std::static_pointer_cast<VStep2InputData>(pVoidData0);
        std::shared_ptr<VTrainStep1Data> pVTrainStep1Data = std::static_pointer_cast<VTrainStep1Data>(pVoidData0);
        if (pVTrainStep1Data->cvImage.empty())
        {
            usleep(1000); // 1ms
            continue;
        }
        // Whole-frame inference pass. NOTE(review): its results are discarded;
        // unless the call has a warm-up/side effect this is wasted work — confirm
        // before removing.
        std::vector<stDetection> vecFrameResult;
        yolov5model.YoloV5ClearityInferenceModel(pVTrainStep1Data->cvImage, vecFrameResult);

        // Carry the frame metadata through to the output packet.
        std::shared_ptr<VStep2OutputData> pVStep2OutputData = std::make_shared<VStep2OutputData>();
        pVStep2OutputData->strTrainDate = pVStep2InputData->strTrainDate;
        pVStep2OutputData->strTrainTime = pVStep2InputData->strTrainTime;
        pVStep2OutputData->iFrameId = pVStep2InputData->iFrameId;
        pVStep2OutputData->bIsEnd = pVStep2InputData->bIsEnd;

        for (size_t i = 0; i < pVStep2InputData->vecSingleData.size(); i++)
        {
            SingleData &step1Box = pVStep2InputData->vecSingleData[i];
            // Step 1 merges two model outputs; only container boxes (target
            // type CONTAINER) are processed by this engine.
            if (step1Box.iTargetType != CONTAINER) continue;

            Step2ResultData step2ResultData;
            step2ResultData.fLTX = step1Box.fLTX;
            step2ResultData.fLTY = step1Box.fLTY;
            step2ResultData.fRBX = step1Box.fRBX;
            step2ResultData.fRBY = step1Box.fRBY;
            step2ResultData.iClassId = step1Box.iClassId;
            step2ResultData.fScore = step1Box.fScore;
            step2ResultData.iTrainIndex = step1Box.iTrainIndex;

            // FIX: clamp the crop rect to the frame. cv::Mat(rect) throws a
            // cv::Exception when the rect reaches outside the image, which
            // crashed the loop on boxes touching the frame border.
            cv::Rect rect(cv::Point(step1Box.fLTX, step1Box.fLTY),
                          cv::Point(step1Box.fRBX, step1Box.fRBY));
            rect &= cv::Rect(0, 0, pVStep2InputData->cvImage.cols, pVStep2InputData->cvImage.rows);
            if (rect.width <= 0 || rect.height <= 0)
            {
                // Nothing to crop; still emit the (empty) per-box result so the
                // output packet stays aligned with the step-1 boxes.
                pVStep2OutputData->vecStep2ResultData.emplace_back(step2ResultData);
                continue;
            }
            cv::Mat image = pVStep2InputData->cvImage(rect).clone();

            // Run the step-2 clearity model on the cropped container region.
            // (Renamed from the outer vecInferenceResult to avoid shadowing.)
            std::vector<stDetection> vecCropResult;
            yolov5model.YoloV5ClearityInferenceModel(image, vecCropResult, 2);
            for (size_t j = 0; j < vecCropResult.size(); j++)
            {
                SingleData singledata;
                singledata.iLine = vecCropResult[j].clear_id;
                singledata.iClassId = vecCropResult[j].class_id;
                singledata.fScore = vecCropResult[j].class_conf;
                singledata.fLTX = vecCropResult[j].bbox[0];
                singledata.fLTY = vecCropResult[j].bbox[1];
                singledata.fRBX = vecCropResult[j].bbox[2];
                singledata.fRBY = vecCropResult[j].bbox[3];
                singledata.fClear = vecCropResult[j].clear_id;
                // Map crop-local coordinates back to full-frame coordinates.
                this->resetLocation(singledata, step1Box);
                step2ResultData.vecSingleData.emplace_back(singledata);
            }
            pVStep2OutputData->vecStep2ResultData.emplace_back(step2ResultData);
        }
        outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pVStep2OutputData), true);
    }
    return iRet;
}