#include "TrainStep2InferenceEngine.h"

#include <opencv2/opencv.hpp>

#include "myqueue.h"

using namespace ai_matrix;

TrainStep2InferenceEngine::TrainStep2InferenceEngine() {}

TrainStep2InferenceEngine::~TrainStep2InferenceEngine() {}
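
// Init(): builds the engine's input/output port name, loads the step-2 model
// configuration, verifies that the model file is readable, and initializes the
// YOLOv5 clarity model via initModel().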
APP_ERROR TrainStep2InferenceEngine::Init()
{
    strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0";
    this->modelConfig_ = Config::getins()->getModelByTrainStep2Config();

    // Read the model info
    int iFolderExist = access(modelConfig_.strModelPath.c_str(), R_OK);
    if (iFolderExist == -1)
    {
        LogError << "Model: " << modelConfig_.strModelPath << " does not exist!";
        return APP_ERR_COMM_READ_FAIL;
    }
    class_num = this->modelConfig_.vecClass.size();
    score_threshold = this->modelConfig_.fScoreThreshold;

    int ret = initModel();
    if (ret != APP_ERR_OK)
    {
        LogError << "Failed to read model info, ret = " << ret;
        return ret;
    }

    LogInfo << "TrainStep2InferenceEngine Init ok";
    return APP_ERR_OK;
}
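
// initModel(): fills in the YOLOv5 clarity model parameters (class count, detection
// size, thresholds) and the common model description (input geometry, blob names,
// batch size), then initializes the inference session from modelConfig_.strModelPath.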
APP_ERROR TrainStep2InferenceEngine::initModel()
{
    modelinfo.yolov5ClearityModelParam.uiClassNum = class_num;
    modelinfo.yolov5ClearityModelParam.uiClearNum = clear_num;
    modelinfo.yolov5ClearityModelParam.uiDetSize = det_size;
    modelinfo.yolov5ClearityModelParam.fScoreThreshold = score_threshold;
    modelinfo.yolov5ClearityModelParam.fNmsThreshold = nms_threshold;

    modelinfo.modelCommonInfo.uiModelWidth = model_width;
    modelinfo.modelCommonInfo.uiModelHeight = model_height;
    modelinfo.modelCommonInfo.uiInputSize = input_size;
    modelinfo.modelCommonInfo.uiOutputSize = output_size;
    modelinfo.modelCommonInfo.uiChannel = INPUT_CHANNEL;
    modelinfo.modelCommonInfo.uiBatchSize = batch_size;
    modelinfo.modelCommonInfo.strInputBlobName = INPUT_BLOB_NAME;
    modelinfo.modelCommonInfo.strOutputBlobName = OUTPUT_BLOB_NAME;

    string strModelName = "";

    int nRet = yolov5model.YoloV5ClearityInferenceInit(&modelinfo,
                                                       strModelName,
                                                       this->modelConfig_.strModelPath);
    if (nRet != 0)
    {
        LogError << "YoloV5ClearityInferenceInit failed, nRet:" << nRet;
        return APP_ERR_COMM_READ_FAIL;
    }
    return APP_ERR_OK;
}
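
// DeInit(): releases the YOLOv5 clarity inference resources.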
APP_ERROR TrainStep2InferenceEngine::DeInit()
{
    yolov5model.YoloV5ClearityInferenceDeinit();
    LogInfo << "TrainStep2InferenceEngine DeInit ok";
    return APP_ERR_OK;
}
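
// Process(): main worker loop. Pops step-1 results from the input queue, crops each
// detected region out of the frame, runs the YOLOv5 clarity model on the crop, maps
// the crop-local detections back to full-image coordinates via resetLocation(), and
// pushes the aggregated step-2 results to the output queue.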
APP_ERROR TrainStep2InferenceEngine::Process()
{
    int iRet = APP_ERR_OK;

    while (!isStop_)
    {
        std::shared_ptr<void> pVoidData0 = nullptr;
        inputQueMap_[strPort0_]->pop(pVoidData0);
        if (nullptr == pVoidData0)
        {
            usleep(1000); // 1ms
            continue;
        }

        std::shared_ptr<VStep2InputData> pVStep2InputData = std::static_pointer_cast<VStep2InputData>(pVoidData0);
        std::shared_ptr<VStep2OutputData> pVStep2OutputData = std::make_shared<VStep2OutputData>();
        pVStep2OutputData->strTrainDate = pVStep2InputData->strTrainDate;
        pVStep2OutputData->strTrainTime = pVStep2InputData->strTrainTime;
        pVStep2OutputData->iFrameId = pVStep2InputData->iFrameId;
        pVStep2OutputData->bIsEnd = pVStep2InputData->bIsEnd;

        if (pVStep2InputData->cvImage.empty())
        {
            usleep(1000); // 1ms
            continue;
        }

        // LogWarn << "-- 0 -->" << pVStep2InputData->vecSingleData.size();
        for (size_t i = 0; i < pVStep2InputData->vecSingleData.size(); i++)
        {
            Step2ResultData step2ResultData;
            step2ResultData.fLTX = pVStep2InputData->vecSingleData[i].fLTX;
            step2ResultData.fLTY = pVStep2InputData->vecSingleData[i].fLTY;
            step2ResultData.fRBX = pVStep2InputData->vecSingleData[i].fRBX;
            step2ResultData.fRBY = pVStep2InputData->vecSingleData[i].fRBY;
            step2ResultData.iClassId = pVStep2InputData->vecSingleData[i].iClassId;
            step2ResultData.fScore = pVStep2InputData->vecSingleData[i].fScore;
            step2ResultData.iTrainIndex = pVStep2InputData->vecSingleData[i].iTrainIndex;

            if (pVStep2InputData->vecSingleData[i].iTargetType >= SPACE)
            {
                pVStep2OutputData->vecStep2ResultData.emplace_back(step2ResultData);
                continue;
            }

            // LogWarn << "frameId:" << pVStep2InputData->iFrameId
            //         << " ["
            //         << pVStep2InputData->vecSingleData[i].fLTX
            //         << ","
            //         << pVStep2InputData->vecSingleData[i].fLTY
            //         << "],["
            //         << pVStep2InputData->vecSingleData[i].fRBX
            //         << ","
            //         << pVStep2InputData->vecSingleData[i].fRBY
            //         << "]";

            cv::Rect rect(cv::Point(pVStep2InputData->vecSingleData[i].fLTX, pVStep2InputData->vecSingleData[i].fLTY),
                          cv::Point(pVStep2InputData->vecSingleData[i].fRBX, pVStep2InputData->vecSingleData[i].fRBY));
            cv::Mat image = pVStep2InputData->cvImage(rect).clone();

            // Run inference on the cropped region
            std::vector<stDetection> vecInferenceResult;
            auto start = std::chrono::system_clock::now(); // start timing
            yolov5model.YoloV5ClearityInferenceModel(image, vecInferenceResult, 2);
            auto end = std::chrono::system_clock::now();

            for (size_t j = 0; j < vecInferenceResult.size(); j++)
            {
                SingleData singledata;
                singledata.iLine = vecInferenceResult[j].clear_id;
                singledata.iClassId = vecInferenceResult[j].class_id;
                singledata.fScore = vecInferenceResult[j].class_conf;
                singledata.fLTX = vecInferenceResult[j].bbox[0];
                singledata.fLTY = vecInferenceResult[j].bbox[1];
                singledata.fRBX = vecInferenceResult[j].bbox[2];
                singledata.fRBY = vecInferenceResult[j].bbox[3];
                singledata.fClear = vecInferenceResult[j].clear_id;

                this->resetLocation(singledata, pVStep2InputData->vecSingleData[i]);
                step2ResultData.vecSingleData.emplace_back(singledata);

                // LogDebug << "frameId:" << pVStep2InputData->iFrameId
                //          << " --iClassId:" << singledata.iClassId
                //          << " iLine:" << singledata.iLine
                //          << " score=" << singledata.fScore
                //          << " ["
                //          << singledata.fLTX << "," << singledata.fLTY
                //          << "],["
                //          << singledata.fRBX << "," << singledata.fRBY
                //          << "]";
            }

            pVStep2OutputData->vecStep2ResultData.emplace_back(step2ResultData);
        }
        outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pVStep2OutputData), true);
    }
    return APP_ERR_OK;
}
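
// resetLocation(): maps a detection from crop-local coordinates back to full-image
// coordinates by scaling with fResizeRatio and offsetting by the step-1 box's
// top-left corner, then clamps the result to the image dimensions.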
void TrainStep2InferenceEngine::resetLocation(SingleData &singleData, SingleData &step1SingleData, float fResizeRatio)
{
    singleData.fLTX = singleData.fLTX * fResizeRatio + step1SingleData.fLTX;
    singleData.fLTY = singleData.fLTY * fResizeRatio + step1SingleData.fLTY;
    singleData.fRBX = singleData.fRBX * fResizeRatio + step1SingleData.fLTX;
    singleData.fRBY = singleData.fRBY * fResizeRatio + step1SingleData.fLTY;

    singleData.fLTX = (singleData.fLTX < IMAGE_WIDTH) ? singleData.fLTX : IMAGE_WIDTH;
    singleData.fLTY = (singleData.fLTY < IMAGE_HEIGHT) ? singleData.fLTY : IMAGE_HEIGHT;
    singleData.fRBX = (singleData.fRBX < IMAGE_WIDTH) ? singleData.fRBX : IMAGE_WIDTH;
    singleData.fRBY = (singleData.fRBY < IMAGE_HEIGHT) ? singleData.fRBY : IMAGE_HEIGHT;
}