Add corner-based container splitting for the side camera covering rightward travel

This commit is contained in:
parent bdaaf2eca4
commit 0b8eadd54a
@@ -70,6 +70,8 @@ namespace ai_matrix
this->identifyConfig_.iTargetMinWidth = config_["identify"]["target_min_width"].as<int>();
this->identifyConfig_.iTargetMinY = config_["identify"]["target_min_y"].as<int>();
this->identifyConfig_.iMaxIdentifyFrame = config_["identify"]["max_identify_frame"].as<int>();
this->identifyConfig_.iMaxContainerSpaceX = config_["identify"]["max_container_space_x"].as<int>();
this->identifyConfig_.iMaxContainerSpaceY = config_["identify"]["max_container_space_y"].as<int>();

// websocket server 服务端参数
this->wSocketConfig_.bIsUse = config_["wsocket_server"]["is_use"].as<bool>();
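The hunk above reads the three new identify limits (max_identify_frame, max_container_space_x, max_container_space_y) straight from config.yaml with yaml-cpp. Below is a minimal standalone sketch of the same reads, assuming yaml-cpp; the IdentifyLimits struct, the config path and the fall-back-to-default behaviour are illustrative assumptions, not project code.

// Sketch only: standalone yaml-cpp read of the new identify keys added in this
// commit. The IdentifyLimits struct and the default values are hypothetical.
#include <yaml-cpp/yaml.h>
#include <iostream>

struct IdentifyLimits
{
    int iMaxIdentifyFrame = 600;     // max_identify_frame
    int iMaxContainerSpaceX = 600;   // max_container_space_x
    int iMaxContainerSpaceY = 500;   // max_container_space_y
};

int main()
{
    YAML::Node config = YAML::LoadFile("./config/config.yaml");  // assumed path
    const YAML::Node identify = config["identify"];
    IdentifyLimits limits;
    // Keep the compiled-in defaults when a key is missing from older configs.
    if (identify["max_identify_frame"])
        limits.iMaxIdentifyFrame = identify["max_identify_frame"].as<int>();
    if (identify["max_container_space_x"])
        limits.iMaxContainerSpaceX = identify["max_container_space_x"].as<int>();
    if (identify["max_container_space_y"])
        limits.iMaxContainerSpaceY = identify["max_container_space_y"].as<int>();
    std::cout << limits.iMaxIdentifyFrame << " "
              << limits.iMaxContainerSpaceX << " "
              << limits.iMaxContainerSpaceY << std::endl;
    return 0;
}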
@@ -88,6 +88,10 @@ namespace ai_matrix
int iTargetMinY;
// 单次识别最大帧数
int iMaxIdentifyFrame;
// 两个箱子的箱角最大差值X
int iMaxContainerSpaceX;
// 两个箱子的箱角最大差值Y
int iMaxContainerSpaceY;
};

// websocket_server 的服务端参数
@@ -1,7 +1,7 @@
# 基础控制参数
base:
# 股道名称
track_name: "1"
track_name: "1门"
# 测试模式
test_model: false
# Api 监听端口
@@ -24,43 +24,63 @@ log:

# 数据源参数
data_source:
url: "./vedio/buertai2.mp4"
# 跳帧数
skip_interval: 3
# 行驶方向 0-自动识别 1-向左 2-向右 (与“首位信息”成对存在,形成例如向左就编号在前,向右就属性在前的对应)
direction: 0
# 0-向左编号在前 1-向左属性在前 (向右行驶的情况:2-向右编号在前 3-向右属性在前)
left_first: 0
# (向左行驶的情况:0-向左编号在前 1-向左属性在前) 2-向右编号在前 3-向右属性在前
right_first: 3
# 识别区域
identify_areas: [120, 0, 1800, 1080]
- # 顶部摄像头(必须顶部摄像头)
url: "./videos/buertai2.mp4"
# 跳帧数
skip_interval: 3
# 识别区域
identify_areas: [120, 0, 1800, 1080]
# 切箱方式
divide_mode: "corner" #[corner, pixel]
# 汽车行进方向
run_direction: "down" #[up, down, left, right]
- # 侧部摄像头
url: "./videos/buertai2.mp4"
# 跳帧数
skip_interval: 3
# 识别区域
identify_areas: [ 120, 0, 1800, 1080 ]
# 切箱方式
divide_mode: "pixel" #[corner, pixel]
# 汽车行进方向
run_direction: "right" #[up, down, left, right]
- # 侧边摄像头
url: "./videos/buertai2.mp4"
# 跳帧数
skip_interval: 3
# 识别区域
identify_areas: [ 120, 0, 1800, 1080 ]
# 切箱方式
divide_mode: "pixel" #[corner, pixel]
# 汽车行进方向
run_direction: "left" #[up, down, left, right]
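In the new configuration, data_source changes from a single stream into a YAML sequence: one entry per camera (the mandatory top camera plus two side cameras), each carrying its own url, skip_interval, identify_areas, divide_mode and run_direction. A minimal sketch of reading that list with yaml-cpp follows; the DataSourceConfig struct and the loadDataSources helper are illustrative names, not the project's actual types.

// Sketch only: iterate the new data_source sequence with yaml-cpp.
#include <yaml-cpp/yaml.h>
#include <string>
#include <vector>

struct DataSourceConfig
{
    std::string strUrl;
    int iSkipInterval = 0;
    std::vector<int> vecIdentifyAreas;   // [x, y, w, h]
    std::string strDivideMode;           // "corner" or "pixel"
    std::string strRunDirection;         // "up" / "down" / "left" / "right"
};

std::vector<DataSourceConfig> loadDataSources(const YAML::Node &root)
{
    std::vector<DataSourceConfig> sources;
    for (const YAML::Node &item : root["data_source"])
    {
        DataSourceConfig cfg;
        cfg.strUrl = item["url"].as<std::string>();
        cfg.iSkipInterval = item["skip_interval"].as<int>();
        cfg.vecIdentifyAreas = item["identify_areas"].as<std::vector<int>>();
        cfg.strDivideMode = item["divide_mode"].as<std::string>();
        cfg.strRunDirection = item["run_direction"].as<std::string>();
        sources.push_back(cfg);
    }
    return sources;
}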
# 识别参数
identify:
# 运行方式
run_mode: "always" #[always; command]
# 是否开启动态检测
need_move_detect_flag: true
# 识别方向 [LEFT,RIGHT,ALL]
identify_direction: "LEFT"
# 切箱方式
divide_mode: "corner" #[corner, pixel]
# 大框帧跨度(比一个大框从出现到消失的跨度稍大一点, 跟跳帧有关系)
partition_frame_span: 20
# 大框帧跨度的位置像素差异
split_frame_span_px: 200
partition_frame_span: 0
# 顶部Y轴大框帧跨度的位置像素差异
top_y_split_span_px: 200
# 侧边X轴大框帧跨度的位置像素差异
side_x_split_span_px: 600
# 每帧大框位置差异最小值 (持续小于此值,则可能停车)
chkstop_px: 15
# 持续X次续位置差异小于gc_chkstop_px,则判断为停车。
chkstop_count: 10
# 过滤最小大框高度(不需要的话就写个很小的值)
num_frame_height: 150
pro_frame_height: 120
# 过滤最大框宽度(不需要的话就写个很大的值)
space_frame_width: 500
# 是否识别车头
train_heard_detect: false
# 是否识别集装箱
container_detect: false
target_min_height: 95
# 过滤最小大框宽度(不需要的话就写个很小的值)
target_min_width: 50
# 过滤低于指定Y轴的大框
target_min_y: 550
# 单次识别最大帧数
max_identify_frame: 600
# 两个箱子的箱角最大差值X
max_container_space_x: 600
# 两个箱子的箱角最大差值Y
max_container_space_y: 500
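Per the comments, the two new thresholds max_container_space_x and max_container_space_y bound the largest allowed corner-position gap between two containers. One plausible way such limits feed a corner-based split decision is sketched below; whether the project applies them exactly like this is an assumption, and the CornerBox type and isSameContainer helper are illustrative only.

// Sketch only: possible use of max_container_space_x / _y when deciding
// whether two detected corners belong to one container or to two adjacent ones.
#include <cmath>

struct CornerBox
{
    float fCenterX;
    float fCenterY;
};

// Treat two corner detections as the same container only when both axis gaps
// stay inside the configured limits (600 px in X, 500 px in Y in the config
// above); a larger gap suggests a second container, i.e. a split point.
bool isSameContainer(const CornerBox &a, const CornerBox &b,
                     int iMaxContainerSpaceX, int iMaxContainerSpaceY)
{
    return std::fabs(a.fCenterX - b.fCenterX) <= iMaxContainerSpaceX &&
           std::fabs(a.fCenterY - b.fCenterY) <= iMaxContainerSpaceY;
}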
# websocket_server 的服务端参数
wsocket_server:
@@ -1,44 +1,56 @@
#use_deviceid:
# #engineid: deviceid
# 0: 0

#engine实例
engines:
ApiEngine: 0
DeleteExpiredFolderEngine: 0
WSServerEngine: 0
VideoEngine: 0
VideoEngine: 1
VideoEngine: 2
VideoDecodeEngine: 0
MoveEngine: 0
SaveMoveImageEngine: 0
VideoDecodeEngine: 1
VideoDecodeEngine: 2
ControlEngine: 0
# SaveMoveImageEngine: 0
SaveMoveInfoEngine: 0
TrainStep1DataReadEngine: 0
TrainStep1InferenceEngine: 0
TrainStep1FilterEngine: 0
TrainDivideEngine: 0
TrainStep2DataReadEngine: 0
TrainStep2InferenceEngine: 0
TrainCharacterConversionEngine: 0
ContainerStep1InferenceEngine: 0
CornerInferenceEngine: 0
Step1MergeEngine: 0
ContainerStep2InferenceEngine: 0
ContainerCharacterConversionEngine: 0
ContainerDivideEngine: 0
ContainerDivideEngine: 1
ContainerDivideEngine: 2
SelectBestEngine: 0
SaveResultCSVEngine: 0
ToHttpSrvEngine: 0
ToMinioSrvEngine: 0
SaveDebugImageEngine: 0

#engine连接
connects:
WSServerEngine_0_0: "ControlEngine_0_0 1024"
VideoEngine_0_0: "VideoDecodeEngine_0_0 1024"
VideoDecodeEngine_0_0: "MoveEngine_0_0 1024"
MoveEngine_0_0: "SaveMoveImageEngine_0_0 1024"
MoveEngine_0_1: "SaveMoveInfoEngine_0_0 1024"
SaveMoveImageEngine_0_0: "TrainStep1DataReadEngine_0_0 1024"
TrainStep1DataReadEngine_0_0: "TrainStep1InferenceEngine_0_0 1024"
TrainStep1InferenceEngine_0_0: "TrainStep1FilterEngine_0_0 1024"
TrainStep1FilterEngine_0_0: "TrainDivideEngine_0_0 1024"
TrainDivideEngine_0_0: "TrainStep2DataReadEngine_0_0 1024"
TrainStep2DataReadEngine_0_0: "TrainStep2InferenceEngine_0_0 1024"
TrainStep2InferenceEngine_0_0: "TrainCharacterConversionEngine_0_0 1024"
TrainCharacterConversionEngine_0_0: "SelectBestEngine_0_0 1024"
TrainCharacterConversionEngine_0_1: "SaveDebugImageEngine_0_0 1024"
SelectBestEngine_0_0: "SaveResultCSVEngine_0_0 1024"
SaveResultCSVEngine_0_0: "ToHttpSrvEngine_0_0 1024"
SaveResultCSVEngine_0_1: "ToMinioSrvEngine_0_0 1024"
VideoEngine_1_0: "VideoDecodeEngine_1_0 1024"
VideoEngine_2_0: "VideoDecodeEngine_2_0 1024"
VideoDecodeEngine_0_0: "ControlEngine_0_1 1024"
VideoDecodeEngine_1_0: "ControlEngine_0_1 1024"
VideoDecodeEngine_2_0: "ControlEngine_0_1 1024"
ControlEngine_0_0: "WSServerEngine_0_0 1024"
ControlEngine_0_1: "ContainerStep1InferenceEngine_0_0 1024"
ControlEngine_0_2: "CornerInferenceEngine_0_0 1024"
ControlEngine_0_3: "SaveMoveInfoEngine_0_0 1024"
ControlEngine_0_4: "SaveMoveImageEngine_0_0 1024"
ContainerStep1InferenceEngine_0_0: "Step1MergeEngine_0_0 1024"
CornerInferenceEngine_0_0: "Step1MergeEngine_0_1 1024"
Step1MergeEngine_0_0: "ContainerStep2InferenceEngine_0_0 1024"
ContainerStep2InferenceEngine_0_0: "ContainerCharacterConversionEngine_0_0 1024"
ContainerCharacterConversionEngine_0_0: "ContainerDivideEngine_0_0 1024"
ContainerCharacterConversionEngine_0_1: "ContainerDivideEngine_1_0 1024"
ContainerCharacterConversionEngine_0_2: "ContainerDivideEngine_2_0 1024"
ContainerCharacterConversionEngine_0_3: "SaveDebugImageEngine_0_0 1024"
ContainerDivideEngine_0_0: "SelectBestEngine_0_0 1024"
ContainerDivideEngine_1_0: "SelectBestEngine_0_0 1024"
ContainerDivideEngine_2_0: "SelectBestEngine_0_0 1024"
SelectBestEngine_0_0: "WSServerEngine_0_0 1024"
SelectBestEngine_0_1: "ToMinioSrvEngine_0_0 1024"
SelectBestEngine_0_2: "SaveResultCSVEngine_0_0 1024"
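The engines map now declares three VideoEngine, VideoDecodeEngine and ContainerDivideEngine instances (ids 0 to 2), and connects wires them together. Each connects entry appears to follow the pattern <Engine>_<instance>_<port>: "<TargetEngine>_<instance>_<port> <queueSize>". A small sketch of parsing one such entry is below; the Endpoint and Connection types and the parse helpers are illustrative, and the project's real loader may differ.

// Sketch only: split a connects entry such as
//   VideoEngine_2_0: "VideoDecodeEngine_2_0 1024"
// into source endpoint, target endpoint and queue size.
#include <sstream>
#include <string>

struct Endpoint
{
    std::string strEngine;
    int iInstance = 0;
    int iPort = 0;
};

struct Connection
{
    Endpoint source;
    Endpoint target;
    int iQueueSize = 0;
};

// Assumes a well-formed "Name_instance_port" token (engine names hold no '_').
static Endpoint parseEndpoint(const std::string &text)
{
    Endpoint ep;
    size_t second = text.rfind('_');
    size_t first = text.rfind('_', second - 1);
    ep.strEngine = text.substr(0, first);
    ep.iInstance = std::stoi(text.substr(first + 1, second - first - 1));
    ep.iPort = std::stoi(text.substr(second + 1));
    return ep;
}

Connection parseConnection(const std::string &key, const std::string &value)
{
    Connection conn;
    conn.source = parseEndpoint(key);
    std::istringstream iss(value);
    std::string target;
    iss >> target >> conn.iQueueSize;   // "<TargetEngine>_<instance>_<port> <queueSize>"
    conn.target = parseEndpoint(target);
    return conn;
}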
@@ -39,9 +39,17 @@ train_step2_model:
container_step1_model:
model_path: "./model/container_step1/con1.engine"
score_threshold: 0.6
class: []
class: ["CONTAINERNUM","CONTAINERNUM_REVERSE"] # ,"SPACE_T","SPACENX_T","SPACEG_T","SPACEP_T","SPACEP_T"
# 集装箱字符识别
container_step2_model:
model_path: "./model/container_step2/con2.engine"
score_threshold: 0.7
class: []
class: ["0","1","2","3","4","5","6","7","8","9",
"A","B","C","D","E","F","G","H","I","J","K","L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V","W", "X", "Y", "Z",
"change", "load", "m", "self", "t", "volume", "meter",")", "(", "?", "-"
]
# 集装箱箱角识别
corner_model:
model_path: "./model/corner/corner.engine"
score_threshold: 0.7
class: ["CNTR_CORNER","CNTR_CORNER_TOP"]
@@ -380,4 +380,22 @@ typedef struct

} DetectResultData;


// 存图数据
typedef struct
{
// 数据来源标识
int iDataSource;
std::string strDetectDate;
std::string strDetectTime;
// 帧序号
int iFrameId;
// 箱号大框以及对应小框的集合
Step2ResultData step2ResultData;
// 箱角大框
std::vector<Step2ResultData> vecCornerResultData;
// 图片
cv::Mat cvImage;
} SaveDebugImgData;

#endif //TRAIN_COMMONSTRUCT_H
@@ -77,6 +77,10 @@ identify:
target_min_y: 550
# 单次识别最大帧数
max_identify_frame: 600
# 两个箱子的箱角最大差值X
max_container_space_x: 600
# 两个箱子的箱角最大差值Y
max_container_space_y: 500

# websocket_server 的服务端参数
wsocket_server:
@@ -38,7 +38,7 @@ connects:
ControlEngine_0_1: "ContainerStep1InferenceEngine_0_0 1024"
ControlEngine_0_2: "CornerInferenceEngine_0_0 1024"
ControlEngine_0_3: "SaveMoveInfoEngine_0_0 1024"
ControlEngine_0_4: "SaveMoveImageEngine_0_0 1024"
ControlEngine_0_4: "SaveMoveImageEngine_0_0 1024"
ContainerStep1InferenceEngine_0_0: "Step1MergeEngine_0_0 1024"
CornerInferenceEngine_0_0: "Step1MergeEngine_0_1 1024"
Step1MergeEngine_0_0: "ContainerStep2InferenceEngine_0_0 1024"
File diff suppressed because one or more lines are too long
@@ -47,7 +47,9 @@ void ControlEngine::initParam()
this->strDetectDate_ = "";
for (int i = 0; i < this->vecDataSourceConfig_.size(); ++i)
{
this->mapDetectNO_[i] = 1;
this->mapDetectNO_[i] = 0;
this->mapIdentifyType_[i] = IDENTIFY_INIT;

}
}
@@ -62,18 +64,10 @@ void ControlEngine::endIdentify(int iDataSource)
outputQueMap_[strPort1_]->push(std::static_pointer_cast<void>(pVDetectInfo), true);
outputQueMap_[strPort2_]->push(std::static_pointer_cast<void>(pVDetectInfo), true);

this->mapDetectNO_[iDataSource] = 1;
this->mapDetectNO_[iDataSource] = 0;
this->mapIdentifyType_[iDataSource] = IDENTIFY_INIT;

bool bAllEnd = true;
for (const auto & dataSource_it : this->mapDetectNO_)
{
if (dataSource_it.second != 1) bAllEnd = false;
}
if (bAllEnd)
{
g_identify_type = IDENTIFY_INIT;
this->strDetectDate_ = "";
}
LogInfo << " 数据源:" << iDataSource << " --- 识别结束!";
}

void ControlEngine::sendWSEngine(std::string msg)
@@ -83,6 +77,19 @@ void ControlEngine::sendWSEngine(std::string msg)
outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(std::make_shared<std::string>(msg)));
}

bool ControlEngine::isDetecting()
{
for (const auto & dataSource : this->mapIdentifyType_)
{
if (dataSource.second == IDENTIFY_START)
{
return true;
}
}

return false;
}

void ControlEngine::detectControl(std::shared_ptr<std::string> pWSServerOrder)
{
Json::CharReaderBuilder readerBuilder;
@@ -116,24 +123,25 @@ void ControlEngine::detectControl(std::shared_ptr<std::string> pWSServerOrder)

switch (jvOrder["commandType"].asInt()) {
case IDENTIFY_START:
if (g_identify_type == 1)

if (this->isDetecting())
{
std::string msg = "当前正在识别,无需重复发送识别信号";
LogWarn << msg;
this->sendWSEngine(msg);
break;
}
g_identify_type = IDENTIFY_START;
this->mapIdentifyType_ = {{0, IDENTIFY_START}, {1, IDENTIFY_START}, {2, IDENTIFY_START}};
break;
case IDENTIFY_STOP:
if (!g_identify_type)
if (!this->isDetecting())
{
std::string msg = "当前已停止识别,无需重复发送结束信号";
LogWarn << msg;
this->sendWSEngine(msg);
break;
}
g_identify_type = IDENTIFY_INIT;
this->mapIdentifyType_ = {{0, IDENTIFY_INIT}, {1, IDENTIFY_INIT}, {2, IDENTIFY_INIT}};
break;
case IDENTIFY_RECORD:
if (!jvOrder.isMember("containerNo"))
@@ -181,29 +189,36 @@ APP_ERROR ControlEngine::Process()

if (pProcessData->bIsEnd)
{
// 仅读是视频模式下会进行
if (this->mapDetectNO_[pProcessData->iDataSource] == 1) continue;
if (!this->isDetecting()) continue;
this->endIdentify(pProcessData->iDataSource);
LogInfo << "数据源:" << pProcessData->iDataSource << " 视频画面播放结束:停止识别!";
LogInfo << "数据源:" << pProcessData->iDataSource << " 画面读取结束:停止识别!";
continue;
}

if (!g_identify_type)
if (!this->isDetecting())
{
if (this->mapDetectNO_[pProcessData->iDataSource] != 1)
if (this->mapDetectNO_[pProcessData->iDataSource] > 0)
{
this->endIdentify(pProcessData->iDataSource);
}
this->strDetectDate_ = "";
continue;
}

if (this->mapIdentifyType_[pProcessData->iDataSource] == IDENTIFY_INIT)
{
continue;
}

if (this->mapDetectNO_[pProcessData->iDataSource] > this->identifyConfig_.iMaxIdentifyFrame)
{
LogInfo << " 数据源:" << pProcessData->iDataSource << " 超过最大允许识别帧数:" << this->identifyConfig_.iMaxIdentifyFrame << " 停止识别!";
this->endIdentify(pProcessData->iDataSource);
LogInfo << "数据源:" << pProcessData->iDataSource << " 超过最大允许识别帧数:" << this->identifyConfig_.iMaxIdentifyFrame << " 停止识别!";
continue;
}

this->mapDetectNO_[pProcessData->iDataSource]++;

cv::Mat image(pProcessData->dataSourceInfo.iHeight,
pProcessData->dataSourceInfo.iWidth,
CV_8UC3,
@@ -245,14 +260,14 @@ APP_ERROR ControlEngine::Process()
outputQueMap_[strPort3_]->push(std::static_pointer_cast<void>(pVDetectInfo), true);

// 存图
// std::shared_ptr<SaveImgData> pSaveImgData = std::make_shared<SaveImgData>();
// pSaveImgData->strFilePath = strFilePath;
// pSaveImgData->strFileName = std::to_string(this->mapDetectNO_[pProcessData->iDataSource]) + "_" + std::to_string(pProcessData->iDataSource) + ".jpg";
// pSaveImgData->cvImage = image;
// pSaveImgData->bIsEnd = pProcessData->bIsEnd;
// outputQueMap_[strPort4_]->push(std::static_pointer_cast<void>(pSaveImgData), true);
std::shared_ptr<SaveImgData> pSaveImgData = std::make_shared<SaveImgData>();
pSaveImgData->strFilePath = strFilePath;
pSaveImgData->strFileName = std::to_string(this->mapDetectNO_[pProcessData->iDataSource]) + "_" + std::to_string(pProcessData->iDataSource) + ".jpg";
pSaveImgData->cvImage = image;
pSaveImgData->bIsEnd = pProcessData->bIsEnd;
outputQueMap_[strPort4_]->push(std::static_pointer_cast<void>(pSaveImgData), true);


this->mapDetectNO_[pProcessData->iDataSource]++;
}

}
@@ -36,6 +36,7 @@ private:

uint32_t iDetectNO_ = 1; //动态检测数据编号
std::map<int, uint32_t> mapDetectNO_;
std::map<int, int> mapIdentifyType_;

std::string strDetectDate_;
std::string strDetectTime_;
@@ -45,6 +46,7 @@ private:
void endIdentify(int iDataSource);
void sendWSEngine(std::string msg);
void detectControl(std::shared_ptr<std::string> pWSServerOrder);
bool isDetecting();

};
File diff suppressed because one or more lines are too long
@@ -166,7 +166,7 @@ APP_ERROR ToMinioSrvEngine::Process()
strMinIoPath,
strLocalPath))
{
LogWarn << "数据上传失败! -- " << this->baseConfig_.strDebugResultPath;
LogWarn << "数据上传失败! -- " << strLocalPath;
}
else
{
@@ -151,7 +151,7 @@ APP_ERROR VideoDecodeEngine::Process()
}
else
{
LogError << " 硬解码失败... 返回失败信息:" << iDecodeRet;
LogError << "数据源:" << pProcessData->iDataSource << " 硬解码失败... 返回失败信息:" << iDecodeRet;
}
}
}
File diff suppressed because one or more lines are too long
@@ -1 +1 @@
#include "Step1MergeEngine.h"
using namespace ai_matrix;
namespace
{
//按照x坐标排列
bool CompareX(const SingleData &a, const SingleData &b)
{
return a.fLTX < b.fLTX;
}
}
Step1MergeEngine::Step1MergeEngine() {}
Step1MergeEngine::~Step1MergeEngine() {}
APP_ERROR Step1MergeEngine::Init()
{
strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0";
strPort1_ = engineName_ + "_" + std::to_string(engineId_) + "_1";
this->baseConfig_ = Config::getins()->getBaseConfig();
this->identifyConfig_ = Config::getins()->getIdentifyConfig();
this->multiTypeQueue_ = new ai_matrix::MultiTypeQueue(2);
LogInfo << "MergeEngine Init ok";
return APP_ERR_OK;
}
APP_ERROR Step1MergeEngine::DeInit()
{
LogInfo << "MergeEngine DeInit ok";
return APP_ERR_OK;
}
APP_ERROR Step1MergeEngine::Process()
{
int iRet = APP_ERR_OK;
while (!isStop_)
{
std::shared_ptr<void> pVoidData0 = nullptr;
inputQueMap_[strPort0_]->pop(pVoidData0);
std::shared_ptr<void> pVoidData1 = nullptr;
inputQueMap_[strPort1_]->pop(pVoidData1);
if (nullptr == pVoidData0 && nullptr == pVoidData1)
{
usleep(1000); //1ms
continue;
}
if (pVoidData0)
{
this->multiTypeQueue_->PushData(0, pVoidData0);
}
if (pVoidData1)
{
this->multiTypeQueue_->PushData(1, pVoidData1);
}
if (!this->multiTypeQueue_->PopAllData(pVoidData0, pVoidData1))
{
usleep(1000); //1ms
continue;
}
std::shared_ptr<InferenceResultData> pInferenceResultData =
std::static_pointer_cast<InferenceResultData>(pVoidData0);
std::shared_ptr<InferenceResultData> pInferenceResultData_container =
std::static_pointer_cast<InferenceResultData>(pVoidData1);
pInferenceResultData->vecSingleData = pInferenceResultData_container->vecSingleData;
std::sort(pInferenceResultData->vecSingleData.begin(),
pInferenceResultData->vecSingleData.end(),
CompareX);
if (pInferenceResultData->singleData.fScore > 0.0f)
{
// 箱号
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
// << " 数据源:" << pInferenceResultData->iDataSource
// << " --iClassId:" << pInferenceResultData->singleData.iClassId
// << " confidence=" << pInferenceResultData->singleData.fScore
// << " lx=" << pInferenceResultData->singleData.fLTX
// << " ly=" << pInferenceResultData->singleData.fLTY
// << " rx=" << pInferenceResultData->singleData.fRBX
// << " ry=" << pInferenceResultData->singleData.fRBY
// << " clear:" << pInferenceResultData->singleData.fClear;
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
}
// 箱角
for (const auto & it_result : pInferenceResultData->vecSingleData)
{
LogDebug << " 帧:" << pInferenceResultData->iFrameId
<< " 数据源:" << pInferenceResultData->iDataSource
<< " --iClassId:" << it_result.iClassId
<< " confidence=" << it_result.fScore
<< " lx=" << it_result.fLTX
<< " ly=" << it_result.fLTY
<< " rx=" << it_result.fRBX
<< " ry=" << it_result.fRBY;
}
outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pInferenceResultData), true);
}
return APP_ERR_OK;
}
#include "Step1MergeEngine.h"
using namespace ai_matrix;
namespace
{
//按照x坐标排列
bool CompareX(const SingleData &a, const SingleData &b)
{
return a.fLTX < b.fLTX;
}
}
Step1MergeEngine::Step1MergeEngine() {}
Step1MergeEngine::~Step1MergeEngine() {}
APP_ERROR Step1MergeEngine::Init()
{
strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0";
strPort1_ = engineName_ + "_" + std::to_string(engineId_) + "_1";
this->baseConfig_ = Config::getins()->getBaseConfig();
this->identifyConfig_ = Config::getins()->getIdentifyConfig();
this->multiTypeQueue_ = new ai_matrix::MultiTypeQueue(2);
LogInfo << "MergeEngine Init ok";
return APP_ERR_OK;
}
APP_ERROR Step1MergeEngine::DeInit()
{
LogInfo << "MergeEngine DeInit ok";
return APP_ERR_OK;
}
APP_ERROR Step1MergeEngine::Process()
{
int iRet = APP_ERR_OK;
while (!isStop_)
{
std::shared_ptr<void> pVoidData0 = nullptr;
inputQueMap_[strPort0_]->pop(pVoidData0);
std::shared_ptr<void> pVoidData1 = nullptr;
inputQueMap_[strPort1_]->pop(pVoidData1);
if (nullptr == pVoidData0 && nullptr == pVoidData1)
{
usleep(1000); //1ms
continue;
}
if (pVoidData0)
{
this->multiTypeQueue_->PushData(0, pVoidData0);
}
if (pVoidData1)
{
this->multiTypeQueue_->PushData(1, pVoidData1);
}
if (!this->multiTypeQueue_->PopAllData(pVoidData0, pVoidData1))
{
usleep(1000); //1ms
continue;
}
std::shared_ptr<InferenceResultData> pInferenceResultData =
std::static_pointer_cast<InferenceResultData>(pVoidData0);
std::shared_ptr<InferenceResultData> pInferenceResultData_container =
std::static_pointer_cast<InferenceResultData>(pVoidData1);
pInferenceResultData->vecSingleData = pInferenceResultData_container->vecSingleData;
std::sort(pInferenceResultData->vecSingleData.begin(),
pInferenceResultData->vecSingleData.end(),
CompareX);
if (pInferenceResultData->singleData.fScore > 0.0f)
{
// 箱号
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
// << " 数据源:" << pInferenceResultData->iDataSource
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
// << " --iClassId:" << pInferenceResultData->singleData.iClassId
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
// << " confidence=" << pInferenceResultData->singleData.fScore
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
// << " lx=" << pInferenceResultData->singleData.fLTX
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
// << " ly=" << pInferenceResultData->singleData.fLTY
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
// << " rx=" << pInferenceResultData->singleData.fRBX
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
// << " ry=" << pInferenceResultData->singleData.fRBY
// LogDebug << " 帧:" << pInferenceResultData->iFrameId
// << " clear:" << pInferenceResultData->singleData.fClear;
}
// 箱角
for (const auto & it_result : pInferenceResultData->vecSingleData)
{
LogDebug << " 帧:" << pInferenceResultData->iFrameId
<< " 数据源:" << pInferenceResultData->iDataSource
<< " --iClassId:" << it_result.iClassId
<< " confidence=" << it_result.fScore
<< " lx=" << it_result.fLTX
<< " ly=" << it_result.fLTY
<< " rx=" << it_result.fRBX
<< " ry=" << it_result.fRBY;
}
outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pInferenceResultData), true);
}
return APP_ERR_OK;
}
@@ -43,6 +43,7 @@ APP_ERROR SaveDebugImageEngine::Process()
}

std::shared_ptr<VStep2OutputData> pVStep2OutputData = std::static_pointer_cast<VStep2OutputData>(pvoidd);
cv::Mat image = pVStep2OutputData->cvImage.clone();

std::string strDataDir = this->baseConfig_.strDebugResultPath + "/"
+ pVStep2OutputData->strDetectDate + "/"
@@ -66,9 +67,9 @@ APP_ERROR SaveDebugImageEngine::Process()

// cv::Mat image = cv::imread(strImagePath);

if (pVStep2OutputData->cvImage.empty())
if (image.empty())
{
LogWarn << " 帧:" << pVStep2OutputData->iFrameId << " 数据源:" << pVStep2OutputData->iDataSource << " debug图像未找到";
// LogWarn << " 帧:" << pVStep2OutputData->iFrameId << " 数据源:" << pVStep2OutputData->iDataSource << " debug图像未找到";
continue;
}
@@ -86,7 +87,7 @@ APP_ERROR SaveDebugImageEngine::Process()
cv::Size text_size = cv::getTextSize(i, cv::FONT_HERSHEY_SIMPLEX, 1, 2, 0);
cv::Point baseline_loc(text_org);
baseline_loc.y += base_line + text_size.height;
cv::putText(pVStep2OutputData->cvImage, i, baseline_loc, cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 0, 255), 2);
cv::putText(image, i, baseline_loc, cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 0, 255), 2);
// 手动调整x位置,为下一个单词留出空间
text_org.x += text_size.width + 10;
text_org.x = text_org.x > IMAGE_WIDTH ? 15 : text_org.x;
@@ -98,17 +99,17 @@ APP_ERROR SaveDebugImageEngine::Process()
float centerX = pVStep2OutputData->step2ResultData.fLTX + (pVStep2OutputData->step2ResultData.fRBX - pVStep2OutputData->step2ResultData.fLTX)/2;
float centerY = pVStep2OutputData->step2ResultData.fLTY + (pVStep2OutputData->step2ResultData.fRBY - pVStep2OutputData->step2ResultData.fLTY)/2;
// auto start = std::chrono::system_clock::now(); //计时开始
cv::rectangle(pVStep2OutputData->cvImage,
cv::rectangle(image,
cv::Point(pVStep2OutputData->step2ResultData.fLTX, pVStep2OutputData->step2ResultData.fLTY),
cv::Point(pVStep2OutputData->step2ResultData.fRBX, pVStep2OutputData->step2ResultData.fRBY),
cvScalar, 2);
cv::line(pVStep2OutputData->cvImage,
cv::line(image,
cv::Point(centerX, pVStep2OutputData->step2ResultData.fLTY-30), cv::Point(centerX, pVStep2OutputData->step2ResultData.fRBY+30),
cvScalar, 1);

cv::Size text_size = cv::getTextSize(pVStep2OutputData->step2ResultData.transInfo.strTmpResult, cv::FONT_HERSHEY_SIMPLEX, 1, 2, 0);
cv::Point linePoint(pVStep2OutputData->step2ResultData.fLTX, pVStep2OutputData->step2ResultData.fRBY + text_size.height + 5);
cv::putText(pVStep2OutputData->cvImage,
cv::putText(image,
pVStep2OutputData->step2ResultData.transInfo.strTmpResult,
linePoint,
cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 0, 180), 2);
@@ -118,11 +119,11 @@ APP_ERROR SaveDebugImageEngine::Process()
{
cvScalar = {0, 255, 255, 255};
float centerX_corner = step2ResultData.fLTX + (step2ResultData.fRBX - step2ResultData.fLTX)/2;
cv::rectangle(pVStep2OutputData->cvImage,
cv::rectangle(image,
cv::Point(step2ResultData.fLTX, step2ResultData.fLTY),
cv::Point(step2ResultData.fRBX, step2ResultData.fRBY),
cvScalar, 2);
cv::line(pVStep2OutputData->cvImage,
cv::line(image,
cv::Point(centerX_corner, step2ResultData.fLTY-30), cv::Point(centerX_corner, step2ResultData.fRBY+30),
cvScalar, 1);
}
@@ -131,7 +132,7 @@ APP_ERROR SaveDebugImageEngine::Process()
// auto end = std::chrono::system_clock::now();
// LogDebug << "图片存储用时: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms " << strImagePath;

if (!cv::imwrite(strImagePath, pVStep2OutputData->cvImage, this->vecCompressionParams_))
if (!cv::imwrite(strImagePath, image, this->vecCompressionParams_))
{
LogError << "图片存储失败:" << strImagePath;
}
@@ -24,7 +24,7 @@ private:
ai_matrix::BaseConfig baseConfig_;

std::string strPort0_;
int iPicQuality_ = 100;
int iPicQuality_ = 80;
std::vector<int> vecCompressionParams_;

};
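The header change above drops the debug JPEG quality from 100 to 80. A short sketch of how such a quality value is typically passed to cv::imwrite through a compression-parameter vector follows; the member names mirror the header, but the snippet is an illustration rather than the engine's actual initialisation code.

// Sketch only: feeding a JPEG quality value into cv::imwrite.
#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    int iPicQuality = 80;  // lower quality -> smaller debug images on disk
    std::vector<int> vecCompressionParams = {cv::IMWRITE_JPEG_QUALITY, iPicQuality};
    cv::Mat image(1080, 1920, CV_8UC3, cv::Scalar(0, 0, 0));
    // Same call shape as SaveDebugImageEngine::Process() uses for debug frames.
    cv::imwrite("debug_sample.jpg", image, vecCompressionParams);
    return 0;
}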
@@ -50,8 +50,7 @@ void SelectBestEngine::sendWSServer(DetectResultData &detectResultData)
Json::Value jsonData;
for (int i = 0; i < detectResultData.vecImage.size(); ++i)
{
jsonData["bestImgSid" + std::to_string(i)] = detectResultData.vecImage[i];
strImage += detectResultData.vecImage[i];
strImage += (detectResultData.strDetectDate + "/" + ai_matrix::StringUtil::getins()->replace_all_distinct(detectResultData.strDetectTime, ":", "-") + "/" + detectResultData.vecImage[i]);
if (i < detectResultData.vecImage.size() - 1)
{
strImage += ",";
@@ -68,6 +67,8 @@ void SelectBestEngine::sendWSServer(DetectResultData &detectResultData)
strContainerNo += ",";
}
}
jsonData["detectDate"] = detectResultData.strDetectDate;
jsonData["detectTime"] = detectResultData.strDetectTime;
jsonData["containerNo"] = strContainerNo;
jsonData["images"] = strImage;
jsonData["status"] = "normal";
@@ -99,10 +100,24 @@ void SelectBestEngine::selectBest()
auto contains = [] (const std::vector<std::string>& vec, std::string value) -> bool {
return std::find(vec.begin(), vec.end(), value) != vec.end();
};
auto vCompare = [](std::string a, std::string b) {
if (a.size() != b.size()) return 999;

int count = 0;
for (int i = 0; i < a.size(); ++i)
{
if (a[i] != b[i])
{
++count;
}
}
return count;
};
DetectResultData detectResultData;
detectResultData.strDetectDate = this->strDetectDate_;
detectResultData.strDetectTime = this->strDetectTime_;

std::string strAlternativeResult;
bool bHaveTwoContainer = false;
for (auto & it_container : this->mapIndex_Containers_) {
bool bHaveVerify = false;
@@ -129,21 +144,39 @@ void SelectBestEngine::selectBest()
count = it_max.second;
if (!contains(detectResultData.vecContainerNO, it_max.first)) {
strBestContainer = it_max.first;
break;
}
}
}
if (mapContainer_count.size() > 1)
{
for (auto &it_max : mapContainer_count)
{
if (it_max.first != strBestContainer && vCompare(it_max.first, strBestContainer) > 2)
{
strAlternativeResult = it_max.first;
}
}
}
detectResultData.vecContainerNO.emplace_back(strBestContainer);
} else {
// 取识别字数最长的
std::string strBestContainer;
int iMaxSize = 0;
for (auto &it_max: mapContainer_count) {
if (it_max.first.size() > iMaxSize) {
iMaxSize = it_max.first.size();
strBestContainer = it_max.first;
}
if (detectResultData.vecContainerNO.size() == 1 && !strAlternativeResult.empty())
{
detectResultData.vecContainerNO.emplace_back(strAlternativeResult);
}
else
{
// 取识别字数最长的
std::string strBestContainer;
int iMaxSize = 0;
for (auto &it_max: mapContainer_count) {
if (it_max.first.size() > iMaxSize) {
iMaxSize = it_max.first.size();
strBestContainer = it_max.first;
}
}
detectResultData.vecContainerNO.emplace_back(strBestContainer);
}
detectResultData.vecContainerNO.emplace_back(strBestContainer);
}
}
if (bHaveTwoContainer && detectResultData.vecContainerNO.size() < 2)
@@ -175,6 +208,12 @@ APP_ERROR SelectBestEngine::Process()

if (pVSelectBestData->bIsEnd)
{
if (this->strDetectDate_.empty())
{
this->strDetectDate_ = pVSelectBestData->strDetectDate;
this->strDetectTime_ = pVSelectBestData->strDetectTime;
}

this->iEndCount_++;
if (!(this->iEndCount_ % this->vecDataSourceConfig_.size()))
{
@@ -183,6 +222,7 @@ APP_ERROR SelectBestEngine::Process()
}
continue;
}
this->iEndCount_ = 0;

if (this->strImagePath_.empty())
{
@@ -268,6 +268,7 @@ APP_ERROR ContainerStep1InferenceEngine::Process()

if (pVDetectInfo->cvImage.empty())
{
outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pInferenceResultData), true);
usleep(1000); //1ms
continue;
}
@@ -290,7 +291,7 @@ APP_ERROR ContainerStep1InferenceEngine::Process()

pInferenceResultData->strDetectDate = pVDetectInfo->strDetectDate;
pInferenceResultData->strDetectTime = pVDetectInfo->strDetectTime;
pInferenceResultData->cvImage = pVDetectInfo->cvImage;
pInferenceResultData->cvImage = pVDetectInfo->cvImage.clone();

// 筛选离中心点坐标最近的,如果一样近就选最右的
float fCenterX = IMAGE_WIDTH/2;
@@ -323,7 +324,7 @@ APP_ERROR ContainerStep1InferenceEngine::Process()
singledata.fLTY = inferenceResult.bbox[1];
singledata.fRBX = inferenceResult.bbox[2];
singledata.fRBY = inferenceResult.bbox[3];
singledata.fClear = inferenceResult.clear_id;
singledata.fClear = inferenceResult.clear_conf;

pInferenceResultData->singleData = singledata;
File diff suppressed because one or more lines are too long