diff --git a/README.md b/README.md index c0d78d2..752456b 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,8 @@ # Train_Identify -支持单摄像头识别,支持集装箱号识别 \ No newline at end of file +支持单摄像头识别,支持集装箱号识别 + +董家口项目代码 +接口采用自动装车同类项目接口,图片用nginx映射静态文件服务器,识别车厢和集装箱号,每节上传HTTP接口。 + +因为现场为装车时识别车号,故车厢行进速度慢,识别时间长。可能会大量占用内存。需要将小站提升性能和交换内存。 diff --git a/ai_matrix/Config/Config.cpp b/ai_matrix/Config/Config.cpp index eefc386..0cb754b 100644 --- a/ai_matrix/Config/Config.cpp +++ b/ai_matrix/Config/Config.cpp @@ -42,6 +42,7 @@ namespace ai_matrix this->baseConfig_.strResultPath = config_["base"]["result_path"].as(); this->baseConfig_.strDebugResultPath = config_["base"]["debug_result_path"].as(); this->baseConfig_.iResultSaveDays = config_["base"]["result_save_days"].as(); + this->baseConfig_.strNginxUrl = config_["base"]["nginx_url"].as(); // 日志参数 this->logConfig_.strOutLevel = config_["log"]["out_level"].as(); @@ -85,7 +86,7 @@ namespace ai_matrix this->httpServerConfig_.strUserName = config_["http_server"]["username"].as(); this->httpServerConfig_.strPassword = config_["http_server"]["password"].as(); - // http服务器参数 + // minio服务器参数 this->minioConfig_.bIsUse = config_["minio"]["is_use"].as(); this->minioConfig_.strUrl = config_["minio"]["url"].as(); this->minioConfig_.strAccessKey = config_["minio"]["accesskey"].as(); diff --git a/ai_matrix/Config/Config.h b/ai_matrix/Config/Config.h index 105ae94..3b337f0 100644 --- a/ai_matrix/Config/Config.h +++ b/ai_matrix/Config/Config.h @@ -44,6 +44,8 @@ namespace ai_matrix std::string strDebugResultPath; // 日志存储天数 int iResultSaveDays; + // nginx 地址 + std::string strNginxUrl; }; // 日志参数 diff --git a/ai_matrix/myqueue/myqueue.h b/ai_matrix/myqueue/myqueue.h index 328d785..7930490 100644 --- a/ai_matrix/myqueue/myqueue.h +++ b/ai_matrix/myqueue/myqueue.h @@ -25,7 +25,6 @@ namespace ai_matrix while (queue_.size() >= max_size_ && isWait && !is_stoped_) { - printf("myqueue full"); cond_not_full_.wait(lk); } diff --git a/base/Log/Log.cpp 
b/base/Log/Log.cpp index b815d99..1e63b80 100644 --- a/base/Log/Log.cpp +++ b/base/Log/Log.cpp @@ -32,7 +32,7 @@ namespace MatrixAILog const int TIME_SIZE = 32; const int TIME_DIFF = 28800; // 8 hour const int BYTES6 = 6; - const int FILE_SIZE = 52428800; // 50M + const int FILE_SIZE = 52428800 * 2; // 50M uint32_t Log::logLevel = LOG_LEVEL_INFO; std::vector Log::levelString{"[Debug]", "[Info ]", "[Warn ]", "[Error]", "[Fatal]"}; std::mutex Log::mutex; diff --git a/common/CommonStruct.h b/common/CommonStruct.h index d088601..b0485dc 100644 --- a/common/CommonStruct.h +++ b/common/CommonStruct.h @@ -167,7 +167,7 @@ typedef struct { // 车厢编号 uint16_t iCarOrder = 0; // 车厢种类 - uint16_t iCategory = 0; + uint16_t iCategory = -1; // 车型 std::string strTType; // 车厢编号 diff --git a/config/config.yaml b/config/config.yaml index d024f90..0e043ab 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -14,6 +14,8 @@ base: debug_result_path: "./debug_result" # 结果存储天数 result_save_days: 10 + # nginx 地址 + nginx_url: "http://10.10.203.10:9010" # 日志参数 log: diff --git a/engine/CharacterConversionEngine/TrainCharacterConversionEngine.cpp b/engine/CharacterConversionEngine/TrainCharacterConversionEngine.cpp index 3f40ada..778920a 100644 --- a/engine/CharacterConversionEngine/TrainCharacterConversionEngine.cpp +++ b/engine/CharacterConversionEngine/TrainCharacterConversionEngine.cpp @@ -477,7 +477,7 @@ void TrainCharacterConversionEngine::transNum(Step2ResultData &step2ResultData, //校验车型是否符合验证 typeInfo.IsChkFlag = this->authTransNum(step2ResultData.iClassId, strTemp); - LogDebug << "--->>> 符合正则吗?" << typeInfo.IsChkFlag << " --- " << strTemp; +// LogDebug << "--->>> 符合正则吗?" 
<< typeInfo.IsChkFlag << " --- " << strTemp; typeInfo.strTmpResult = strTemp; step2ResultData.vecTransInfo.emplace_back(typeInfo); diff --git a/engine/DataSourceEngine/VideoEngine.cpp b/engine/DataSourceEngine/VideoEngine.cpp index 55c935d..ec1db5c 100644 --- a/engine/DataSourceEngine/VideoEngine.cpp +++ b/engine/DataSourceEngine/VideoEngine.cpp @@ -1 +1 @@ -#include "VideoEngine.h" using namespace ai_matrix; namespace { const int LOW_THRESHOLD = 128; const int MAX_THRESHOLD = 4096; const uint16_t DELAY_TIME = 10000; } VideoEngine::VideoEngine() {} VideoEngine::~VideoEngine() {} APP_ERROR VideoEngine::Init() { dataSourceConfig_ = Config::getins()->getDataSourceConfig(); strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0"; strPort1_ = engineName_ + "_" + std::to_string(engineId_) + "_1"; LogInfo << "engineId_:" << engineId_ << " VideoEngine Init ok"; return APP_ERR_OK; } APP_ERROR VideoEngine::DeInit() { ResetCamera(); LogInfo << "engineId_:" << engineId_ << " VideoEngine DeInit ok"; return APP_ERR_OK; } void VideoEngine::ResetCamera() { if (pFormatCtx_ != nullptr) { // clear th cache of the queue avformat_close_input(&pFormatCtx_); pFormatCtx_ = nullptr; } } APP_ERROR VideoEngine::ConnectCamera() { pFormatCtx_ = CreateFormatContext(); // create context if (pFormatCtx_ == nullptr) { LogError << "engineId_:" << engineId_ << " pFormatCtx_ null!"; return APP_ERR_COMM_FAILURE; } //0-代表输入 av_dump_format(pFormatCtx_, 0, dataSourceConfig_.strUrl.c_str(), 0); // get stream infomation int iRet = APP_ERR_OK; iRet = GetStreamInfo(); if (iRet != APP_ERR_OK) { LogError << "engineId_:" << engineId_ << " Stream Info Check failed, iRet = " << iRet; return APP_ERR_COMM_FAILURE; } return APP_ERR_OK; } APP_ERROR VideoEngine::GetStreamInfo() { if (pFormatCtx_ != nullptr) { iVideoStream_ = -1; iAudioStream_ = -1; //frameInfo_.iFrameId = 0; //帧号从0开始 for (unsigned int i = 0; i < pFormatCtx_->nb_streams; i++) { AVStream *inStream = pFormatCtx_->streams[i]; if 
(inStream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { iVideoStream_ = i; frameInfo_.iHeight = inStream->codecpar->height; frameInfo_.iWidth = inStream->codecpar->width; //获取帧率,帧率的打印都在流中的两个成员.且应取平均帧率为先,为{x,0}或者{0,1}则取实时帧率 if (inStream->avg_frame_rate.den == 0 || (inStream->avg_frame_rate.num == 0 && inStream->avg_frame_rate.den == 1)) { frameInfo_.iRate = inStream->r_frame_rate.num / inStream->r_frame_rate.den; } else { frameInfo_.iRate = inStream->avg_frame_rate.num / inStream->avg_frame_rate.den; } frameInfo_.iRate = frameInfo_.iRate == 0 ? 25 : frameInfo_.iRate; LogDebug << "engineId_:" << engineId_ << " width:" << frameInfo_.iWidth << " height:" << frameInfo_.iHeight << " rate:" << frameInfo_.iRate << " iVideoStream_:" << iVideoStream_; } // else if (inStream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) // { // iAudioStream_ = i; // LogDebug << "engineId_:" << engineId_ << " iAudioStream_:" << iAudioStream_; // } } if (iVideoStream_ == -1) { LogError << "engineId_:" << engineId_ << " Didn't find a video stream!"; return APP_ERR_COMM_FAILURE; } if (frameInfo_.iHeight < LOW_THRESHOLD || frameInfo_.iWidth < LOW_THRESHOLD || frameInfo_.iHeight > MAX_THRESHOLD || frameInfo_.iWidth > MAX_THRESHOLD) { LogError << "engineId_:" << engineId_ << " Size of frame is not supported in DVPP Video Decode!"; return APP_ERR_COMM_FAILURE; } pCodecParameters_ = pFormatCtx_->streams[iVideoStream_]->codecpar; } return APP_ERR_OK; } AVFormatContext *VideoEngine::CreateFormatContext() { // create message for stream pull AVFormatContext *pFormatContext = nullptr; AVDictionary *pOptions = nullptr; // formatContext = avformat_alloc_context(); if (dataSourceConfig_.strUrl.find("rtsp:") != std::string::npos) // rtsp { //设置缓存大小,1080p可将值调大 av_dict_set(&pOptions, "buffer_size", "8192000", 0); //以tcp方式打开,如果以udp方式打开将tcp替换为udp av_dict_set(&pOptions, "rtsp_transport", "tcp", 0); //设置超时断开连接时间,单位微秒,3000000表示3秒 av_dict_set(&pOptions, "stimeout", "3000000", 0); //设置最大时延,单位微秒,1000000表示1秒 
av_dict_set(&pOptions, "max_delay", "1000000", 0); //自动开启线程数 av_dict_set(&pOptions, "threads", "auto", 0); } //av_register_all(); //注册所有支持的格式(这里一定注册这些,否则会因为协议解析问题报错!!!) //avcodec_register_all(); //注册编解码器 //avformat_network_init(); //注册网格格式,如果为本地文件则可以去掉该代码 int iRet = avformat_open_input(&pFormatContext, dataSourceConfig_.strUrl.c_str(), nullptr, &pOptions); if (nullptr != pOptions) { av_dict_free(&pOptions); } if (iRet != 0) { LogError << "engineId_:" << engineId_ << " Couldn't open input stream " << dataSourceConfig_.strUrl.c_str() << ", iRet=" << iRet; return nullptr; } // pFormatContext->flags |= AVFMT_FLAG_NONBLOCK; // pFormatContext->pb->flags |= AVIO_FLAG_NONBLOCK; // av_dict_set(&pFormatContext->interrupt_callback.callback, "timeout", "3000", 0); // iRet = avio_open2(&pFormatContext->pb, dataSourceConfig_.strUrl.c_str(), AVIO_FLAG_READ, NULL, NULL) < 0; // { // // 处理错误 // LogError << "engineId_:" << engineId_ << "avio_open2 iRet=" << iRet; // return nullptr; // } iRet = avformat_find_stream_info(pFormatContext, nullptr); if (iRet != 0) { LogError << "engineId_:" << engineId_ << " Couldn't find stream information, iRet = " << iRet; return nullptr; } return pFormatContext; } //av_read_frame的中断回调函数 // int VideoEngine::InterruptCallback(void *pData) // { // TimeoutContext* pTimeOutCtx = (TimeoutContext*)pData; // LogDebug << "InterruptCallback i64Timeout:" << pTimeOutCtx->i64Timeout; // return std::chrono::duration_cast( // std::chrono::system_clock::now().time_since_epoch()) // .count() >= pTimeOutCtx->i64Timeout // ? 
AVERROR_EXIT // : 0; // } APP_ERROR VideoEngine::Process() { int iRet = APP_ERR_OK; // Pull data cyclically AVPacket pkt; while (!isStop_) { if (!g_identify_type) { if (!bConnectFlag_) { ResetCamera(); bConnectFlag_ = true; } std::this_thread::sleep_for(std::chrono::seconds(3)); //3秒后重连 continue; } //重连相机 if (bConnectFlag_) { iRet = ConnectCamera(); if (iRet == APP_ERR_OK) { LogInfo << "engineId_:" << engineId_ << " Start the stream......"; bConnectFlag_ = false; } else { // outputQueMap_[strPort1_]->push(std::static_pointer_cast(std::make_shared("摄像头连接失败!"))); ResetCamera(); bConnectFlag_ = true; std::this_thread::sleep_for(std::chrono::seconds(3)); //3秒后重连 continue; } } //设置av_read_frame中断函数 (中断函数中超过1s,则中断处理) // TimeoutContext timeoutCtx = { std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() + 1000 }; // pFormatCtx_->interrupt_callback.callback = &VideoEngine::InterruptCallback; // pFormatCtx_->interrupt_callback.opaque = &timeoutCtx; av_init_packet(&pkt); //init pkt iRet = av_read_frame(pFormatCtx_, &pkt); //需要一直读取,否则获取到的是历史数据 if (iRet != 0) { av_packet_unref(&pkt); if (dataSourceConfig_.strUrl.find("rtsp:") != std::string::npos) { LogError << "engineId_:" << engineId_ << " Read frame failed, reconnect iRet:" << iRet; //重连相机 ResetCamera(); bConnectFlag_ = true; } else { LogWarn << "----- 视频播放完毕 -----"; // //重连相机 ResetCamera(); bConnectFlag_ = true; //组织数据 std::shared_ptr pProcessData = std::make_shared(); // pProcessData->dataSourceInfo = frameInfo_; pProcessData->sourceFrameData.i64TimeStamp = TimeUtil::getins()->getCurrentTimeMillis(true); pProcessData->bIsTrainEnd = true; //push端口0,视频解码 iRet = outputQueMap_[strPort0_]->push(std::static_pointer_cast(pProcessData), true); if (iRet != APP_ERR_OK) { LogError << "数据推动失败,解码引擎关闭..."; } std::this_thread::sleep_for(std::chrono::seconds(3)); //3秒 }; continue; } if (pkt.stream_index == iVideoStream_) //只解码视频流 { // LogDebug << "iRet:" << iRet << " pkt.size:" << pkt.size; if (pkt.size 
<= 0) { LogError << "engineId_:" << engineId_ << " Invalid pkt.size: " << pkt.size; av_packet_unref(&pkt); continue; } if (dataSourceConfig_.strUrl.find(".mp4") != std::string::npos) { const char szStartCode[4] = {0, 0, 0, 1}; if (bIsAvc_ || memcmp(szStartCode, pkt.data, 4) != 0) { // is avc1 code, have no start code of H264 int iLen = 0; uint8_t *p = pkt.data; bIsAvc_ = true; do { // add start_code for each NAL, one frame may have multi NALs. iLen = ntohl(*((long *)p)); memcpy(p, szStartCode, 4); p += 4; p += iLen; if (p >= pkt.data + pkt.size) { break; } } while (1); } } void* pH264Buffer = nullptr; pH264Buffer = new uint8_t[pkt.size]; memcpy(pH264Buffer, pkt.data, pkt.size); //组织数据 std::shared_ptr pProcessData = std::make_shared(); pProcessData->pCodecParameters_ = this->pCodecParameters_; pProcessData->dataSourceInfo = frameInfo_; pProcessData->sourceFrameData.i64TimeStamp = TimeUtil::getins()->getCurrentTimeMillis(true); pProcessData->sourceFrameData.iSize = pkt.size; pProcessData->sourceFrameData.pData.reset(pH264Buffer, [](void* data){if(data) {delete[] data; data = nullptr;}}); //智能指针管理内存 //push端口0,视频解码 iRet = outputQueMap_[strPort0_]->push(std::static_pointer_cast(pProcessData), true); if (iRet != APP_ERR_OK) { LogError << "数据推动失败,解码引擎关闭..."; } } else { // LogError << "engineId_:" << engineId_ << " stream err stream_index:" << pkt.stream_index; } av_packet_unref(&pkt); //unref if (dataSourceConfig_.strUrl.find("rtsp:") == std::string::npos) // 如果不是rtsp,定时发送 { // std::this_thread::sleep_for(std::chrono::milliseconds(1000 / frameInfo_.iRate)); usleep(1000000 / frameInfo_.iRate); if (this->isStop_) return APP_ERR_OK; } } return APP_ERR_OK; } \ No newline at end of file +#include "VideoEngine.h" using namespace ai_matrix; namespace { const int LOW_THRESHOLD = 128; const int MAX_THRESHOLD = 4096; const uint16_t DELAY_TIME = 10000; } VideoEngine::VideoEngine() {} VideoEngine::~VideoEngine() {} APP_ERROR VideoEngine::Init() { dataSourceConfig_ = 
Config::getins()->getDataSourceConfig(); strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0"; strPort1_ = engineName_ + "_" + std::to_string(engineId_) + "_1"; LogInfo << "engineId_:" << engineId_ << " VideoEngine Init ok"; return APP_ERR_OK; } APP_ERROR VideoEngine::DeInit() { ResetCamera(); LogInfo << "engineId_:" << engineId_ << " VideoEngine DeInit ok"; return APP_ERR_OK; } void VideoEngine::ResetCamera() { if (pFormatCtx_ != nullptr) { // clear th cache of the queue avformat_close_input(&pFormatCtx_); pFormatCtx_ = nullptr; } } APP_ERROR VideoEngine::ConnectCamera() { pFormatCtx_ = CreateFormatContext(); // create context if (pFormatCtx_ == nullptr) { LogError << "engineId_:" << engineId_ << " pFormatCtx_ null!"; return APP_ERR_COMM_FAILURE; } //0-代表输入 av_dump_format(pFormatCtx_, 0, dataSourceConfig_.strUrl.c_str(), 0); // get stream infomation int iRet = APP_ERR_OK; iRet = GetStreamInfo(); if (iRet != APP_ERR_OK) { LogError << "engineId_:" << engineId_ << " Stream Info Check failed, iRet = " << iRet; return APP_ERR_COMM_FAILURE; } return APP_ERR_OK; } APP_ERROR VideoEngine::GetStreamInfo() { if (pFormatCtx_ != nullptr) { iVideoStream_ = -1; iAudioStream_ = -1; //frameInfo_.iFrameId = 0; //帧号从0开始 for (unsigned int i = 0; i < pFormatCtx_->nb_streams; i++) { AVStream *inStream = pFormatCtx_->streams[i]; if (inStream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { iVideoStream_ = i; frameInfo_.iHeight = inStream->codecpar->height; frameInfo_.iWidth = inStream->codecpar->width; //获取帧率,帧率的打印都在流中的两个成员.且应取平均帧率为先,为{x,0}或者{0,1}则取实时帧率 if (inStream->avg_frame_rate.den == 0 || (inStream->avg_frame_rate.num == 0 && inStream->avg_frame_rate.den == 1)) { frameInfo_.iRate = inStream->r_frame_rate.num / inStream->r_frame_rate.den; } else { frameInfo_.iRate = inStream->avg_frame_rate.num / inStream->avg_frame_rate.den; } frameInfo_.iRate = frameInfo_.iRate == 0 ? 
25 : frameInfo_.iRate; LogDebug << "engineId_:" << engineId_ << " width:" << frameInfo_.iWidth << " height:" << frameInfo_.iHeight << " rate:" << frameInfo_.iRate << " iVideoStream_:" << iVideoStream_; } // else if (inStream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) // { // iAudioStream_ = i; // LogDebug << "engineId_:" << engineId_ << " iAudioStream_:" << iAudioStream_; // } } if (iVideoStream_ == -1) { LogError << "engineId_:" << engineId_ << " Didn't find a video stream!"; return APP_ERR_COMM_FAILURE; } if (frameInfo_.iHeight < LOW_THRESHOLD || frameInfo_.iWidth < LOW_THRESHOLD || frameInfo_.iHeight > MAX_THRESHOLD || frameInfo_.iWidth > MAX_THRESHOLD) { LogError << "engineId_:" << engineId_ << " Size of frame is not supported in DVPP Video Decode!"; return APP_ERR_COMM_FAILURE; } pCodecParameters_ = pFormatCtx_->streams[iVideoStream_]->codecpar; } return APP_ERR_OK; } AVFormatContext *VideoEngine::CreateFormatContext() { // create message for stream pull AVFormatContext *pFormatContext = nullptr; AVDictionary *pOptions = nullptr; // formatContext = avformat_alloc_context(); if (dataSourceConfig_.strUrl.find("rtsp:") != std::string::npos) // rtsp { //设置缓存大小,1080p可将值调大 av_dict_set(&pOptions, "buffer_size", "8192000", 0); //以tcp方式打开,如果以udp方式打开将tcp替换为udp av_dict_set(&pOptions, "rtsp_transport", "tcp", 0); //设置超时断开连接时间,单位微秒,3000000表示3秒 av_dict_set(&pOptions, "stimeout", "3000000", 0); //设置最大时延,单位微秒,1000000表示1秒 av_dict_set(&pOptions, "max_delay", "1000000", 0); //自动开启线程数 av_dict_set(&pOptions, "threads", "auto", 0); } //av_register_all(); //注册所有支持的格式(这里一定注册这些,否则会因为协议解析问题报错!!!) 
//avcodec_register_all(); //注册编解码器 //avformat_network_init(); //注册网格格式,如果为本地文件则可以去掉该代码 int iRet = avformat_open_input(&pFormatContext, dataSourceConfig_.strUrl.c_str(), nullptr, &pOptions); if (nullptr != pOptions) { av_dict_free(&pOptions); } if (iRet != 0) { LogError << "engineId_:" << engineId_ << " Couldn't open input stream " << dataSourceConfig_.strUrl.c_str() << ", iRet=" << iRet; return nullptr; } // pFormatContext->flags |= AVFMT_FLAG_NONBLOCK; // pFormatContext->pb->flags |= AVIO_FLAG_NONBLOCK; // av_dict_set(&pFormatContext->interrupt_callback.callback, "timeout", "3000", 0); // iRet = avio_open2(&pFormatContext->pb, dataSourceConfig_.strUrl.c_str(), AVIO_FLAG_READ, NULL, NULL) < 0; // { // // 处理错误 // LogError << "engineId_:" << engineId_ << "avio_open2 iRet=" << iRet; // return nullptr; // } iRet = avformat_find_stream_info(pFormatContext, nullptr); if (iRet != 0) { LogError << "engineId_:" << engineId_ << " Couldn't find stream information, iRet = " << iRet; return nullptr; } return pFormatContext; } //av_read_frame的中断回调函数 // int VideoEngine::InterruptCallback(void *pData) // { // TimeoutContext* pTimeOutCtx = (TimeoutContext*)pData; // LogDebug << "InterruptCallback i64Timeout:" << pTimeOutCtx->i64Timeout; // return std::chrono::duration_cast( // std::chrono::system_clock::now().time_since_epoch()) // .count() >= pTimeOutCtx->i64Timeout // ? 
AVERROR_EXIT // : 0; // } APP_ERROR VideoEngine::Process() { int iRet = APP_ERR_OK; // Pull data cyclically AVPacket pkt; while (!isStop_) { if (!g_identify_type) { if (!bConnectFlag_) { ResetCamera(); bConnectFlag_ = true; } std::this_thread::sleep_for(std::chrono::seconds(3)); //3秒后重连 continue; } //重连相机 if (bConnectFlag_) { iRet = ConnectCamera(); if (iRet == APP_ERR_OK) { LogInfo << "engineId_:" << engineId_ << " Start the stream......"; bConnectFlag_ = false; } else { // outputQueMap_[strPort1_]->push(std::static_pointer_cast(std::make_shared("摄像头连接失败!"))); ResetCamera(); bConnectFlag_ = true; std::this_thread::sleep_for(std::chrono::seconds(3)); //3秒后重连 continue; } } //设置av_read_frame中断函数 (中断函数中超过1s,则中断处理) // TimeoutContext timeoutCtx = { std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count() + 1000 }; // pFormatCtx_->interrupt_callback.callback = &VideoEngine::InterruptCallback; // pFormatCtx_->interrupt_callback.opaque = &timeoutCtx; av_init_packet(&pkt); //init pkt iRet = av_read_frame(pFormatCtx_, &pkt); //需要一直读取,否则获取到的是历史数据 if (iRet != 0) { av_packet_unref(&pkt); if (dataSourceConfig_.strUrl.find("rtsp:") != std::string::npos) { LogError << "engineId_:" << engineId_ << " Read frame failed, reconnect iRet:" << iRet; //重连相机 ResetCamera(); bConnectFlag_ = true; } else { LogWarn << "----- 视频播放完毕 -----"; // //重连相机 ResetCamera(); bConnectFlag_ = true; //组织数据 std::shared_ptr pProcessData = std::make_shared(); // pProcessData->dataSourceInfo = frameInfo_; pProcessData->sourceFrameData.i64TimeStamp = TimeUtil::getins()->getCurrentTimeMillis(true); pProcessData->bIsTrainEnd = true; //push端口0,视频解码 iRet = outputQueMap_[strPort0_]->push(std::static_pointer_cast(pProcessData), true); if (iRet != APP_ERR_OK) { LogError << "数据推动失败,解码引擎关闭..."; } std::this_thread::sleep_for(std::chrono::seconds(3)); //3秒 }; continue; } if (pkt.stream_index == iVideoStream_) //只解码视频流 { // LogDebug << "iRet:" << iRet << " pkt.size:" << pkt.size; if (pkt.size 
<= 0) { LogError << "engineId_:" << engineId_ << " Invalid pkt.size: " << pkt.size; av_packet_unref(&pkt); continue; } if (dataSourceConfig_.strUrl.find(".mp4") != std::string::npos) { const char szStartCode[4] = {0, 0, 0, 1}; if (bIsAvc_ || memcmp(szStartCode, pkt.data, 4) != 0) { // is avc1 code, have no start code of H264 int iLen = 0; uint8_t *p = pkt.data; bIsAvc_ = true; do { // add start_code for each NAL, one frame may have multi NALs. iLen = ntohl(*((long *)p)); memcpy(p, szStartCode, 4); p += 4; p += iLen; if (p >= pkt.data + pkt.size) { break; } } while (1); } } void* pH264Buffer = nullptr; pH264Buffer = new uint8_t[pkt.size]; memcpy(pH264Buffer, pkt.data, pkt.size); //组织数据 std::shared_ptr pProcessData = std::make_shared(); pProcessData->pCodecParameters_ = this->pCodecParameters_; pProcessData->dataSourceInfo = frameInfo_; pProcessData->sourceFrameData.i64TimeStamp = TimeUtil::getins()->getCurrentTimeMillis(true); pProcessData->sourceFrameData.iSize = pkt.size; pProcessData->sourceFrameData.pData.reset(pH264Buffer, [](void* data){if(data) {delete[] data; data = nullptr;}}); //智能指针管理内存 //push端口0,视频解码 iRet = outputQueMap_[strPort0_]->push(std::static_pointer_cast(pProcessData), true); if (iRet != APP_ERR_OK) { LogError << "数据推动失败,解码引擎关闭..."; } } else { // LogError << "engineId_:" << engineId_ << " stream err stream_index:" << pkt.stream_index; } av_packet_unref(&pkt); //unref if (dataSourceConfig_.strUrl.find("rtsp:") == std::string::npos) // 如果不是rtsp,定时发送 { // std::this_thread::sleep_for(std::chrono::milliseconds(1000 / frameInfo_.iRate)); // usleep(1000000 / frameInfo_.iRate); if (this->isStop_) return APP_ERR_OK; } } return APP_ERR_OK; } \ No newline at end of file diff --git a/engine/DataUploadEngine/ToHttpSrvEngine.cpp b/engine/DataUploadEngine/ToHttpSrvEngine.cpp index c02c5f8..ead9165 100644 --- a/engine/DataUploadEngine/ToHttpSrvEngine.cpp +++ b/engine/DataUploadEngine/ToHttpSrvEngine.cpp @@ -1 +1 @@ -#include "ToHttpSrvEngine.h" 
ToHttpSrvEngine::ToHttpSrvEngine() {} ToHttpSrvEngine::~ToHttpSrvEngine() {} APP_ERROR ToHttpSrvEngine::Init() { strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0"; this->httpServerConfig_ = Config::getins()->getHttpServerConfig(); LogInfo << "ToHttpSrvEngine Init ok"; return APP_ERR_OK; } APP_ERROR ToHttpSrvEngine::DeInit() { LogInfo << "ToHttpSrvEngine DeInit ok"; return APP_ERR_OK; } APP_ERROR ToHttpSrvEngine::Process() { int iRet = APP_ERR_OK; while (!isStop_) { std::shared_ptr pVoidData0 = nullptr; inputQueMap_[strPort0_]->pop(pVoidData0); if (nullptr == pVoidData0) { usleep(1000); //1ms continue; } if (!this->httpServerConfig_.bIsUse) { usleep(1000); //1ms continue; } std::shared_ptr pTrain = std::static_pointer_cast(pVoidData0); int iCategory = 0; if (pTrain->iCategory == 3) { iCategory = 0; } else if(pTrain->iCategory == 2) { iCategory = 1; } else if (pTrain->iCategory == 6) { iCategory = 2; } else if (pTrain->iCategory == 0) { iCategory = 3; } //组装post信息 Json::Value jvRequest; jvRequest["trackName"] = pTrain->strTrackName; jvRequest["cameraNumber"] = 0;// 摄像头编号 jvRequest["comeTime"] = pTrain->strTrainDate + " " + pTrain->strTrainTime;// 来车时间 jvRequest["direction"] = pTrain->iTDirection;// 来车方向 jvRequest["carriageOrder"] = pTrain->iCarOrder;// 车节号 jvRequest["carriageCategory"] = iCategory;// 车厢类别:0敞车,1:漏洞矿车,2:平车,3:车头 jvRequest["carriageType"] = pTrain->strTType;// 车型 jvRequest["carriageNumber"] = pTrain->strTNum;// 车厢号 jvRequest["carriageTareweight"] = pTrain->strTTareWeight;// 皮重 jvRequest["carriageLoad"] = pTrain->strTLoadWeight;// 载重 jvRequest["carriageChange"] = pTrain->strTChangeLen;// 换长 jvRequest["numImageName"] = pTrain->strTNum_image; // 车号图片 jvRequest["proImageName"] = pTrain->strTPro_image; // 属性图片 for (const auto & it : pTrain->vecContainer) { jvRequest["containerNumber"].append(it.strContainerNo);// 集装箱 jvRequest["containerImageName"].append(it.strImg); // 集装箱图片 } jvRequest["isTheLast"] = pTrain->bIsTheLast ? 
1 : 0;// 是否最后一节: 0:否,1:是 jvRequest["identifyTime"] = pTrain->strEndTime;//车厢切分的时间 if (!ToWeb::getins()->upWeb(jvRequest, 1)) { } } return APP_ERR_OK; } \ No newline at end of file +#include "ToHttpSrvEngine.h" ToHttpSrvEngine::ToHttpSrvEngine() {} ToHttpSrvEngine::~ToHttpSrvEngine() {} APP_ERROR ToHttpSrvEngine::Init() { strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0"; this->httpServerConfig_ = Config::getins()->getHttpServerConfig(); this->baseConfig_ = Config::getins()->getBaseConfig(); LogInfo << "ToHttpSrvEngine Init ok"; return APP_ERR_OK; } APP_ERROR ToHttpSrvEngine::DeInit() { LogInfo << "ToHttpSrvEngine DeInit ok"; return APP_ERR_OK; } APP_ERROR ToHttpSrvEngine::Process() { int iRet = APP_ERR_OK; while (!isStop_) { std::shared_ptr pVoidData0 = nullptr; inputQueMap_[strPort0_]->pop(pVoidData0); if (nullptr == pVoidData0) { usleep(1000); //1ms continue; } if (!this->httpServerConfig_.bIsUse) { usleep(1000); //1ms continue; } std::shared_ptr pTrain = std::static_pointer_cast(pVoidData0); int iCategory = 0; if (pTrain->iCategory == 3) { iCategory = 0; } else if(pTrain->iCategory == 2) { iCategory = 1; } else if (pTrain->iCategory == 6) { iCategory = 2; } else if (pTrain->iCategory == 0) { iCategory = 3; } std::string strDes = this->baseConfig_.strNginxUrl + "/" + pTrain->strTrainDate + "/" + StringUtil::getins()->replace_all_distinct(pTrain->strTrainTime, ":", "-") + "/"; //组装post信息 Json::Value jvRequest; Json::Value jvSubObj; jvSubObj["poundNo"] = pTrain->strTrackName; // 股道号 jvRequest["trainParams"] = jvSubObj; // jvRequest["trackName"] = pTrain->strTrackName; // 股道号/名称 jvRequest["cameraNumber"] = 0;// 摄像头编号 jvRequest["comeTime"] = pTrain->strTrainDate + " " + pTrain->strTrainTime;// 来车时间 jvRequest["direction"] = pTrain->iTDirection;// 来车方向 jvRequest["carriageOrder"] = pTrain->iCarOrder;// 车节号 jvRequest["carriageCategory"] = iCategory;// 车厢类别:0敞车,1:漏洞矿车,2:平车,3:车头 jvRequest["carriageType"] = pTrain->strTType;// 车型 jvRequest["carriageNumber"] = 
pTrain->strTNum;// 车厢号 jvRequest["carriageTareweight"] = pTrain->strTTareWeight;// 皮重 jvRequest["carriageLoad"] = pTrain->strTLoadWeight;// 载重 jvRequest["carriageChange"] = pTrain->strTChangeLen;// 换长 jvRequest["numImageName"] = strDes + pTrain->strTNum_image; // 车号图片 jvRequest["proImageName"] = strDes + pTrain->strTPro_image; // 属性图片 int i = 1; for (const auto & it : pTrain->vecContainer) { jvRequest["containerNo" + std::to_string(i)] = it.strContainerNo;// 集装箱 jvRequest["containerImageName" + std::to_string(i)] = strDes + it.strImg; // 集装箱图片 ++i; } jvRequest["isTheLast"] = pTrain->bIsTheLast ? 1 : 0;// 是否最后一节: 0:否,1:是 jvRequest["identifyTime"] = pTrain->strEndTime;//车厢切分的时间 if (!ToWeb::getins()->upWeb(jvRequest, 1)) { } } return APP_ERR_OK; } \ No newline at end of file diff --git a/engine/DataUploadEngine/ToHttpSrvEngine.h b/engine/DataUploadEngine/ToHttpSrvEngine.h index c4df93e..d26daf9 100644 --- a/engine/DataUploadEngine/ToHttpSrvEngine.h +++ b/engine/DataUploadEngine/ToHttpSrvEngine.h @@ -32,6 +32,7 @@ private: std::string strPort0_; ai_matrix::HttpServerConfig httpServerConfig_; + ai_matrix::BaseConfig baseConfig_; int iNoDataCnt_ = 0; diff --git a/engine/SaveDebugImageEngine/SaveDebugImageEngine.cpp b/engine/SaveDebugImageEngine/SaveDebugImageEngine.cpp index fb97a2d..0850a38 100644 --- a/engine/SaveDebugImageEngine/SaveDebugImageEngine.cpp +++ b/engine/SaveDebugImageEngine/SaveDebugImageEngine.cpp @@ -122,6 +122,12 @@ APP_ERROR SaveDebugImageEngine::Process() cv::Mat image = cv::imread(strImagePath); + if (image.empty()) + { + LogWarn << "未能读取到需要标注的图像:" << strImagePath; + continue; + } + std::stringstream ss; std::vector vecTitle; vecTitle.emplace_back("FrameID:" + to_string(pVStep2OutputData->iFrameId)); diff --git a/engine/Step1FilterInferenceEngine/TrainStep1FilterEngine.cpp b/engine/Step1FilterInferenceEngine/TrainStep1FilterEngine.cpp index 039fb32..08c1518 100644 --- a/engine/Step1FilterInferenceEngine/TrainStep1FilterEngine.cpp +++ 
b/engine/Step1FilterInferenceEngine/TrainStep1FilterEngine.cpp @@ -1 +1 @@ -#include "TrainStep1FilterEngine.h" using namespace ai_matrix; //namespace //{ // //按照x坐标排列 // bool CompareX(const SingleData &a, const SingleData &b) // { // return a.fLTX < b.fLTX; // } //} TrainStep1FilterEngine::TrainStep1FilterEngine() {} TrainStep1FilterEngine::~TrainStep1FilterEngine() {} APP_ERROR TrainStep1FilterEngine::Init() { strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0"; this->baseConfig_ = Config::getins()->getBaseConfig(); this->dataSourceConfig_ = Config::getins()->getDataSourceConfig(); this->identifyConfig_ = Config::getins()->getIdentifyConfig(); this->mapTargetStr_.insert(std::make_pair(NUM, "NUM")); this->mapTargetStr_.insert(std::make_pair(PRO, "PRO")); this->mapTargetStr_.insert(std::make_pair(HEAD, "HEAD")); this->mapTargetStr_.insert(std::make_pair(SPACE, "SPACE"));//SPACE this->mapTargetStr_.insert(std::make_pair(TRAINSPACE, "SPACE"));//SPACE this->mapTargetStr_.insert(std::make_pair(CONTAINER, "CONTAINER"));//CONTAINER this->initParam(); LogInfo << "TrainStep1FilterEngine Init ok"; return APP_ERR_OK; } APP_ERROR TrainStep1FilterEngine::DeInit() { LogInfo << "TrainStep1FilterEngine DeInit ok"; return APP_ERR_OK; } /** * 参数初始化(列车结束时需调用) */ void TrainStep1FilterEngine::initParam() { this->pInferenceResultDataPre_ = nullptr; this->iNotChgCount_ = 0; while (!this->stackBackInfo_.empty()) { this->stackBackInfo_.pop(); } while (!this->queInferenceResultData_.empty()) { this->queInferenceResultData_.pop(); } iTrainStatus_ = TRAINSTATUS_RUN; g_come_direction = DIRECTION_UNKNOWN; mapCalDirection_.clear(); } void TrainStep1FilterEngine::addBackInfo() { std::string strAllClassType; for (size_t i = 0; i < this->pInferenceResultDataPre_->vecSingleData.size(); i++) { if (strAllClassType.find(this->mapTargetStr_[this->pInferenceResultDataPre_->vecSingleData[i].iTargetType]) != std::string::npos) { continue; } strAllClassType += 
this->mapTargetStr_[this->pInferenceResultDataPre_->vecSingleData[i].iTargetType]; } if (strAllClassType.empty()) { return; } TrainBackInfo trainBackInfo; trainBackInfo.pInferenceResultData = this->pInferenceResultDataPre_; trainBackInfo.strAllClassType = strAllClassType; if (this->stackBackInfo_.empty()) { this->stackBackInfo_.push(trainBackInfo); LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 新增倒车信息:" << strAllClassType << " 当前数量:" << this->stackBackInfo_.size(); } else { TrainBackInfo trainBackInfoTop = stackBackInfo_.top(); if (trainBackInfoTop.strAllClassType != trainBackInfo.strAllClassType) { if (((g_come_direction == DIRECTION_RIGHT && this->dataSourceConfig_.iRightFirst == RIGHT_RUN_AND_PRO_FIRST) || (g_come_direction == DIRECTION_LEFT && this->dataSourceConfig_.iLeftFirst == LEFT_RUN_AND_PRO_FIRST)) && ((trainBackInfo.strAllClassType == "SPACE" && (trainBackInfoTop.strAllClassType == "PROSPACE" || trainBackInfoTop.strAllClassType == "SPACEPRO")) || (trainBackInfo.strAllClassType == "NUM" && (trainBackInfoTop.strAllClassType == "NUMSPACE" || trainBackInfoTop.strAllClassType == "SPACENUM")) || ((trainBackInfo.strAllClassType == "PROSPACE" || trainBackInfo.strAllClassType == "SPACEPRO") && trainBackInfoTop.strAllClassType == "PRO"))) { return; } if (((g_come_direction == DIRECTION_RIGHT && this->dataSourceConfig_.iRightFirst == RIGHT_RUN_AND_NUM_FIRST) || (g_come_direction == DIRECTION_LEFT && this->dataSourceConfig_.iRightFirst == LEFT_RUN_AND_NUM_FIRST)) && ((trainBackInfo.strAllClassType == "SPACE" && (trainBackInfoTop.strAllClassType == "NUMSPACE" || trainBackInfoTop.strAllClassType == "SPACENUM")) || (trainBackInfo.strAllClassType == "PRO" && (trainBackInfoTop.strAllClassType == "PROSPACE" || trainBackInfoTop.strAllClassType == "SPACEPRO")) || ((trainBackInfo.strAllClassType == "NUMSPACE" || trainBackInfo.strAllClassType == "SPACENUM") && trainBackInfoTop.strAllClassType == "NUM"))) { return; } 
this->stackBackInfo_.push(trainBackInfo); LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 新增倒车信息:" << strAllClassType << " 当前数量:" << this->stackBackInfo_.size(); } } } bool TrainStep1FilterEngine::isEndDealBackInfo() { if (this->stackBackInfo_.empty()) { return true; } bool bPopFlag = false; if (this->pInferenceResultDataPre_->vecSingleData.size() == 0) return false; /* 处理倒车数据时,数据需设置为倒车,主要是保证这样的数据后面Engine不处理,防止切分车厢出错。 类型不相等时,就pop,当pop后,还剩一个数据时,则表示已经回到了刚开始倒车的地方。(只剩一个数据的逻辑在上方) 处理最后一个时,不能只判断下类型相同就弹出。需要控制下位置。(要么类型相同位置合适,要么类型不相同) 正向为向左行驶,则当前数据的位置尽量小于等于栈中最后一个元素的位置。 正向为向右行驶,则当前数据的位置尽量大于等于栈中最后一个元素的位置。 */ // std::sort(this->pInferenceResultDataPre_->vecSingleData.begin(), this->pInferenceResultDataPre_->vecSingleData.end(), CompareX); std::string strAllClassType; for (size_t i = 0; i < this->pInferenceResultDataPre_->vecSingleData.size(); i++) { if (strAllClassType.find(mapTargetStr_[this->pInferenceResultDataPre_->vecSingleData[i].iTargetType]) != std::string::npos) { continue; } strAllClassType += mapTargetStr_[this->pInferenceResultDataPre_->vecSingleData[i].iTargetType]; } if (strAllClassType.empty()) { return false; } if (stackBackInfo_.size() == 1) { TrainBackInfo trainBackInfoLast = stackBackInfo_.top(); std::shared_ptr pInferenceResultDataBack = std::static_pointer_cast(trainBackInfoLast.pInferenceResultData); // std::sort(pInferenceResultDataBack->vecSingleData.begin(), pInferenceResultDataBack->vecSingleData.end(), CompareX); for (size_t i = 0; i < pInferenceResultDataBack->vecSingleData.size(); i++) { int bFlag = -1; for (size_t j = 0; j < pInferenceResultDataPre_->vecSingleData.size(); j++) { if (pInferenceResultDataBack->vecSingleData[i].iClassId == pInferenceResultDataPre_->vecSingleData[j].iClassId) { if (pInferenceResultDataPre_->vecSingleData[j].fLTX < 1 || pInferenceResultDataBack->vecSingleData[i].fLTX < 1) { LogDebug << "大框X坐标小于1,判定为异常大框。过滤!!"; break; } bFlag = (pInferenceResultDataBack->vecSingleData[i].fLTX <= 
pInferenceResultDataPre_->vecSingleData[j].fLTX) ? 1 : 0; LogDebug << "帧:" << pInferenceResultDataPre_->iFrameId << " 倒车前帧:" << pInferenceResultDataBack->iFrameId << " 恢复到原位:" << bFlag << " 当前框位置:" << pInferenceResultDataPre_->vecSingleData[i].fLTX << " 倒车前位置:" << pInferenceResultDataBack->vecSingleData[i].fLTX << "方向:" << g_come_direction; } } if ((g_come_direction == DIRECTION_LEFT && bFlag == 0) || (g_come_direction == DIRECTION_RIGHT && bFlag == 1)) { bPopFlag = true; break; } } if (bPopFlag) { LogDebug << "frameId:" << pInferenceResultDataPre_->iFrameId << " 恢复倒车前的位置:" << bPopFlag; stackBackInfo_.pop(); } } else { TrainBackInfo trainBackInfoTop_bak = stackBackInfo_.top(); stackBackInfo_.pop(); TrainBackInfo trainBackInfoTop = stackBackInfo_.top(); if (trainBackInfoTop.strAllClassType != strAllClassType) { stackBackInfo_.push(trainBackInfoTop_bak); LogDebug << "帧:" << pInferenceResultDataPre_->iFrameId << " 倒车信息:" << stackBackInfo_.size() << " 顶部倒车信息:" << trainBackInfoTop.strAllClassType << " 本次识别信息:" << strAllClassType; } else { LogDebug << "帧:" << pInferenceResultDataPre_->iFrameId << " 倒车信息:" << stackBackInfo_.size() << " 顶部倒车信息:" << trainBackInfoTop.strAllClassType << " 本次识别信息:" << strAllClassType << " 删除倒车信息:" << trainBackInfoTop_bak.strAllClassType; } } return stackBackInfo_.empty() ? 
true : false; } /** * 校验火车是否停止 * return : true:停止; false:非停止 1(正常行驶) 2(停车) 3(倒车) */ int TrainStep1FilterEngine::getTrainStatus() { if (g_come_direction == DIRECTION_UNKNOWN) { LogDebug << " frameId:" << this->pInferenceResultDataPre_->iFrameId << " 未判断出行车方向,暂定认为火车正常行驶中"; return TRAINSTATUS_RUN; } // 无框时,返回之前的列车状态 if (this->pInferenceResultDataPre_->vecSingleData.size() == 0) { return iTrainStatus_; } queInferenceResultData_.push(this->pInferenceResultDataPre_); if (queInferenceResultData_.size() < 3) { return TRAINSTATUS_RUN; } std::shared_ptr pInferenceResultDataFront = queInferenceResultData_.front(); // iNotChgCount_大于0表示有可能停车,此时pop队列数据要多留存几个。用最开始的数据来判断是否真正停车,如果每次只用上上帧判断当列车超级慢时可能判断为停车。 int iSizeTemp = iNotChgCount_ > 0 ? 10 : 2; while (queInferenceResultData_.size() > iSizeTemp) { queInferenceResultData_.pop(); } // LogDebug << "frameId:" << pProcessData->iFrameId << " 判断运动状态队列 第一帧:" << postDataFront.iFrameId << " 队列size:" << quePostData_.size() << " iSizeTemp:" << iSizeTemp; bool bSameFlag = false; for (size_t i = 0; i < this->pInferenceResultDataPre_->vecSingleData.size(); i++) { SingleData singleDataBack = this->pInferenceResultDataPre_->vecSingleData[i]; for (size_t j = 0; j < pInferenceResultDataFront->vecSingleData.size(); j++) { SingleData singleDataFront = pInferenceResultDataFront->vecSingleData[j]; /* 使用iBigClassId,可能出现平车只有间隔大框,且间隔大框可以一会是平车间隔,一会是通用间隔。导致类别不一样 使用iTargetType,可能出现平车只有间隔大框,且间隔大框可以一会是平车间隔,一会是通用间隔。导致像素差判断不准。 */ if (singleDataFront.iTargetType != singleDataBack.iTargetType) { LogDebug << "判断前后帧识别的是否一致 上一个:" << singleDataFront.iTargetType << " 当前:" << singleDataBack.iTargetType; continue; } if (singleDataFront.iTargetType == CONTAINER) continue; bSameFlag = true; int iCenterBack = singleDataBack.fLTX + (singleDataBack.fRBX - singleDataBack.fLTX) / 2; int iCenterFront = singleDataFront.fLTX + (singleDataFront.fRBX - singleDataFront.fLTX) / 2; if (abs(iCenterBack - iCenterFront) > this->identifyConfig_.iChkstopPx) { iNotChgCount_ = 0; /* 
iCenterBack > iCenterFront 表示向右行驶,且原方向为向左行驶 iCenterBack < iCenterFront 表示向左行驶,且原方向为向右行驶 以上2种表示倒车。 */ if ((iCenterBack > iCenterFront && g_come_direction == DIRECTION_LEFT) || (iCenterBack < iCenterFront && g_come_direction == DIRECTION_RIGHT)) { if (this->identifyConfig_.iPartitionFrameSpan < (this->pInferenceResultDataPre_->iFrameId - pInferenceResultDataFront->iFrameId) && this->identifyConfig_.iSplitFrameSpanPx < abs(iCenterBack - iCenterFront)) { return TRAINSTATUS_RUN; } // LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 检测到火车倒车"; return TRAINSTATUS_BACK; } else { // LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 正常行驶"; return TRAINSTATUS_RUN; } } /* 小于10个像素表示可能停车,累计未变化次数。 累计变化次数超过10次,返回停车 累计变化次数未超过10次,返回之前行驶状态 */ else { iNotChgCount_++; LogDebug << " frameId:" << this->pInferenceResultDataPre_->iFrameId << " 大框移动范围小(" << abs(iCenterBack - iCenterFront) << ") 判断停车计数:" << iNotChgCount_ << "/" << this->identifyConfig_.iChkstopCount; if (iNotChgCount_ > this->identifyConfig_.iChkstopCount) { LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 检测到火车停车"; return TRAINSTATUS_STOP; } else { // LogDebug << "frameId:" << pProcessData->iFrameId << " iTrainStatus_:" << iTrainStatus_; return iTrainStatus_; } } } } /* 未找到相同的框,说明是老框消失掉了,新框出现了。 按新框出现的位置判断是向左行驶,还是向右行驶。 */ LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " bSameFlag:" << bSameFlag; if (!bSameFlag) { // std::sort(this->pInferenceResultDataPre_->vecSingleData.begin(), this->pInferenceResultDataPre_->vecSingleData.end(), CompareX); SingleData singleData = this->pInferenceResultDataPre_->vecSingleData.front(); if (g_come_direction == DIRECTION_LEFT) { singleData = this->pInferenceResultDataPre_->vecSingleData.back(); } LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " fLTX:" << singleData.fLTX << " fRBX:" << singleData.fRBX; iNotChgCount_ = 0; int iCenter = singleData.fLTX + (singleData.fRBX - 
singleData.fLTX) / 2; int iValue = IMAGE_WIDTH / 2; if ((iCenter > iValue && g_come_direction == DIRECTION_RIGHT) || (iCenter < iValue && g_come_direction == DIRECTION_LEFT)) { /* 针对有效帧较少时,和上上帧比较没有同类型大框,且当前帧已行驶到画面中心导致误判的情况, 增加和上帧同类型大框的比较处理。 */ std::shared_ptr pInferenceResultDataMiddle = queInferenceResultData_.front(); for (size_t i = 0; i < pInferenceResultDataPre_->vecSingleData.size(); i++) { SingleData singleDataBack = pInferenceResultDataPre_->vecSingleData[i]; for (size_t j = 0; j < pInferenceResultDataMiddle->vecSingleData.size(); j++) { SingleData singleDataMiddle = pInferenceResultDataMiddle->vecSingleData[j]; if (singleDataMiddle.iTargetType != singleDataBack.iTargetType) { continue; } int iCenterBack = singleDataBack.fLTX + (singleDataBack.fRBX - singleDataBack.fLTX) / 2; int iCenterMiddle = singleDataMiddle.fLTX + (singleDataMiddle.fRBX - singleDataMiddle.fLTX) / 2; // 位置比较大于10个像素,则表示有移动。再判断时正向移动,还是倒车 LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " " << iCenterBack << "-" << iCenterMiddle << "=" << abs(iCenterBack - iCenterMiddle) << " 目标差值:" << this->identifyConfig_.iChkstopPx; if (abs(iCenterBack - iCenterMiddle) > this->identifyConfig_.iChkstopPx) { if ((iCenterBack > iCenterMiddle && g_come_direction == DIRECTION_LEFT) || (iCenterBack < iCenterMiddle && g_come_direction == DIRECTION_RIGHT)) { LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 检测到火车倒车"; return TRAINSTATUS_BACK; } else { LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 正常行驶"; return TRAINSTATUS_RUN; } } } } // LogDebug << "frameId:" << pProcessData->iFrameId << " back2"; return iTrainStatus_; } } // LogDebug << "frameId:" << pProcessData->iFrameId << " iNotChgCount_:" << iNotChgCount_ << " run run"; return TRAINSTATUS_RUN; } /** * 计算行车方向新 */ void TrainStep1FilterEngine::calculateDirection() { /* 连续3帧同目标识别框信息 判断位置差异是否超过10px(判停车参数),且两两之间都是线性。如果符合则计算方向。 上述条件不符合则剔除第一个元素,再次累计连续3帧处理。 */ for (auto iter = 
this->pInferenceResultDataPre_->vecSingleData.begin(); iter != this->pInferenceResultDataPre_->vecSingleData.end(); iter++) { // 火车车头和集装箱号 暂不参与方向判断 if (iter->iClassId == CONTAINERNUM || iter->iClassId == TRAIN_HEAD) continue; CalculateInfo calInfo; calInfo.iFrameId = this->pInferenceResultDataPre_->iFrameId; calInfo.iBigClassId = iter->iClassId; calInfo.fCenterX = iter->fLTX + (iter->fRBX - iter->fLTX) / 2; calInfo.fTargetWidth = iter->fRBX - iter->fLTX; auto iterSubMap = this->mapCalDirection_.find(iter->iClassId); if (iterSubMap == this->mapCalDirection_.end()) { std::vector vecTemp; this->mapCalDirection_.insert(std::make_pair(iter->iClassId, vecTemp)); iterSubMap = this->mapCalDirection_.find(iter->iClassId); } iterSubMap->second.emplace_back(calInfo); if (iterSubMap->second.size() > 2) { LogDebug << " frameid:" << this->pInferenceResultDataPre_->iFrameId << " last:" << iterSubMap->second.at(2).iFrameId << " " << iterSubMap->second.at(2).fCenterX << " mid:" << iterSubMap->second.at(1).iFrameId << " " << iterSubMap->second.at(1).fCenterX << " pre:" << iterSubMap->second.at(0).iFrameId << " " << iterSubMap->second.at(0).fCenterX; //如果帧号连续,且移动位置大于15px,则计算方向 if (iterSubMap->second.at(2).iFrameId - iterSubMap->second.at(1).iFrameId != 1 || iterSubMap->second.at(1).iFrameId - iterSubMap->second.at(0).iFrameId != 1) { iterSubMap->second.erase(iterSubMap->second.begin()); continue; } if (abs(iterSubMap->second.at(0).fTargetWidth - iterSubMap->second.at(2).fTargetWidth) >= 1.5*this->identifyConfig_.iChkstopPx) { iterSubMap->second.erase(iterSubMap->second.begin()); continue; } int iLast = iterSubMap->second.at(2).fCenterX; int iMid = iterSubMap->second.at(1).fCenterX; int iPre = iterSubMap->second.at(0).fCenterX; if (abs(iPre - iLast) <= this->identifyConfig_.iChkstopPx) { iterSubMap->second.erase(iterSubMap->second.begin()); continue; } if (iPre <= iMid && iMid <= iLast) { g_come_direction = DIRECTION_RIGHT; } else if (iPre >= iMid && iMid >= iLast) { g_come_direction 
= DIRECTION_LEFT; } else { iterSubMap->second.erase(iterSubMap->second.begin()); continue; } // LogDebug << " frameid:" << pInferenceResultDataPre_->iFrameId << " iDirection_:" << g_come_direction; } } } void TrainStep1FilterEngine::sendComeTrain(const std::string strTrainDate, const std::string strTrainName, const int iDirection) { std::string message = "{\"cometime\":\"" + strTrainDate + " " + strTrainName + "\",\"type\":\"1\",\"direction\":" + std::to_string(iDirection == this->dataSourceConfig_.iDirection ? 1:-1) + "}"; LogWarn << message; outputQueMap_[engineName_ + "_" + std::to_string(engineId_) + "_1"]->push(std::static_pointer_cast(std::make_shared(message))); } /** * 根据当前帧数据,处理上一帧数据 * inParam : std::shared_ptr pProcessData :当前帧数据 * outParam: N/A * return : N/A */ void TrainStep1FilterEngine::dealProcessDataPre(std::shared_ptr pInferenceResultData) { /* 目标框是否是连续识别,只识别到一帧的目标框认为误识别,过滤掉。 判断上一帧,当前帧 是否有框 上一帧有框,当前帧有框,说明连续识别,正常处理。 上一帧有框,当前帧无框,则非连续识别,过滤大框 上一帧无框,当前帧有框,则连续识别个数置零。 上一帧无框,当前帧无框,则连续识别个数置零。 */ if (!this->pInferenceResultDataPre_) return; int iHeadContinueCnt = 0; int iProContinueCnt = 0; int iNumContinueCnt = 0; int iSpaceContinueCnt = 0; int iTrainSpaceContinueCnt = 0; int iContainerContinueCnt = 0; for (int i = 0; i < pInferenceResultData->vecSingleData.size(); i++) { if (pInferenceResultData->vecSingleData[i].iTargetType == HEAD) { iHeadContinueCnt++; } else if (pInferenceResultData->vecSingleData[i].iTargetType == PRO) { iProContinueCnt++; } else if (pInferenceResultData->vecSingleData[i].iTargetType == NUM) { iNumContinueCnt++; } else if (pInferenceResultData->vecSingleData[i].iTargetType == SPACE) { iSpaceContinueCnt++; } else if (pInferenceResultData->vecSingleData[i].iTargetType == TRAINSPACE) { iTrainSpaceContinueCnt++; } else if (pInferenceResultData->vecSingleData[i].iTargetType == CONTAINER) { iContainerContinueCnt++; } } for (int i = 0; i < this->pInferenceResultDataPre_->vecSingleData.size(); i++) { if 
(this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == HEAD) { iHeadContinueCnt++; } else if (this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == PRO) { iProContinueCnt++; } else if (this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == NUM) { iNumContinueCnt++; } else if (this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == SPACE) { iSpaceContinueCnt++; } else if (this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == TRAINSPACE) { iTrainSpaceContinueCnt++; } else if (this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == CONTAINER) { iContainerContinueCnt++; } } //非连续识别的情况,认为误识别,剔除误识别的大框信息 for (std::vector::iterator it = pInferenceResultDataPre_->vecSingleData.begin(); it != pInferenceResultDataPre_->vecSingleData.end();) { if (iHeadContinueCnt < 2 && it->iTargetType == HEAD) { LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " Head 框因非连续识别而过滤"; it = pInferenceResultDataPre_->vecSingleData.erase(it); continue; } if (iProContinueCnt < 2 && it->iTargetType == PRO) { LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " PRO 框因非连续识别而过滤"; it = pInferenceResultDataPre_->vecSingleData.erase(it); continue; } if (iNumContinueCnt < 2 && it->iTargetType == NUM) { LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " NUM 框因非连续识别而过滤"; it = pInferenceResultDataPre_->vecSingleData.erase(it); continue; } if (iSpaceContinueCnt < 2 && it->iTargetType == SPACE) { LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " SPACE 框因非连续识别而过滤"; it = pInferenceResultDataPre_->vecSingleData.erase(it); continue; } if (iTrainSpaceContinueCnt < 2 && it->iTargetType == TRAINSPACE) { LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " TRAINSPACE 框因非连续识别而过滤"; it = pInferenceResultDataPre_->vecSingleData.erase(it); continue; } // if (iTrainSpaceContinueCnt < 2 && it->iTargetType == CONTAINER) // { // LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " 
CONTAINER 框因非连续识别而过滤"; // it = pInferenceResultDataPre_->vecSingleData.erase(it); // continue; // } it++; } //判定行驶方向, 记录Direction文件信息 if (g_come_direction == DIRECTION_UNKNOWN) { LogInfo << "暂未判断出来车方向"; this->calculateDirection(); // if (g_come_direction != DIRECTION_UNKNOWN) this->sendComeTrain(pProcessData->strTrainDate, pProcessData->strTrainName, iDirection_); } if (g_come_direction != DIRECTION_UNKNOWN) { std::string strFilePath = this->baseConfig_.strDebugResultPath + "/" + pInferenceResultDataPre_->strTrainDate + "/" + StringUtil::getins()->replace_all_distinct(pInferenceResultDataPre_->strTrainTime, ":", "-") + "/" + "jpg/" + std::to_string(pInferenceResultDataPre_->iFrameId) + ".json"; Json::Value jvJsonInfo; if (!this->readJson(strFilePath, jvJsonInfo, 10)) { LogError << "读取JSON失败,需检查是否占用"; } jvJsonInfo["direction"] = g_come_direction.load(); if (!FileUtil::getins()->writeJsonInfo(jvJsonInfo, strFilePath)) { LogError << "来车方向存储失败:" << strFilePath; } else { LogDebug << "来车方向为(1左,2右):" << g_come_direction; } } //主摄像头校验是否停车 int iTrainStatusTemp = iTrainStatus_; iTrainStatus_ = this->getTrainStatus(); iTrainStatusTemp = iTrainStatus_; if (iTrainStatus_ == TRAINSTATUS_STOP) { //停车 } else if (iTrainStatus_ == TRAINSTATUS_BACK) { //倒车 addBackInfo(); iTrainStatusTemp = TRAINSTATUS_STOP; } else if(iTrainStatus_ == TRAINSTATUS_RUN) { /* 正向行驶需先把倒车产生的倒车数据处理完毕,即使车辆回到原倒车点,再开始识别行驶数据 */ if(!this->isEndDealBackInfo()) { iTrainStatusTemp = TRAINSTATUS_STOP; } } LogDebug << " 帧:" << this->pInferenceResultDataPre_->iFrameId << " 火车实时运行状态:" << iTrainStatus_ << "(0无车,1运行,2停车,3倒车) 转换后运行状态:" << iTrainStatusTemp; this->pInferenceResultDataPre_->iTrainStatus = iTrainStatusTemp; // this->sendComeTrain(pProcessData->strTrainDate, pProcessData->strTrainName, iDirection_); //上一帧,push端口0 outputQueMap_[strPort0_]->push(std::static_pointer_cast(pInferenceResultDataPre_)); } bool TrainStep1FilterEngine::readJson(std::string &strFilePath, Json::Value &jvInfo, int i) { i--; if 
(access(strFilePath.c_str(), F_OK) != 0) { LogWarn << "文件:" << strFilePath << " 不存在"; return i > 0 ? this->readJson(strFilePath, jvInfo, i) : false; } if (!FileUtil::getins()->readJsonInfo(jvInfo, strFilePath)) { LogError << "读取json文件失败:" << strFilePath; return i > 0 ? this->readJson(strFilePath, jvInfo, i) : false; } return true; } APP_ERROR TrainStep1FilterEngine::Process() { int iRet = APP_ERR_OK; while (!isStop_) { std::shared_ptr pVoidData0 = nullptr; inputQueMap_[strPort0_]->pop(pVoidData0); if (nullptr == pVoidData0) { usleep(1000); //1ms continue; } std::shared_ptr pInferenceResultData = std::static_pointer_cast(pVoidData0); // 不识别集装箱的情况下,1帧最多识别4个大框。[(车头、车厢间隔、间隔、车号); (车号、车厢间隔、间隔、属性)] if (pInferenceResultData->vecSingleData.size() > 5 && !this->identifyConfig_.bContainerDetect) { LogWarn << " frameId:" << pInferenceResultData->iFrameId << " 识别到的目标个数超出预期 size:" << pInferenceResultData->vecSingleData.size(); pInferenceResultData->vecSingleData.clear(); continue; } // 识别集装箱的情况下,1帧最多识别5个大框。[(车头、车厢间隔、间隔、车号、集装箱); (车号、车厢间隔、间隔、属性、集装箱)] if (pInferenceResultData->vecSingleData.size() > 5 && this->identifyConfig_.bContainerDetect) { LogWarn << " frameId:" << pInferenceResultData->iFrameId << " 识别到的目标个数超出预期 size:" << pInferenceResultData->vecSingleData.size(); pInferenceResultData->vecSingleData.clear(); continue; } // 根据当前帧数据,处理上一帧数据 this->dealProcessDataPre(pInferenceResultData); this->pInferenceResultDataPre_ = pInferenceResultData; if (pInferenceResultData->bIsEnd) { // 结束帧,push端口0 LogDebug << " frameid:" << pInferenceResultData->iFrameId << " isEnd:" << pInferenceResultData->bIsEnd; outputQueMap_[strPort0_]->push(std::static_pointer_cast(pInferenceResultData), true); this->initParam(); } } return APP_ERR_OK; } \ No newline at end of file +#include "TrainStep1FilterEngine.h" using namespace ai_matrix; //namespace //{ // //按照x坐标排列 // bool CompareX(const SingleData &a, const SingleData &b) // { // return a.fLTX < b.fLTX; // } //} 
TrainStep1FilterEngine::TrainStep1FilterEngine() {} TrainStep1FilterEngine::~TrainStep1FilterEngine() {} APP_ERROR TrainStep1FilterEngine::Init() { strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0"; this->baseConfig_ = Config::getins()->getBaseConfig(); this->dataSourceConfig_ = Config::getins()->getDataSourceConfig(); this->identifyConfig_ = Config::getins()->getIdentifyConfig(); this->mapTargetStr_.insert(std::make_pair(NUM, "NUM")); this->mapTargetStr_.insert(std::make_pair(PRO, "PRO")); this->mapTargetStr_.insert(std::make_pair(HEAD, "HEAD")); this->mapTargetStr_.insert(std::make_pair(SPACE, "SPACE"));//SPACE this->mapTargetStr_.insert(std::make_pair(TRAINSPACE, "SPACE"));//SPACE this->mapTargetStr_.insert(std::make_pair(CONTAINER, "CONTAINER"));//CONTAINER this->initParam(); LogInfo << "TrainStep1FilterEngine Init ok"; return APP_ERR_OK; } APP_ERROR TrainStep1FilterEngine::DeInit() { LogInfo << "TrainStep1FilterEngine DeInit ok"; return APP_ERR_OK; } /** * 参数初始化(列车结束时需调用) */ void TrainStep1FilterEngine::initParam() { this->pInferenceResultDataPre_ = nullptr; this->iNotChgCount_ = 0; while (!this->stackBackInfo_.empty()) { this->stackBackInfo_.pop(); } while (!this->queInferenceResultData_.empty()) { this->queInferenceResultData_.pop(); } iTrainStatus_ = TRAINSTATUS_RUN; g_come_direction = DIRECTION_UNKNOWN; mapCalDirection_.clear(); } void TrainStep1FilterEngine::addBackInfo() { std::string strAllClassType; for (size_t i = 0; i < this->pInferenceResultDataPre_->vecSingleData.size(); i++) { if (strAllClassType.find(this->mapTargetStr_[this->pInferenceResultDataPre_->vecSingleData[i].iTargetType]) != std::string::npos) { continue; } strAllClassType += this->mapTargetStr_[this->pInferenceResultDataPre_->vecSingleData[i].iTargetType]; } if (strAllClassType.empty()) { return; } TrainBackInfo trainBackInfo; trainBackInfo.pInferenceResultData = this->pInferenceResultDataPre_; trainBackInfo.strAllClassType = strAllClassType; if 
(this->stackBackInfo_.empty()) { this->stackBackInfo_.push(trainBackInfo); LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 新增倒车信息:" << strAllClassType << " 当前数量:" << this->stackBackInfo_.size(); } else { TrainBackInfo trainBackInfoTop = stackBackInfo_.top(); if (trainBackInfoTop.strAllClassType != trainBackInfo.strAllClassType) { if (((g_come_direction == DIRECTION_RIGHT && this->dataSourceConfig_.iRightFirst == RIGHT_RUN_AND_PRO_FIRST) || (g_come_direction == DIRECTION_LEFT && this->dataSourceConfig_.iLeftFirst == LEFT_RUN_AND_PRO_FIRST)) && ((trainBackInfo.strAllClassType == "SPACE" && (trainBackInfoTop.strAllClassType == "PROSPACE" || trainBackInfoTop.strAllClassType == "SPACEPRO")) || (trainBackInfo.strAllClassType == "NUM" && (trainBackInfoTop.strAllClassType == "NUMSPACE" || trainBackInfoTop.strAllClassType == "SPACENUM")) || ((trainBackInfo.strAllClassType == "PROSPACE" || trainBackInfo.strAllClassType == "SPACEPRO") && trainBackInfoTop.strAllClassType == "PRO"))) { return; } if (((g_come_direction == DIRECTION_RIGHT && this->dataSourceConfig_.iRightFirst == RIGHT_RUN_AND_NUM_FIRST) || (g_come_direction == DIRECTION_LEFT && this->dataSourceConfig_.iRightFirst == LEFT_RUN_AND_NUM_FIRST)) && ((trainBackInfo.strAllClassType == "SPACE" && (trainBackInfoTop.strAllClassType == "NUMSPACE" || trainBackInfoTop.strAllClassType == "SPACENUM")) || (trainBackInfo.strAllClassType == "PRO" && (trainBackInfoTop.strAllClassType == "PROSPACE" || trainBackInfoTop.strAllClassType == "SPACEPRO")) || ((trainBackInfo.strAllClassType == "NUMSPACE" || trainBackInfo.strAllClassType == "SPACENUM") && trainBackInfoTop.strAllClassType == "NUM"))) { return; } this->stackBackInfo_.push(trainBackInfo); LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 新增倒车信息:" << strAllClassType << " 当前数量:" << this->stackBackInfo_.size(); } } } bool TrainStep1FilterEngine::isEndDealBackInfo() { if (this->stackBackInfo_.empty()) { return true; } bool bPopFlag = 
false; if (this->pInferenceResultDataPre_->vecSingleData.size() == 0) return false; /* 处理倒车数据时,数据需设置为倒车,主要是保证这样的数据后面Engine不处理,防止切分车厢出错。 类型不相等时,就pop,当pop后,还剩一个数据时,则表示已经回到了刚开始倒车的地方。(只剩一个数据的逻辑在上方) 处理最后一个时,不能只判断下类型相同就弹出。需要控制下位置。(要么类型相同位置合适,要么类型不相同) 正向为向左行驶,则当前数据的位置尽量小于等于栈中最后一个元素的位置。 正向为向右行驶,则当前数据的位置尽量大于等于栈中最后一个元素的位置。 */ // std::sort(this->pInferenceResultDataPre_->vecSingleData.begin(), this->pInferenceResultDataPre_->vecSingleData.end(), CompareX); std::string strAllClassType; for (size_t i = 0; i < this->pInferenceResultDataPre_->vecSingleData.size(); i++) { if (strAllClassType.find(mapTargetStr_[this->pInferenceResultDataPre_->vecSingleData[i].iTargetType]) != std::string::npos) { continue; } strAllClassType += mapTargetStr_[this->pInferenceResultDataPre_->vecSingleData[i].iTargetType]; } if (strAllClassType.empty()) { return false; } if (stackBackInfo_.size() == 1) { TrainBackInfo trainBackInfoLast = stackBackInfo_.top(); std::shared_ptr pInferenceResultDataBack = std::static_pointer_cast(trainBackInfoLast.pInferenceResultData); // std::sort(pInferenceResultDataBack->vecSingleData.begin(), pInferenceResultDataBack->vecSingleData.end(), CompareX); for (size_t i = 0; i < pInferenceResultDataBack->vecSingleData.size(); i++) { int bFlag = -1; for (size_t j = 0; j < pInferenceResultDataPre_->vecSingleData.size(); j++) { if (pInferenceResultDataBack->vecSingleData[i].iClassId == pInferenceResultDataPre_->vecSingleData[j].iClassId) { if (pInferenceResultDataPre_->vecSingleData[j].fLTX < 1 || pInferenceResultDataBack->vecSingleData[i].fLTX < 1) { LogDebug << "大框X坐标小于1,判定为异常大框。过滤!!"; break; } bFlag = (pInferenceResultDataBack->vecSingleData[i].fLTX <= pInferenceResultDataPre_->vecSingleData[j].fLTX) ? 
1 : 0; LogDebug << "帧:" << pInferenceResultDataPre_->iFrameId << " 倒车前帧:" << pInferenceResultDataBack->iFrameId << " 恢复到原位:" << bFlag << " 当前框位置:" << pInferenceResultDataPre_->vecSingleData[i].fLTX << " 倒车前位置:" << pInferenceResultDataBack->vecSingleData[i].fLTX << "方向:" << g_come_direction; } } if ((g_come_direction == DIRECTION_LEFT && bFlag == 0) || (g_come_direction == DIRECTION_RIGHT && bFlag == 1)) { bPopFlag = true; break; } } if (bPopFlag) { LogDebug << "frameId:" << pInferenceResultDataPre_->iFrameId << " 恢复倒车前的位置:" << bPopFlag; stackBackInfo_.pop(); } } else { TrainBackInfo trainBackInfoTop_bak = stackBackInfo_.top(); stackBackInfo_.pop(); TrainBackInfo trainBackInfoTop = stackBackInfo_.top(); if (trainBackInfoTop.strAllClassType != strAllClassType) { stackBackInfo_.push(trainBackInfoTop_bak); LogDebug << "帧:" << pInferenceResultDataPre_->iFrameId << " 倒车信息:" << stackBackInfo_.size() << " 顶部倒车信息:" << trainBackInfoTop.strAllClassType << " 本次识别信息:" << strAllClassType; } else { LogDebug << "帧:" << pInferenceResultDataPre_->iFrameId << " 倒车信息:" << stackBackInfo_.size() << " 顶部倒车信息:" << trainBackInfoTop.strAllClassType << " 本次识别信息:" << strAllClassType << " 删除倒车信息:" << trainBackInfoTop_bak.strAllClassType; } } return stackBackInfo_.empty() ? true : false; } /** * 校验火车是否停止 * return : true:停止; false:非停止 1(正常行驶) 2(停车) 3(倒车) */ int TrainStep1FilterEngine::getTrainStatus() { if (g_come_direction == DIRECTION_UNKNOWN) { LogDebug << " frameId:" << this->pInferenceResultDataPre_->iFrameId << " 未判断出行车方向,暂定认为火车正常行驶中"; return TRAINSTATUS_RUN; } // 无框时,返回之前的列车状态 if (this->pInferenceResultDataPre_->vecSingleData.size() == 0) { return iTrainStatus_; } queInferenceResultData_.push(this->pInferenceResultDataPre_); if (queInferenceResultData_.size() < 3) { return TRAINSTATUS_RUN; } std::shared_ptr pInferenceResultDataFront = queInferenceResultData_.front(); // iNotChgCount_大于0表示有可能停车,此时pop队列数据要多留存几个。用最开始的数据来判断是否真正停车,如果每次只用上上帧判断当列车超级慢时可能判断为停车。 int iSizeTemp = iNotChgCount_ > 0 ? 
10 : 2; while (queInferenceResultData_.size() > iSizeTemp) { queInferenceResultData_.pop(); } // LogDebug << "frameId:" << pProcessData->iFrameId << " 判断运动状态队列 第一帧:" << postDataFront.iFrameId << " 队列size:" << quePostData_.size() << " iSizeTemp:" << iSizeTemp; bool bSameFlag = false; for (size_t i = 0; i < this->pInferenceResultDataPre_->vecSingleData.size(); i++) { SingleData singleDataBack = this->pInferenceResultDataPre_->vecSingleData[i]; for (size_t j = 0; j < pInferenceResultDataFront->vecSingleData.size(); j++) { SingleData singleDataFront = pInferenceResultDataFront->vecSingleData[j]; /* 使用iBigClassId,可能出现平车只有间隔大框,且间隔大框可以一会是平车间隔,一会是通用间隔。导致类别不一样 使用iTargetType,可能出现平车只有间隔大框,且间隔大框可以一会是平车间隔,一会是通用间隔。导致像素差判断不准。 */ if (singleDataFront.iTargetType != singleDataBack.iTargetType) { LogDebug << "判断前后帧识别的是否一致 上一个:" << singleDataFront.iTargetType << " 当前:" << singleDataBack.iTargetType; continue; } if (singleDataFront.iTargetType == CONTAINER) continue; bSameFlag = true; int iCenterBack = singleDataBack.fLTX + (singleDataBack.fRBX - singleDataBack.fLTX) / 2; int iCenterFront = singleDataFront.fLTX + (singleDataFront.fRBX - singleDataFront.fLTX) / 2; if (abs(iCenterBack - iCenterFront) > this->identifyConfig_.iChkstopPx) { iNotChgCount_ = 0; /* iCenterBack > iCenterFront 表示向右行驶,且原方向为向左行驶 iCenterBack < iCenterFront 表示向左行驶,且原方向为向右行驶 以上2种表示倒车。 */ if ((iCenterBack > iCenterFront && g_come_direction == DIRECTION_LEFT) || (iCenterBack < iCenterFront && g_come_direction == DIRECTION_RIGHT)) { if (this->identifyConfig_.iPartitionFrameSpan < (this->pInferenceResultDataPre_->iFrameId - pInferenceResultDataFront->iFrameId) && this->identifyConfig_.iSplitFrameSpanPx < abs(iCenterBack - iCenterFront)) { return TRAINSTATUS_RUN; } // LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 检测到火车倒车"; return TRAINSTATUS_BACK; } else { // LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 正常行驶"; return TRAINSTATUS_RUN; } } /* 小于10个像素表示可能停车,累计未变化次数。 
累计变化次数超过10次,返回停车 累计变化次数未超过10次,返回之前行驶状态 */ else { iNotChgCount_++; LogDebug << " frameId:" << this->pInferenceResultDataPre_->iFrameId << " 大框移动范围小(" << abs(iCenterBack - iCenterFront) << ") 判断停车计数:" << iNotChgCount_ << "/" << this->identifyConfig_.iChkstopCount; if (iNotChgCount_ > this->identifyConfig_.iChkstopCount) { LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 检测到火车停车"; return TRAINSTATUS_STOP; } else { // LogDebug << "frameId:" << pProcessData->iFrameId << " iTrainStatus_:" << iTrainStatus_; return iTrainStatus_; } } } } /* 未找到相同的框,说明是老框消失掉了,新框出现了。 按新框出现的位置判断是向左行驶,还是向右行驶。 */ // LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " bSameFlag:" << bSameFlag; if (!bSameFlag) { // std::sort(this->pInferenceResultDataPre_->vecSingleData.begin(), this->pInferenceResultDataPre_->vecSingleData.end(), CompareX); SingleData singleData = this->pInferenceResultDataPre_->vecSingleData.front(); if (g_come_direction == DIRECTION_LEFT) { singleData = this->pInferenceResultDataPre_->vecSingleData.back(); } // LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " fLTX:" << singleData.fLTX << " fRBX:" << singleData.fRBX; iNotChgCount_ = 0; int iCenter = singleData.fLTX + (singleData.fRBX - singleData.fLTX) / 2; int iValue = IMAGE_WIDTH / 2; if ((iCenter > iValue && g_come_direction == DIRECTION_RIGHT) || (iCenter < iValue && g_come_direction == DIRECTION_LEFT)) { /* 针对有效帧较少时,和上上帧比较没有同类型大框,且当前帧已行驶到画面中心导致误判的情况, 增加和上帧同类型大框的比较处理。 */ std::shared_ptr pInferenceResultDataMiddle = queInferenceResultData_.front(); for (size_t i = 0; i < pInferenceResultDataPre_->vecSingleData.size(); i++) { SingleData singleDataBack = pInferenceResultDataPre_->vecSingleData[i]; for (size_t j = 0; j < pInferenceResultDataMiddle->vecSingleData.size(); j++) { SingleData singleDataMiddle = pInferenceResultDataMiddle->vecSingleData[j]; if (singleDataMiddle.iTargetType != singleDataBack.iTargetType) { continue; } int iCenterBack = 
singleDataBack.fLTX + (singleDataBack.fRBX - singleDataBack.fLTX) / 2; int iCenterMiddle = singleDataMiddle.fLTX + (singleDataMiddle.fRBX - singleDataMiddle.fLTX) / 2; // 位置比较大于10个像素,则表示有移动。再判断时正向移动,还是倒车 LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " " << iCenterBack << "-" << iCenterMiddle << "=" << abs(iCenterBack - iCenterMiddle) << " 目标差值:" << this->identifyConfig_.iChkstopPx; if (abs(iCenterBack - iCenterMiddle) > this->identifyConfig_.iChkstopPx) { if ((iCenterBack > iCenterMiddle && g_come_direction == DIRECTION_LEFT) || (iCenterBack < iCenterMiddle && g_come_direction == DIRECTION_RIGHT)) { LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 检测到火车倒车"; return TRAINSTATUS_BACK; } else { LogDebug << "frameId:" << this->pInferenceResultDataPre_->iFrameId << " 正常行驶"; return TRAINSTATUS_RUN; } } } } // LogDebug << "frameId:" << pProcessData->iFrameId << " back2"; return iTrainStatus_; } } // LogDebug << "frameId:" << pProcessData->iFrameId << " iNotChgCount_:" << iNotChgCount_ << " run run"; return TRAINSTATUS_RUN; } /** * 计算行车方向新 */ void TrainStep1FilterEngine::calculateDirection() { /* 连续3帧同目标识别框信息 判断位置差异是否超过10px(判停车参数),且两两之间都是线性。如果符合则计算方向。 上述条件不符合则剔除第一个元素,再次累计连续3帧处理。 */ for (auto iter = this->pInferenceResultDataPre_->vecSingleData.begin(); iter != this->pInferenceResultDataPre_->vecSingleData.end(); iter++) { // 火车车头和集装箱号 暂不参与方向判断 if (iter->iClassId == CONTAINERNUM || iter->iClassId == TRAIN_HEAD) continue; CalculateInfo calInfo; calInfo.iFrameId = this->pInferenceResultDataPre_->iFrameId; calInfo.iBigClassId = iter->iClassId; calInfo.fCenterX = iter->fLTX + (iter->fRBX - iter->fLTX) / 2; calInfo.fTargetWidth = iter->fRBX - iter->fLTX; auto iterSubMap = this->mapCalDirection_.find(iter->iClassId); if (iterSubMap == this->mapCalDirection_.end()) { std::vector vecTemp; this->mapCalDirection_.insert(std::make_pair(iter->iClassId, vecTemp)); iterSubMap = this->mapCalDirection_.find(iter->iClassId); } 
iterSubMap->second.emplace_back(calInfo); if (iterSubMap->second.size() > 2) { LogDebug << " frameid:" << this->pInferenceResultDataPre_->iFrameId << " last:" << iterSubMap->second.at(2).iFrameId << " " << iterSubMap->second.at(2).fCenterX << " mid:" << iterSubMap->second.at(1).iFrameId << " " << iterSubMap->second.at(1).fCenterX << " pre:" << iterSubMap->second.at(0).iFrameId << " " << iterSubMap->second.at(0).fCenterX; //如果帧号连续,且移动位置大于15px,则计算方向 if (iterSubMap->second.at(2).iFrameId - iterSubMap->second.at(1).iFrameId != 1 || iterSubMap->second.at(1).iFrameId - iterSubMap->second.at(0).iFrameId != 1) { iterSubMap->second.erase(iterSubMap->second.begin()); continue; } if (abs(iterSubMap->second.at(0).fTargetWidth - iterSubMap->second.at(2).fTargetWidth) >= 1.5*this->identifyConfig_.iChkstopPx) { iterSubMap->second.erase(iterSubMap->second.begin()); continue; } int iLast = iterSubMap->second.at(2).fCenterX; int iMid = iterSubMap->second.at(1).fCenterX; int iPre = iterSubMap->second.at(0).fCenterX; if (abs(iPre - iLast) <= this->identifyConfig_.iChkstopPx) { iterSubMap->second.erase(iterSubMap->second.begin()); continue; } if (iPre <= iMid && iMid <= iLast) { g_come_direction = DIRECTION_RIGHT; } else if (iPre >= iMid && iMid >= iLast) { g_come_direction = DIRECTION_LEFT; } else { iterSubMap->second.erase(iterSubMap->second.begin()); continue; } // LogDebug << " frameid:" << pInferenceResultDataPre_->iFrameId << " iDirection_:" << g_come_direction; } } } void TrainStep1FilterEngine::sendComeTrain(const std::string strTrainDate, const std::string strTrainName, const int iDirection) { std::string message = "{\"cometime\":\"" + strTrainDate + " " + strTrainName + "\",\"type\":\"1\",\"direction\":" + std::to_string(iDirection == this->dataSourceConfig_.iDirection ? 
1:-1) + "}"; LogWarn << message; outputQueMap_[engineName_ + "_" + std::to_string(engineId_) + "_1"]->push(std::static_pointer_cast(std::make_shared(message))); } /** * 根据当前帧数据,处理上一帧数据 * inParam : std::shared_ptr pProcessData :当前帧数据 * outParam: N/A * return : N/A */ void TrainStep1FilterEngine::dealProcessDataPre(std::shared_ptr pInferenceResultData) { /* 目标框是否是连续识别,只识别到一帧的目标框认为误识别,过滤掉。 判断上一帧,当前帧 是否有框 上一帧有框,当前帧有框,说明连续识别,正常处理。 上一帧有框,当前帧无框,则非连续识别,过滤大框 上一帧无框,当前帧有框,则连续识别个数置零。 上一帧无框,当前帧无框,则连续识别个数置零。 */ if (!this->pInferenceResultDataPre_) return; int iHeadContinueCnt = 0; int iProContinueCnt = 0; int iNumContinueCnt = 0; int iSpaceContinueCnt = 0; int iTrainSpaceContinueCnt = 0; int iContainerContinueCnt = 0; for (int i = 0; i < pInferenceResultData->vecSingleData.size(); i++) { if (pInferenceResultData->vecSingleData[i].iTargetType == HEAD) { iHeadContinueCnt++; } else if (pInferenceResultData->vecSingleData[i].iTargetType == PRO) { iProContinueCnt++; } else if (pInferenceResultData->vecSingleData[i].iTargetType == NUM) { iNumContinueCnt++; } else if (pInferenceResultData->vecSingleData[i].iTargetType == SPACE) { iSpaceContinueCnt++; } else if (pInferenceResultData->vecSingleData[i].iTargetType == TRAINSPACE) { iTrainSpaceContinueCnt++; } else if (pInferenceResultData->vecSingleData[i].iTargetType == CONTAINER) { iContainerContinueCnt++; } } for (int i = 0; i < this->pInferenceResultDataPre_->vecSingleData.size(); i++) { if (this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == HEAD) { iHeadContinueCnt++; } else if (this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == PRO) { iProContinueCnt++; } else if (this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == NUM) { iNumContinueCnt++; } else if (this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == SPACE) { iSpaceContinueCnt++; } else if (this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == TRAINSPACE) { iTrainSpaceContinueCnt++; } else if 
(this->pInferenceResultDataPre_->vecSingleData[i].iTargetType == CONTAINER) { iContainerContinueCnt++; } } //非连续识别的情况,认为误识别,剔除误识别的大框信息 for (std::vector::iterator it = pInferenceResultDataPre_->vecSingleData.begin(); it != pInferenceResultDataPre_->vecSingleData.end();) { if (iHeadContinueCnt < 2 && it->iTargetType == HEAD) { LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " Head 框因非连续识别而过滤"; it = pInferenceResultDataPre_->vecSingleData.erase(it); continue; } if (iProContinueCnt < 2 && it->iTargetType == PRO) { LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " PRO 框因非连续识别而过滤"; it = pInferenceResultDataPre_->vecSingleData.erase(it); continue; } if (iNumContinueCnt < 2 && it->iTargetType == NUM) { LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " NUM 框因非连续识别而过滤"; it = pInferenceResultDataPre_->vecSingleData.erase(it); continue; } if (iSpaceContinueCnt < 2 && it->iTargetType == SPACE) { LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " SPACE 框因非连续识别而过滤"; it = pInferenceResultDataPre_->vecSingleData.erase(it); continue; } if (iTrainSpaceContinueCnt < 2 && it->iTargetType == TRAINSPACE) { LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " TRAINSPACE 框因非连续识别而过滤"; it = pInferenceResultDataPre_->vecSingleData.erase(it); continue; } // if (iTrainSpaceContinueCnt < 2 && it->iTargetType == CONTAINER) // { // LogDebug << " frameId:" << pInferenceResultDataPre_->iFrameId << " CONTAINER 框因非连续识别而过滤"; // it = pInferenceResultDataPre_->vecSingleData.erase(it); // continue; // } it++; } //判定行驶方向, 记录Direction文件信息 if (g_come_direction == DIRECTION_UNKNOWN) { LogInfo << "暂未判断出来车方向"; this->calculateDirection(); // if (g_come_direction != DIRECTION_UNKNOWN) this->sendComeTrain(pProcessData->strTrainDate, pProcessData->strTrainName, iDirection_); } if (g_come_direction != DIRECTION_UNKNOWN) { std::string strFilePath = this->baseConfig_.strDebugResultPath + "/" + pInferenceResultDataPre_->strTrainDate + "/" 
+ StringUtil::getins()->replace_all_distinct(pInferenceResultDataPre_->strTrainTime, ":", "-") + "/" + "jpg/" + std::to_string(pInferenceResultDataPre_->iFrameId) + ".json"; Json::Value jvJsonInfo; if (!this->readJson(strFilePath, jvJsonInfo, 10)) { LogError << "读取JSON失败,需检查是否占用"; } jvJsonInfo["direction"] = g_come_direction.load(); if (!FileUtil::getins()->writeJsonInfo(jvJsonInfo, strFilePath)) { LogError << "来车方向存储失败:" << strFilePath; } else { LogDebug << "来车方向为(1左,2右):" << g_come_direction; } } //主摄像头校验是否停车 int iTrainStatusTemp = iTrainStatus_; iTrainStatus_ = this->getTrainStatus(); iTrainStatusTemp = iTrainStatus_; if (iTrainStatus_ == TRAINSTATUS_STOP) { //停车 } else if (iTrainStatus_ == TRAINSTATUS_BACK) { //倒车 addBackInfo(); iTrainStatusTemp = TRAINSTATUS_STOP; } else if(iTrainStatus_ == TRAINSTATUS_RUN) { /* 正向行驶需先把倒车产生的倒车数据处理完毕,即使车辆回到原倒车点,再开始识别行驶数据 */ if(!this->isEndDealBackInfo()) { iTrainStatusTemp = TRAINSTATUS_STOP; } } LogDebug << " 帧:" << this->pInferenceResultDataPre_->iFrameId << " 火车实时运行状态:" << iTrainStatus_ << "(0无车,1运行,2停车,3倒车) 转换后运行状态:" << iTrainStatusTemp; this->pInferenceResultDataPre_->iTrainStatus = iTrainStatusTemp; // this->sendComeTrain(pProcessData->strTrainDate, pProcessData->strTrainName, iDirection_); //上一帧,push端口0 outputQueMap_[strPort0_]->push(std::static_pointer_cast(pInferenceResultDataPre_)); } bool TrainStep1FilterEngine::readJson(std::string &strFilePath, Json::Value &jvInfo, int i) { i--; if (access(strFilePath.c_str(), F_OK) != 0) { LogWarn << "文件:" << strFilePath << " 不存在"; return i > 0 ? this->readJson(strFilePath, jvInfo, i) : false; } if (!FileUtil::getins()->readJsonInfo(jvInfo, strFilePath)) { LogError << "读取json文件失败:" << strFilePath; return i > 0 ? 
this->readJson(strFilePath, jvInfo, i) : false; } return true; } APP_ERROR TrainStep1FilterEngine::Process() { int iRet = APP_ERR_OK; while (!isStop_) { std::shared_ptr pVoidData0 = nullptr; inputQueMap_[strPort0_]->pop(pVoidData0); if (nullptr == pVoidData0) { usleep(1000); //1ms continue; } std::shared_ptr pInferenceResultData = std::static_pointer_cast(pVoidData0); // 不识别集装箱的情况下,1帧最多识别4个大框。[(车头、车厢间隔、间隔、车号); (车号、车厢间隔、间隔、属性)] if (pInferenceResultData->vecSingleData.size() > 5 && !this->identifyConfig_.bContainerDetect) { LogWarn << " frameId:" << pInferenceResultData->iFrameId << " 识别到的目标个数超出预期 size:" << pInferenceResultData->vecSingleData.size(); pInferenceResultData->vecSingleData.clear(); continue; } // 识别集装箱的情况下,1帧最多识别5个大框。[(车头、车厢间隔、间隔、车号、集装箱); (车号、车厢间隔、间隔、属性、集装箱)] if (pInferenceResultData->vecSingleData.size() > 5 && this->identifyConfig_.bContainerDetect) { LogWarn << " frameId:" << pInferenceResultData->iFrameId << " 识别到的目标个数超出预期 size:" << pInferenceResultData->vecSingleData.size(); pInferenceResultData->vecSingleData.clear(); continue; } // 根据当前帧数据,处理上一帧数据 this->dealProcessDataPre(pInferenceResultData); this->pInferenceResultDataPre_ = pInferenceResultData; if (pInferenceResultData->bIsEnd) { // 结束帧,push端口0 LogDebug << " frameid:" << pInferenceResultData->iFrameId << " isEnd:" << pInferenceResultData->bIsEnd; outputQueMap_[strPort0_]->push(std::static_pointer_cast(pInferenceResultData), true); this->initParam(); } } return APP_ERR_OK; } \ No newline at end of file diff --git a/engine/Step1InferenceEngine/TrainStep1InferenceEngine.cpp b/engine/Step1InferenceEngine/TrainStep1InferenceEngine.cpp index 7a5e5eb..8918f52 100644 --- a/engine/Step1InferenceEngine/TrainStep1InferenceEngine.cpp +++ b/engine/Step1InferenceEngine/TrainStep1InferenceEngine.cpp @@ -203,12 +203,12 @@ void TrainStep1InferenceEngine::filterInvalidInfo(std::vector &vecI it->bbox[2] <= this->dataSourceConfig_.vecIdentifyAreas[2] && it->bbox[3] <= this->dataSourceConfig_.vecIdentifyAreas[3])) 
{ - LogDebug << "frameId:" << pVTrainStep1Data->iFrameId - << " 类别:" << it->class_id << " 超出识别区域-识别区域:(" - << this->dataSourceConfig_.vecIdentifyAreas[0] << "," - << this->dataSourceConfig_.vecIdentifyAreas[1] << "),(" - << this->dataSourceConfig_.vecIdentifyAreas[2] << "," - << this->dataSourceConfig_.vecIdentifyAreas[2] << ")"; +// LogDebug << "frameId:" << pVTrainStep1Data->iFrameId +// << " 类别:" << it->class_id << " 超出识别区域-识别区域:(" +// << this->dataSourceConfig_.vecIdentifyAreas[0] << "," +// << this->dataSourceConfig_.vecIdentifyAreas[1] << "),(" +// << this->dataSourceConfig_.vecIdentifyAreas[2] << "," +// << this->dataSourceConfig_.vecIdentifyAreas[2] << ")"; it = vecInferenceResult.erase(it); continue; } @@ -328,7 +328,7 @@ void TrainStep1InferenceEngine::filterInvalidInfo(std::vector &vecI // } //主摄像头1帧如果只识别2个大框,如果非平车的车号和属性场景,则必有间隔框 - if (vecInferenceResult.size() >= 2) + if (vecInferenceResult.size() > 2) { int iHeight0 = vecInferenceResult[0].bbox[1] / 2 + vecInferenceResult[0].bbox[3] / 2; int iHeight1 = vecInferenceResult[1].bbox[1] / 2 + vecInferenceResult[1].bbox[3] / 2; diff --git a/engine/Step2DataReadEngine/TrainStep2DataReadEngine.cpp b/engine/Step2DataReadEngine/TrainStep2DataReadEngine.cpp index a79d0ad..5589809 100644 --- a/engine/Step2DataReadEngine/TrainStep2DataReadEngine.cpp +++ b/engine/Step2DataReadEngine/TrainStep2DataReadEngine.cpp @@ -168,7 +168,6 @@ APP_ERROR TrainStep2DataReadEngine::Process() pVStep2InputData->bIsEnd = !jvFrameInfo.isMember("isEnd") || jvFrameInfo["isEnd"].asBool(); outputQueMap_[strPort0_]->push(std::static_pointer_cast(pVStep2InputData), true); - // outputQueMap_[strPort1_]->push(std::static_pointer_cast(pVStep2InputData), true); } } return APP_ERR_OK; diff --git a/engine/Step2InferenceEngine/TrainStep2InferenceEngine.cpp b/engine/Step2InferenceEngine/TrainStep2InferenceEngine.cpp index 399a2be..7b2aba1 100644 --- a/engine/Step2InferenceEngine/TrainStep2InferenceEngine.cpp +++ 
b/engine/Step2InferenceEngine/TrainStep2InferenceEngine.cpp @@ -1 +1 @@ -#include "TrainStep2InferenceEngine.h" #include #include "myqueue.h" using namespace ai_matrix; TrainStep2InferenceEngine::TrainStep2InferenceEngine() {} TrainStep2InferenceEngine::~TrainStep2InferenceEngine() {} APP_ERROR TrainStep2InferenceEngine::Init() { strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0"; this->modelConfig_ = Config::getins()->getModelByTrainStep2Config(); //读取模型信息 int iFolderExist = access(modelConfig_.strModelPath.c_str(), R_OK); if (iFolderExist == -1) { LogError << "模型:" << modelConfig_.strModelPath << " 不存在!"; return false; } class_num = this->modelConfig_.vecClass.size(); score_threshold = this->modelConfig_.fScoreThreshold; int ret = initModel(); if (ret != APP_ERR_OK) { LogError << "Failed to read model info, ret = " << ret; return ret; } LogInfo << "TrainStep2InferenceEngine Init ok"; return APP_ERR_OK; } APP_ERROR TrainStep2InferenceEngine::initModel() { modelinfo.yolov5ClearityModelParam.uiClassNum = class_num; modelinfo.yolov5ClearityModelParam.uiClearNum = clear_num; modelinfo.yolov5ClearityModelParam.uiDetSize = det_size; modelinfo.yolov5ClearityModelParam.fScoreThreshold = score_threshold; modelinfo.yolov5ClearityModelParam.fNmsThreshold = nms_threshold; modelinfo.modelCommonInfo.uiModelWidth = model_width; modelinfo.modelCommonInfo.uiModelHeight = model_height; modelinfo.modelCommonInfo.uiInputSize = input_size; modelinfo.modelCommonInfo.uiOutputSize = output_size; modelinfo.modelCommonInfo.uiChannel = INPUT_CHANNEL; modelinfo.modelCommonInfo.uiBatchSize = batch_size; modelinfo.modelCommonInfo.strInputBlobName = INPUT_BLOB_NAME; modelinfo.modelCommonInfo.strOutputBlobName = OUTPUT_BLOB_NAME; string strModelName = ""; int nRet = yolov5model.YoloV5ClearityInferenceInit(&modelinfo, strModelName, this->modelConfig_.strModelPath); if (nRet != 0) { LogInfo << "YoloV5ClassifyInferenceInit nRet:" << nRet; return APP_ERR_COMM_READ_FAIL; } return 
APP_ERR_OK; } APP_ERROR TrainStep2InferenceEngine::DeInit() { yolov5model.YoloV5ClearityInferenceDeinit(); LogInfo << "TrainStep2InferenceEngine DeInit ok"; return APP_ERR_OK; } APP_ERROR TrainStep2InferenceEngine::Process() { int iRet = APP_ERR_OK; while (!isStop_) { std::shared_ptr pVoidData0 = nullptr; inputQueMap_[strPort0_]->pop(pVoidData0); if (nullptr == pVoidData0) { usleep(1000); //1ms continue; } std::shared_ptr pVStep2InputData = std::static_pointer_cast(pVoidData0); std::shared_ptr pVStep2OutputData = std::make_shared(); pVStep2OutputData->strTrainDate = pVStep2InputData->strTrainDate; pVStep2OutputData->strTrainTime = pVStep2InputData->strTrainTime; pVStep2OutputData->iFrameId = pVStep2InputData->iFrameId; pVStep2OutputData->bIsEnd = pVStep2InputData->bIsEnd; // LogWarn << "-- 0 -->" << pVStep2InputData->vecSingleData.size(); for (int i = 0; i < pVStep2InputData->vecSingleData.size(); i++) { Step2ResultData step2ResultData; step2ResultData.fLTX = pVStep2InputData->vecSingleData[i].fLTX; step2ResultData.fLTY = pVStep2InputData->vecSingleData[i].fLTY; step2ResultData.fRBX = pVStep2InputData->vecSingleData[i].fRBX; step2ResultData.fRBY = pVStep2InputData->vecSingleData[i].fRBY; step2ResultData.iClassId = pVStep2InputData->vecSingleData[i].iClassId; step2ResultData.fScore = pVStep2InputData->vecSingleData[i].fScore; step2ResultData.iTrainIndex = pVStep2InputData->vecSingleData[i].iTrainIndex; if (pVStep2InputData->vecSingleData[i].iTargetType >= SPACE) { pVStep2OutputData->vecStep2ResultData.emplace_back(step2ResultData); continue; }; // LogDebug << "frameId:" << pVStep2InputData->iFrameId // << " [" // << pVStep2InputData->vecSingleData[i].fLTX // << "," // << pVStep2InputData->vecSingleData[i].fLTY // << "],[" // << pVStep2InputData->vecSingleData[i].fRBX // << "," // << pVStep2InputData->vecSingleData[i].fRBY // << "]"; cv::Rect rect(cv::Point(pVStep2InputData->vecSingleData[i].fLTX, pVStep2InputData->vecSingleData[i].fLTY), 
cv::Point(pVStep2InputData->vecSingleData[i].fRBX, pVStep2InputData->vecSingleData[i].fRBY)); cv::Mat image = pVStep2InputData->cvImage(rect).clone(); //进行推理 std::vector vecInferenceResult; auto start = std::chrono::system_clock::now(); // 计时开始 yolov5model.YoloV5ClearityInferenceModel(image, vecInferenceResult, 2); auto end = std::chrono::system_clock::now(); for (int j = 0; j < vecInferenceResult.size(); j++) { SingleData singledata; singledata.iLine = vecInferenceResult[j].clear_id; singledata.iClassId = vecInferenceResult[j].class_id; singledata.fScore = vecInferenceResult[j].class_conf; singledata.fLTX = vecInferenceResult[j].bbox[0]; singledata.fLTY = vecInferenceResult[j].bbox[1]; singledata.fRBX = vecInferenceResult[j].bbox[2]; singledata.fRBY = vecInferenceResult[j].bbox[3]; singledata.fClear = vecInferenceResult[j].clear_id; this->resetLocation(singledata, pVStep2InputData->vecSingleData[i]); step2ResultData.vecSingleData.emplace_back(singledata); // LogDebug << "frameId:" << pVStep2InputData->iFrameId // << " --iClassId:" << singledata.iClassId // << " iLine:" << singledata.iLine // << " score=" << singledata.fScore // << " [" // << singledata.fLTX << "," << singledata.fLTY // << "],[" // << singledata.fRBX << "," << singledata.fRBY // << "]"; } pVStep2OutputData->vecStep2ResultData.emplace_back(step2ResultData); } outputQueMap_[strPort0_]->push(std::static_pointer_cast(pVStep2OutputData), true); } return APP_ERR_OK; } void TrainStep2InferenceEngine::resetLocation(SingleData &singleData, SingleData &step1SingleData, float fResizeRatio) { singleData.fLTX = singleData.fLTX * fResizeRatio + step1SingleData.fLTX; singleData.fLTY = singleData.fLTY * fResizeRatio + step1SingleData.fLTY; singleData.fRBX = singleData.fRBX * fResizeRatio + step1SingleData.fLTX; singleData.fRBY = singleData.fRBY * fResizeRatio + step1SingleData.fLTY; singleData.fLTX = (singleData.fLTX < IMAGE_WIDTH) ? 
singleData.fLTX : IMAGE_WIDTH; singleData.fLTY = (singleData.fLTY < IMAGE_HEIGHT) ? singleData.fLTY : IMAGE_HEIGHT; singleData.fRBX = (singleData.fRBX < IMAGE_WIDTH) ? singleData.fRBX : IMAGE_WIDTH; singleData.fRBY = (singleData.fRBY < IMAGE_HEIGHT) ? singleData.fRBY : IMAGE_HEIGHT; } \ No newline at end of file +#include "TrainStep2InferenceEngine.h" #include #include "myqueue.h" using namespace ai_matrix; TrainStep2InferenceEngine::TrainStep2InferenceEngine() {} TrainStep2InferenceEngine::~TrainStep2InferenceEngine() {} APP_ERROR TrainStep2InferenceEngine::Init() { strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0"; this->modelConfig_ = Config::getins()->getModelByTrainStep2Config(); //读取模型信息 int iFolderExist = access(modelConfig_.strModelPath.c_str(), R_OK); if (iFolderExist == -1) { LogError << "模型:" << modelConfig_.strModelPath << " 不存在!"; return false; } class_num = this->modelConfig_.vecClass.size(); score_threshold = this->modelConfig_.fScoreThreshold; int ret = initModel(); if (ret != APP_ERR_OK) { LogError << "Failed to read model info, ret = " << ret; return ret; } LogInfo << "TrainStep2InferenceEngine Init ok"; return APP_ERR_OK; } APP_ERROR TrainStep2InferenceEngine::initModel() { modelinfo.yolov5ClearityModelParam.uiClassNum = class_num; modelinfo.yolov5ClearityModelParam.uiClearNum = clear_num; modelinfo.yolov5ClearityModelParam.uiDetSize = det_size; modelinfo.yolov5ClearityModelParam.fScoreThreshold = score_threshold; modelinfo.yolov5ClearityModelParam.fNmsThreshold = nms_threshold; modelinfo.modelCommonInfo.uiModelWidth = model_width; modelinfo.modelCommonInfo.uiModelHeight = model_height; modelinfo.modelCommonInfo.uiInputSize = input_size; modelinfo.modelCommonInfo.uiOutputSize = output_size; modelinfo.modelCommonInfo.uiChannel = INPUT_CHANNEL; modelinfo.modelCommonInfo.uiBatchSize = batch_size; modelinfo.modelCommonInfo.strInputBlobName = INPUT_BLOB_NAME; modelinfo.modelCommonInfo.strOutputBlobName = OUTPUT_BLOB_NAME; string 
strModelName = ""; int nRet = yolov5model.YoloV5ClearityInferenceInit(&modelinfo, strModelName, this->modelConfig_.strModelPath); if (nRet != 0) { LogInfo << "YoloV5ClassifyInferenceInit nRet:" << nRet; return APP_ERR_COMM_READ_FAIL; } return APP_ERR_OK; } APP_ERROR TrainStep2InferenceEngine::DeInit() { yolov5model.YoloV5ClearityInferenceDeinit(); LogInfo << "TrainStep2InferenceEngine DeInit ok"; return APP_ERR_OK; } APP_ERROR TrainStep2InferenceEngine::Process() { int iRet = APP_ERR_OK; while (!isStop_) { std::shared_ptr pVoidData0 = nullptr; inputQueMap_[strPort0_]->pop(pVoidData0); if (nullptr == pVoidData0) { usleep(1000); //1ms continue; } std::shared_ptr pVStep2InputData = std::static_pointer_cast(pVoidData0); std::shared_ptr pVStep2OutputData = std::make_shared(); pVStep2OutputData->strTrainDate = pVStep2InputData->strTrainDate; pVStep2OutputData->strTrainTime = pVStep2InputData->strTrainTime; pVStep2OutputData->iFrameId = pVStep2InputData->iFrameId; pVStep2OutputData->bIsEnd = pVStep2InputData->bIsEnd; if (pVStep2InputData->cvImage.empty()) { usleep(1000); //1ms continue; } // LogWarn << "-- 0 -->" << pVStep2InputData->vecSingleData.size(); for (int i = 0; i < pVStep2InputData->vecSingleData.size(); i++) { Step2ResultData step2ResultData; step2ResultData.fLTX = pVStep2InputData->vecSingleData[i].fLTX; step2ResultData.fLTY = pVStep2InputData->vecSingleData[i].fLTY; step2ResultData.fRBX = pVStep2InputData->vecSingleData[i].fRBX; step2ResultData.fRBY = pVStep2InputData->vecSingleData[i].fRBY; step2ResultData.iClassId = pVStep2InputData->vecSingleData[i].iClassId; step2ResultData.fScore = pVStep2InputData->vecSingleData[i].fScore; step2ResultData.iTrainIndex = pVStep2InputData->vecSingleData[i].iTrainIndex; if (pVStep2InputData->vecSingleData[i].iTargetType >= SPACE) { pVStep2OutputData->vecStep2ResultData.emplace_back(step2ResultData); continue; }; // LogWarn << "frameId:" << pVStep2InputData->iFrameId // << " [" // << pVStep2InputData->vecSingleData[i].fLTX 
// << "," // << pVStep2InputData->vecSingleData[i].fLTY // << "],[" // << pVStep2InputData->vecSingleData[i].fRBX // << "," // << pVStep2InputData->vecSingleData[i].fRBY // << "]"; cv::Rect rect(cv::Point(pVStep2InputData->vecSingleData[i].fLTX, pVStep2InputData->vecSingleData[i].fLTY), cv::Point(pVStep2InputData->vecSingleData[i].fRBX, pVStep2InputData->vecSingleData[i].fRBY)); cv::Mat image = pVStep2InputData->cvImage(rect).clone(); //进行推理 std::vector vecInferenceResult; auto start = std::chrono::system_clock::now(); // 计时开始 yolov5model.YoloV5ClearityInferenceModel(image, vecInferenceResult, 2); auto end = std::chrono::system_clock::now(); for (int j = 0; j < vecInferenceResult.size(); j++) { SingleData singledata; singledata.iLine = vecInferenceResult[j].clear_id; singledata.iClassId = vecInferenceResult[j].class_id; singledata.fScore = vecInferenceResult[j].class_conf; singledata.fLTX = vecInferenceResult[j].bbox[0]; singledata.fLTY = vecInferenceResult[j].bbox[1]; singledata.fRBX = vecInferenceResult[j].bbox[2]; singledata.fRBY = vecInferenceResult[j].bbox[3]; singledata.fClear = vecInferenceResult[j].clear_id; this->resetLocation(singledata, pVStep2InputData->vecSingleData[i]); step2ResultData.vecSingleData.emplace_back(singledata); // LogDebug << "frameId:" << pVStep2InputData->iFrameId // << " --iClassId:" << singledata.iClassId // << " iLine:" << singledata.iLine // << " score=" << singledata.fScore // << " [" // << singledata.fLTX << "," << singledata.fLTY // << "],[" // << singledata.fRBX << "," << singledata.fRBY // << "]"; } pVStep2OutputData->vecStep2ResultData.emplace_back(step2ResultData); } outputQueMap_[strPort0_]->push(std::static_pointer_cast(pVStep2OutputData), true); } return APP_ERR_OK; } void TrainStep2InferenceEngine::resetLocation(SingleData &singleData, SingleData &step1SingleData, float fResizeRatio) { singleData.fLTX = singleData.fLTX * fResizeRatio + step1SingleData.fLTX; singleData.fLTY = singleData.fLTY * fResizeRatio + 
step1SingleData.fLTY; singleData.fRBX = singleData.fRBX * fResizeRatio + step1SingleData.fLTX; singleData.fRBY = singleData.fRBY * fResizeRatio + step1SingleData.fLTY; singleData.fLTX = (singleData.fLTX < IMAGE_WIDTH) ? singleData.fLTX : IMAGE_WIDTH; singleData.fLTY = (singleData.fLTY < IMAGE_HEIGHT) ? singleData.fLTY : IMAGE_HEIGHT; singleData.fRBX = (singleData.fRBX < IMAGE_WIDTH) ? singleData.fRBX : IMAGE_WIDTH; singleData.fRBY = (singleData.fRBY < IMAGE_HEIGHT) ? singleData.fRBY : IMAGE_HEIGHT; } \ No newline at end of file diff --git a/engine/TrainDivideEngine/TrainDivideEngine.cpp b/engine/TrainDivideEngine/TrainDivideEngine.cpp index 71b29f9..f8f12ae 100644 --- a/engine/TrainDivideEngine/TrainDivideEngine.cpp +++ b/engine/TrainDivideEngine/TrainDivideEngine.cpp @@ -1 +1 @@ -#include "TrainDivideEngine.h" using namespace ai_matrix; TrainDivideEngine::TrainDivideEngine() {} TrainDivideEngine::~TrainDivideEngine() {} APP_ERROR TrainDivideEngine::Init() { strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0"; strPort1_ = engineName_ + "_" + std::to_string(engineId_) + "_1"; this->baseConfig_ = Config::getins()->getBaseConfig(); this->identifyConfig_ = Config::getins()->getIdentifyConfig(); this->dataSourceConfig_ = Config::getins()->getDataSourceConfig(); InitParam(); LogInfo << "TrainDivideEngine Init ok"; return APP_ERR_OK; } APP_ERROR TrainDivideEngine::DeInit() { LogInfo << "TrainDivideEngine DeInit ok"; return APP_ERR_OK; } /** * 初始化参数信息 * inParam : N/A * outParam: N/A * return : N/A */ void TrainDivideEngine::InitParam() { iPushSpaceFrameId_ = 0; i64TimeStampFirst_ = 0; bPushIsEnd_ = false; vecParationInfo_.clear(); std::vector().swap(vecParationInfo_); bDealCenterFlag_ = false; parationInfoLast_.iSpaceFrame = 0; parationInfoLast_.iCenterX = 0; parationInfoLast_.strTrainDate = ""; parationInfoLast_.strTrainTime = ""; parationInfoLast_.bIsEnd = false; mapNumCenterInfo_.clear(); mapProCenterInfo_.clear(); bHaveHeadFlag_ = false; headInfo_.iFrameId 
= 0; headInfo_.fCenterX = 0; this->iTrainIndex = 0; } /** * 构造车厢间隔信息 * inParam : N/A * outParam: N/A * return : N/A */ void TrainDivideEngine::makeParationInfo(PartionInfo ¶tionInfo, std::shared_ptr pInferenceResultData, SingleData &singleData) { // parationInfo.i64EndTimeStamp = pInferenceResultData->i64TimeStamp; parationInfo.iSpaceFrame = pInferenceResultData->iFrameId; parationInfo.iCenterX = singleData.fLTX + (singleData.fRBX - singleData.fLTX) / 2; parationInfo.strTrainDate = pInferenceResultData->strTrainDate; parationInfo.strTrainTime = pInferenceResultData->strTrainTime; parationInfo.bIsEnd = pInferenceResultData->bIsEnd; parationInfoLast_ = parationInfo; } /** * 处理中心间隔信息 * inParam : N/A * outParam: N/A * return : N/A */ void TrainDivideEngine::dealCenterSpace(std::vector &vecParationInfo, std::shared_ptr pInferenceResultData) { int iVecSize = vecParationInfo.size(); if (iVecSize < 0) { return; } LogDebug << "积累的车厢切分信息数:" << iVecSize << " 当前帧:" << pInferenceResultData->iFrameId << " 第一个车厢切分帧:" << vecParationInfo.at(0).iSpaceFrame << " 最后一个车厢切分帧:" << vecParationInfo.at(iVecSize - 1).iSpaceFrame << " 最后一个车厢切分帧是否为结束:" << vecParationInfo.at(iVecSize - 1).bIsEnd; /* 因停车后再行驶未能及时判断出为行驶状态,导致更新间隔信息,出现漏切分车厢 漏切分时vecParationInfo中保存的是两辆车的间隔信息,因此针对vecParationInfo做特殊处理,从此处切分出遗漏的车厢。 */ std::vector vecSpacePos; for (int i = 1; i < iVecSize; i++) { bool bIntervalFlag = (vecParationInfo[i].iSpaceFrame - vecParationInfo[i - 1].iSpaceFrame) > this->identifyConfig_.iPartitionFrameSpan; LogDebug << "上一帧ID:" << vecParationInfo[i - 1].iSpaceFrame << " 上一帧间隔X轴中线:" << vecParationInfo[i - 1].iCenterX << " 本帧ID:" << vecParationInfo[i].iSpaceFrame << " 本帧间隔X轴中线:" << vecParationInfo[i].iCenterX << " 满足帧间隔:" << bIntervalFlag << " 累计处理计数i:" << i; if (bIntervalFlag && ( (g_come_direction == DIRECTION_LEFT && vecParationInfo[i - 1].iCenterX < vecParationInfo[i].iCenterX - this->identifyConfig_.iSplitFrameSpanPx) || (g_come_direction == DIRECTION_RIGHT && vecParationInfo[i - 1].iCenterX - 
this->identifyConfig_.iSplitFrameSpanPx > vecParationInfo[i].iCenterX) )) { vecSpacePos.push_back(i - 1); } } vecSpacePos.push_back(iVecSize - 1); /* 如果集合中最后为列车结束帧,则表示停车在车厢间隔。这时用最后一个作为车厢划分帧。 其他场景使用靠近中心的间隔帧作为车厢划分帧 */ for (int i = 0; i < vecSpacePos.size(); i++) { PartionInfo partionInfo; if (i == vecSpacePos.size() - 1 && vecParationInfo[vecSpacePos.at(i)].bIsEnd) { partionInfo = vecParationInfo[vecSpacePos.at(i)]; } else { int iPos = 0; int iImageCenter = IMAGE_WIDTH / 2; int iToCenterXMin = iImageCenter; int iBegin = (i == 0 ? 0 : vecSpacePos.at(i - 1) + 1); for (int j = iBegin; j <= vecSpacePos.at(i); j++) { if (iToCenterXMin > abs(vecParationInfo[j].iCenterX - iImageCenter)) { iToCenterXMin = abs(vecParationInfo[j].iCenterX - iImageCenter); iPos = j; } } partionInfo = vecParationInfo[iPos]; } //此处切分时,依据车号,属性判断是否有漏的车厢,如果有则依靠车号属性切分。 this->splitTrainByNumPro(partionInfo, pInferenceResultData); this->divideTrain(partionInfo); this->iPushSpaceFrameId_ = partionInfo.iSpaceFrame; this->bPushIsEnd_ = partionInfo.bIsEnd; // LogDebug // << "pushSpaceFrameId:" << this->iPushSpaceFrameId_ // << " bPushIsEnd:" << this->bPushIsEnd_; } vecParationInfo.clear(); } /** * 处理车厢间隔 * inParam : N/A * outParam: N/A * return : N/A */ void TrainDivideEngine::dealTrainSpaceInfo(std::shared_ptr pInferenceResultData, SingleData &singleData) { /* 无方向时,识别的内容都放入集合中。 有向时,按方向判断是否push车厢间隔框 向左行驶:帧间隔中心点小于画面1/3时,则处理车厢间隔集合。新车厢间隔必须大于上次车厢中心点再放入集合等待处理。 向右行驶:帧间隔中心点大于画面2/3时,则处理车厢间隔集合。新车厢间隔必须小于上次车厢中心点再放入集合等待处理。 注:最后一节不会再有帧间隔了,因此直接发送的结束帧,且坐标设置中心点。 */ // 没有识别到间隔时 return if ((singleData.iTargetType != SPACE && singleData.iTargetType != TRAINSPACE) || pInferenceResultData->iFrameId < 30) { return; } // 停车状态且有间隔时需更新最后间隔框帧号。,防止长时间停在间隔上导致集合持续增大。 if (pInferenceResultData->iTrainStatus == TRAINSTATUS_STOP && !pInferenceResultData->bIsEnd) { this->parationInfoLast_.iSpaceFrame = pInferenceResultData->iFrameId; return; } bool bIntervalFlag = (pInferenceResultData->iFrameId - this->parationInfoLast_.iSpaceFrame) > 
this->identifyConfig_.iPartitionFrameSpan; int iCenterCur = singleData.fLTX + (singleData.fRBX - singleData.fLTX) / 2; LogDebug << "当前帧:" << pInferenceResultData->iFrameId << " 间隔框中心线:" << iCenterCur << " 上一帧:" << this->parationInfoLast_.iSpaceFrame << " 间隔框中心线:" << this->parationInfoLast_.iCenterX << " 行车方向:" << g_come_direction << " 是否满足切分帧数:" << bIntervalFlag << " bDealCenterFlag_:" << this->bDealCenterFlag_; if (g_come_direction == DIRECTION_UNKNOWN || this->parationInfoLast_.iCenterX == 0) { PartionInfo parationInfo; this->makeParationInfo(parationInfo, pInferenceResultData, singleData); this->vecParationInfo_.push_back(parationInfo); } else if (g_come_direction == DIRECTION_LEFT) { if (iCenterCur < (IMAGE_WIDTH / 3)) { PartionInfo parationInfo; this->makeParationInfo(parationInfo, pInferenceResultData, singleData); if (!this->bDealCenterFlag_) { this->vecParationInfo_.push_back(parationInfo); this->dealCenterSpace(this->vecParationInfo_, pInferenceResultData); this->bDealCenterFlag_ = true; } } else if ((this->parationInfoLast_.iCenterX < iCenterCur - this->identifyConfig_.iSplitFrameSpanPx) && bIntervalFlag) //该条件只会在新车间隔出来进入一次 { // 防止上节车厢间隔框所有识别都大于画面的1/3,因此当前车厢间隔出来后也需处理下上节车厢间隔 if (!this->bDealCenterFlag_ && this->vecParationInfo_.size() > 0) { this->dealCenterSpace(this->vecParationInfo_, pInferenceResultData); } PartionInfo parationInfo; this->makeParationInfo(parationInfo, pInferenceResultData, singleData); this->vecParationInfo_.push_back(parationInfo); this->bDealCenterFlag_ = false; } else { PartionInfo parationInfo; this->makeParationInfo(parationInfo, pInferenceResultData, singleData); //该条件是防止第一个XXX帧满足小于画面1/3后切割,后一帧XXX+1的中心点大于画面1/3导致的加入vec中出现的多切分现象。(向右增加30px的浮动,因为大框可能不是同一种) if (!(this->bDealCenterFlag_ && !bIntervalFlag && (iCenterCur < (IMAGE_WIDTH / 3 + 80)))) { this->vecParationInfo_.push_back(parationInfo); } } } else if (g_come_direction == DIRECTION_RIGHT) { if (iCenterCur > (IMAGE_WIDTH / 3 * 2)) { PartionInfo parationInfo; 
this->makeParationInfo(parationInfo, pInferenceResultData, singleData); if (!this->bDealCenterFlag_) { this->vecParationInfo_.push_back(parationInfo); this->dealCenterSpace(this->vecParationInfo_, pInferenceResultData); this->bDealCenterFlag_ = true; } } else if ((this->parationInfoLast_.iCenterX - this->identifyConfig_.iSplitFrameSpanPx > iCenterCur) && bIntervalFlag) //该条件只会在新车间隔出来进入一次 { //防止上节车厢间隔所有识别框都小于画面的2/3,因此当前车厢间隔出来后也需处理下上节车厢间隔 if (!this->bDealCenterFlag_ && this->vecParationInfo_.size() > 0) { this->dealCenterSpace(this->vecParationInfo_, pInferenceResultData); } PartionInfo parationInfo; this->makeParationInfo(parationInfo, pInferenceResultData, singleData); this->vecParationInfo_.push_back(parationInfo); this->bDealCenterFlag_ = false; } else { PartionInfo parationInfo; this->makeParationInfo(parationInfo, pInferenceResultData, singleData); //该条件是防止第一个XXX帧满足大于画面2/3后切割,后一帧XXX+1的中心点小于画面2/3导致的加入vec中出现的多切分现象。(向左增加80px的浮动因为大框可能不是同一种) if (!(this->bDealCenterFlag_ && !bIntervalFlag && (iCenterCur > (IMAGE_WIDTH / 3 * 2 - 80)))) { this->vecParationInfo_.push_back(parationInfo); } } } } void TrainDivideEngine::splitTrainByNumPro(PartionInfo &partionInfo, std::shared_ptr &pInferenceResultData) { //向左行驶用车号位置,向右行驶用属性位置。 std::map *pMapCenterInfoTemp_ = nullptr; if (g_come_direction == DIRECTION_LEFT) { pMapCenterInfoTemp_ = &mapNumCenterInfo_; mapProCenterInfo_.clear(); } else if (g_come_direction == DIRECTION_RIGHT) { pMapCenterInfoTemp_ = &mapProCenterInfo_; mapNumCenterInfo_.clear(); } if (pMapCenterInfoTemp_ == nullptr || pMapCenterInfoTemp_->size() <= 0) { return; } auto iter = pMapCenterInfoTemp_->begin(); int iCenterXPre = iter->second; uint32_t iFrameIdPre = iter->first; bool bFlag = false; uint32_t iSplitFrameId = 0; while (++iter != pMapCenterInfoTemp_->end()) { bool bIntervalFlag = (iter->first - iFrameIdPre) > this->identifyConfig_.iPartitionFrameSpan; // LogDebug // << "iFrameIdPre:" << iFrameIdPre // << " iCenterXPre:" << iCenterXPre // << " iFrameid:" 
<< iter->first // << " iCenter:" << iter->second // << " bIntervalFlag:" << bIntervalFlag; if (bIntervalFlag && ( (g_come_direction == DIRECTION_LEFT && iCenterXPre < iter->second - this->identifyConfig_.iSplitFrameSpanPx) || (g_come_direction == DIRECTION_RIGHT && iCenterXPre - this->identifyConfig_.iSplitFrameSpanPx > iter->second) )) { iSplitFrameId = iter->first; bFlag = true; } //比较完后,可更新前一帧数据 iCenterXPre = iter->second; iFrameIdPre = iter->first; if(bFlag) { if ( (g_come_direction == DIRECTION_LEFT && iter->second < (IMAGE_WIDTH / 3)) || (g_come_direction == DIRECTION_RIGHT && iter->second > (IMAGE_WIDTH / 3 * 2)) ) { bFlag = false; } if (!bFlag) { PartionInfo parationInfo_new; parationInfo_new.iSpaceFrame = iSplitFrameId; parationInfo_new.strTrainDate = pInferenceResultData->strTrainDate; parationInfo_new.strTrainTime = pInferenceResultData->strTrainTime; parationInfo_new.iEndframe = iSplitFrameId; parationInfo_new.bIsEnd = false; //**通过该函数切分的肯定不是最后一节 parationInfo_new.bSpaceDivide = false; //构造一个间隔信息写入到切分帧中 std::string strFilePath; strFilePath = this->baseConfig_.strDebugResultPath + "/" + pInferenceResultData->strTrainDate + "/" + StringUtil::getins()->replace_all_distinct(pInferenceResultData->strTrainTime, ":", "-") + "/" + "jpg/" + std::to_string(iSplitFrameId) + ".json"; Json::Value jvFrameInfo; FileUtil::getins()->readJsonInfo(jvFrameInfo, strFilePath); Json::Value jvOneSpace; jvOneSpace["target_type"] = 5; jvOneSpace["classid"] = 18; jvOneSpace["ltx"] = pMapCenterInfoTemp_->at(iSplitFrameId) + (g_come_direction == DIRECTION_LEFT ? -50 : 50); jvOneSpace["lty"] = 0; jvOneSpace["rbx"] = pMapCenterInfoTemp_->at(iSplitFrameId) + (g_come_direction == DIRECTION_LEFT ? 
-50 : 50); jvOneSpace["rby"] = 0; jvFrameInfo["step1"].append(jvOneSpace); FileUtil::getins()->writeJsonInfo(jvFrameInfo, strFilePath); this->divideTrain(parationInfo_new); iPushSpaceFrameId_ = parationInfo_new.iSpaceFrame; bPushIsEnd_ = parationInfo_new.bIsEnd; LogDebug << " pushSpaceFrameId:" << iPushSpaceFrameId_ << " bPushIsEnd:" << bPushIsEnd_; while (pMapCenterInfoTemp_->size() > 0) { auto iterDel = pMapCenterInfoTemp_->begin(); if(iterDel->first > iPushSpaceFrameId_) { break; } LogDebug << "erase iFrameId:" << iterDel->first; pMapCenterInfoTemp_->erase(iterDel); } } } if (iter->first >= partionInfo.iSpaceFrame) { LogDebug << "frameid:" << iter->first << " >= pPartionInfo->iSpaceFrame:" << partionInfo.iSpaceFrame << " break"; break; } } while (pMapCenterInfoTemp_->size() > 0) { auto iterDel = pMapCenterInfoTemp_->begin(); if (iterDel->first > partionInfo.iSpaceFrame) { break; } // LogDebug << "erase iFrameId:" << iterDel->first; pMapCenterInfoTemp_->erase(iterDel); } } void TrainDivideEngine::divideTrain(PartionInfo &partionInfo) { partionInfo.iStartframe = this->vecTrainDivideInfo.empty() ? 
1 : this->vecTrainDivideInfo.back().iSpaceFrame; partionInfo.iEndframe = partionInfo.iSpaceFrame; this->vecTrainDivideInfo.emplace_back(partionInfo); this->iTrainIndex++; std::string strFilePath; //检测到车厢划分信息 strFilePath = this->baseConfig_.strDebugResultPath + "/" + partionInfo.strTrainDate + "/" + StringUtil::getins()->replace_all_distinct(partionInfo.strTrainTime, ":", "-") + "/" + std::to_string(this->vecTrainDivideInfo.size()) + ".json"; Json::Value jvPartionInfo; jvPartionInfo["trainIndex"] = this->vecTrainDivideInfo.size(); jvPartionInfo["startFrame"] = partionInfo.iStartframe; jvPartionInfo["endFrame"] = partionInfo.iEndframe; jvPartionInfo["spaceDivide"] = partionInfo.bSpaceDivide; partionInfo.iTrainIndex = this->vecTrainDivideInfo.size(); FileUtil::getins()->writeJsonInfo(jvPartionInfo, strFilePath); std::shared_ptr pPartionInfo = std::make_shared(); *pPartionInfo = partionInfo; LogInfo << "--------- 第" << this->vecTrainDivideInfo.size() << "节,车厢切分 --------"; LogDebug << "开始帧:" << partionInfo.iStartframe; LogDebug << "结束帧:" << partionInfo.iEndframe; outputQueMap_[strPort0_]->push(std::static_pointer_cast(pPartionInfo), true); if (partionInfo.bIsEnd) { this->vecTrainDivideInfo.clear(); } } APP_ERROR TrainDivideEngine::Process() { int iRet = APP_ERR_OK; while (!isStop_) { //pop端口0 std::shared_ptr pVoidData0 = nullptr; iRet = inputQueMap_[strPort0_]->pop(pVoidData0); if (nullptr == pVoidData0) { usleep(1000); continue; } std::shared_ptr pInferenceResultData = std::static_pointer_cast(pVoidData0); if (pInferenceResultData->bIsEnd) { if (this->vecTrainDivideInfo.empty() && !this->iTrainIndex) continue; //最后一节处理下前一节信息 if (!bDealCenterFlag_ && vecParationInfo_.size() > 0) { LogDebug << "lastFrameid:" << vecParationInfo_[0].iSpaceFrame << " frameid:" << pInferenceResultData->iFrameId; this->dealCenterSpace(vecParationInfo_, pInferenceResultData); } PartionInfo partionInfo; std::shared_ptr pPartionInfo = std::make_shared(); partionInfo.iSpaceFrame = 
pInferenceResultData->iFrameId; partionInfo.strTrainDate = pInferenceResultData->strTrainDate; partionInfo.strTrainTime = pInferenceResultData->strTrainTime; partionInfo.bIsEnd = pInferenceResultData->bIsEnd; //最后一节和倒数第二节之间的间隔未能识别时,此时也需要通过车号属性切分下。 this->splitTrainByNumPro(partionInfo, pInferenceResultData); this->divideTrain(partionInfo); iPushSpaceFrameId_ = partionInfo.iSpaceFrame; if (!bPushIsEnd_) { bPushIsEnd_ = partionInfo.bIsEnd; // LogDebug << "pushSpaceFrameId:" << iPushSpaceFrameId_ // << " bPushIsEnd:" << bPushIsEnd_; } InitParam(); } std::string strFilePath = ""; strFilePath = this->baseConfig_.strDebugResultPath + "/" + pInferenceResultData->strTrainDate + "/" + StringUtil::getins()->replace_all_distinct(pInferenceResultData->strTrainTime, ":", "-") + "/" + "jpg/" + std::to_string(pInferenceResultData->iFrameId) + ".json"; // 先读取文本内容,追加新的信息后再写入 Json::Value jvFrameInfo; if (!FileUtil::getins()->readJsonInfo(jvFrameInfo, strFilePath)) { LogError << "read fail:" << strFilePath; } jvFrameInfo["isEnd"] = (pInferenceResultData->bIsEnd || jvFrameInfo["isEnd"].asBool()); jvFrameInfo["train_status"] = pInferenceResultData->iTrainStatus; SingleData singleData_sapce; // 遍历识别结果 for (int i = 0; i < pInferenceResultData->vecSingleData.size(); i++) { SingleData singleData = pInferenceResultData->vecSingleData[i]; float fCenter = singleData.fLTX + (singleData.fRBX - singleData.fLTX) / 2; if (pInferenceResultData->iTrainStatus != TRAINSTATUS_STOP) { switch (singleData.iTargetType) { case HEAD: // 车头没有属性,因此车头号也加入到属性中 // 车头只加入一次,防止一个车头2个车头号的场景。但有两个车头且没识别车头间隔则无法处理。 if (!bHaveHeadFlag_) { bool bIntervalFlag = ((pInferenceResultData->iFrameId - headInfo_.iFrameId) > this->identifyConfig_.iPartitionFrameSpan && headInfo_.iFrameId != 0); LogDebug << "车头帧:" << pInferenceResultData->iFrameId << " 中心:" << fCenter << " 上一帧:" << headInfo_.iFrameId << " 上个中心:" << headInfo_.fCenterX << " 是否满足帧跨度:" << bIntervalFlag; if ((bIntervalFlag && abs((int)(headInfo_.fCenterX - fCenter)) > 
this->identifyConfig_.iSplitFrameSpanPx)) { bHaveHeadFlag_ = true; } else { headInfo_.fCenterX = fCenter; headInfo_.iFrameId = pInferenceResultData->iFrameId; mapNumCenterInfo_.insert(std::make_pair(pInferenceResultData->iFrameId, fCenter)); mapProCenterInfo_.insert(std::make_pair(pInferenceResultData->iFrameId, fCenter)); } } break; case NUM: mapNumCenterInfo_.insert(std::make_pair(pInferenceResultData->iFrameId, fCenter)); break; case PRO: mapProCenterInfo_.insert(std::make_pair(pInferenceResultData->iFrameId, fCenter)); break; case SPACE: singleData_sapce = singleData; break; case TRAINSPACE: if (singleData_sapce.iTargetType != SPACE) { singleData_sapce = singleData; } break; case CONTAINER: break; default: break; } } Json::Value jvInfo; jvInfo["target_type"] = singleData.iTargetType; jvInfo["classid"] = singleData.iClassId; jvInfo["score"] = singleData.fScore; jvInfo["clear"] = singleData.fClear; jvInfo["ltx"] = singleData.fLTX; jvInfo["lty"] = singleData.fLTY; jvInfo["rbx"] = singleData.fRBX; jvInfo["rby"] = singleData.fRBY; if (singleData.iTargetType == singleData_sapce.iTargetType) { jvFrameInfo["divide_space"] = jvInfo; } jvFrameInfo["step1"].append(jvInfo); } FileUtil::getins()->writeJsonInfo(jvFrameInfo, strFilePath); if (pInferenceResultData->vecSingleData.empty()) continue; this->dealTrainSpaceInfo(pInferenceResultData, singleData_sapce); } return APP_ERR_OK; } \ No newline at end of file +#include "TrainDivideEngine.h" using namespace ai_matrix; TrainDivideEngine::TrainDivideEngine() {} TrainDivideEngine::~TrainDivideEngine() {} APP_ERROR TrainDivideEngine::Init() { strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0"; strPort1_ = engineName_ + "_" + std::to_string(engineId_) + "_1"; this->baseConfig_ = Config::getins()->getBaseConfig(); this->identifyConfig_ = Config::getins()->getIdentifyConfig(); this->dataSourceConfig_ = Config::getins()->getDataSourceConfig(); InitParam(); LogInfo << "TrainDivideEngine Init ok"; return APP_ERR_OK; } 
APP_ERROR TrainDivideEngine::DeInit() { LogInfo << "TrainDivideEngine DeInit ok"; return APP_ERR_OK; } /** * 初始化参数信息 * inParam : N/A * outParam: N/A * return : N/A */ void TrainDivideEngine::InitParam() { iPushSpaceFrameId_ = 0; i64TimeStampFirst_ = 0; bPushIsEnd_ = false; vecParationInfo_.clear(); std::vector().swap(vecParationInfo_); bDealCenterFlag_ = false; parationInfoLast_.iSpaceFrame = 0; parationInfoLast_.iCenterX = 0; parationInfoLast_.strTrainDate = ""; parationInfoLast_.strTrainTime = ""; parationInfoLast_.bIsEnd = false; mapNumCenterInfo_.clear(); mapProCenterInfo_.clear(); bHaveHeadFlag_ = false; headInfo_.iFrameId = 0; headInfo_.fCenterX = 0; this->iTrainIndex = 0; } /** * 构造车厢间隔信息 * inParam : N/A * outParam: N/A * return : N/A */ void TrainDivideEngine::makeParationInfo(PartionInfo ¶tionInfo, std::shared_ptr pInferenceResultData, SingleData &singleData) { // parationInfo.i64EndTimeStamp = pInferenceResultData->i64TimeStamp; parationInfo.iSpaceFrame = pInferenceResultData->iFrameId; parationInfo.iCenterX = singleData.fLTX + (singleData.fRBX - singleData.fLTX) / 2; parationInfo.strTrainDate = pInferenceResultData->strTrainDate; parationInfo.strTrainTime = pInferenceResultData->strTrainTime; parationInfo.bIsEnd = pInferenceResultData->bIsEnd; parationInfoLast_ = parationInfo; } /** * 处理中心间隔信息 * inParam : N/A * outParam: N/A * return : N/A */ void TrainDivideEngine::dealCenterSpace(std::vector &vecParationInfo, std::shared_ptr pInferenceResultData) { int iVecSize = vecParationInfo.size(); if (iVecSize < 0) { return; } LogDebug << "积累的车厢切分信息数:" << iVecSize << " 当前帧:" << pInferenceResultData->iFrameId << " 第一个车厢切分帧:" << vecParationInfo.at(0).iSpaceFrame << " 最后一个车厢切分帧:" << vecParationInfo.at(iVecSize - 1).iSpaceFrame << " 最后一个车厢切分帧是否为结束:" << vecParationInfo.at(iVecSize - 1).bIsEnd; /* 因停车后再行驶未能及时判断出为行驶状态,导致更新间隔信息,出现漏切分车厢 漏切分时vecParationInfo中保存的是两辆车的间隔信息,因此针对vecParationInfo做特殊处理,从此处切分出遗漏的车厢。 */ std::vector vecSpacePos; for (int i = 1; i < iVecSize; i++) { 
bool bIntervalFlag = (vecParationInfo[i].iSpaceFrame - vecParationInfo[i - 1].iSpaceFrame) > this->identifyConfig_.iPartitionFrameSpan; LogDebug << "上一帧ID:" << vecParationInfo[i - 1].iSpaceFrame << " 上一帧间隔X轴中线:" << vecParationInfo[i - 1].iCenterX << " 本帧ID:" << vecParationInfo[i].iSpaceFrame << " 本帧间隔X轴中线:" << vecParationInfo[i].iCenterX << " 满足帧间隔:" << bIntervalFlag << " 累计处理计数i:" << i; if (bIntervalFlag && ( (g_come_direction == DIRECTION_LEFT && vecParationInfo[i - 1].iCenterX < vecParationInfo[i].iCenterX - this->identifyConfig_.iSplitFrameSpanPx) || (g_come_direction == DIRECTION_RIGHT && vecParationInfo[i - 1].iCenterX - this->identifyConfig_.iSplitFrameSpanPx > vecParationInfo[i].iCenterX) )) { vecSpacePos.push_back(i - 1); } } vecSpacePos.push_back(iVecSize - 1); /* 如果集合中最后为列车结束帧,则表示停车在车厢间隔。这时用最后一个作为车厢划分帧。 其他场景使用靠近中心的间隔帧作为车厢划分帧 */ for (int i = 0; i < vecSpacePos.size(); i++) { PartionInfo partionInfo; if (i == vecSpacePos.size() - 1 && vecParationInfo[vecSpacePos.at(i)].bIsEnd) { partionInfo = vecParationInfo[vecSpacePos.at(i)]; } else { int iPos = 0; int iImageCenter = IMAGE_WIDTH / 2; int iToCenterXMin = iImageCenter; int iBegin = (i == 0 ? 
/**
 * Carriage-gap state machine, fed one gap detection per frame.
 *
 * With no travel direction known, every gap record is simply buffered.
 * Once the direction is known:
 *  - travelling LEFT : when the gap centre drops below 1/3 of the image width,
 *    flush the accumulated set (split a carriage); a NEW gap is only buffered
 *    once its centre lies right of the previous centre by iSplitFrameSpanPx.
 *  - travelling RIGHT: mirror logic around the 2/3 line.
 * The last carriage produces no further gap frames; its end frame is emitted
 * directly elsewhere with the centre coordinate filled in.
 *
 * inParam : pInferenceResultData - current frame; singleData - gap box (SPACE
 *           or TRAINSPACE), may be a default-constructed placeholder
 * outParam: N/A (mutates vecParationInfo_, parationInfoLast_, bDealCenterFlag_)
 * return  : N/A
 * NOTE(review): shared_ptr template argument reconstructed as
 * InferenceResultData (lost in dump) — confirm against the header.
 */
void TrainDivideEngine::dealTrainSpaceInfo(std::shared_ptr<InferenceResultData> pInferenceResultData, SingleData &singleData)
{
    // No gap detected on this frame (or still in the warm-up first 30 frames): nothing to do.
    if ((singleData.iTargetType != SPACE && singleData.iTargetType != TRAINSPACE) || pInferenceResultData->iFrameId < 30)
    {
        return;
    }
    // While stopped on a gap, only refresh the last-gap frame id so a long stop
    // does not make the buffered set (and the frame-span test) grow stale.
    if (pInferenceResultData->iTrainStatus == TRAINSTATUS_STOP && !pInferenceResultData->bIsEnd)
    {
        this->parationInfoLast_.iSpaceFrame = pInferenceResultData->iFrameId;
        return;
    }
    // true when enough frames elapsed since the last buffered gap to consider this a NEW gap
    bool bIntervalFlag = (pInferenceResultData->iFrameId - this->parationInfoLast_.iSpaceFrame) > this->identifyConfig_.iPartitionFrameSpan;
    // horizontal centre of the current gap box
    int iCenterCur = singleData.fLTX + (singleData.fRBX - singleData.fLTX) / 2;
    LogDebug << "当前帧:" << pInferenceResultData->iFrameId << " 间隔框中心线:" << iCenterCur
             << " 上一帧:" << this->parationInfoLast_.iSpaceFrame << " 间隔框中心线:" << this->parationInfoLast_.iCenterX
             << " 行车方向:" << g_come_direction << " 是否满足切分帧数:" << bIntervalFlag
             << " bDealCenterFlag_:" << this->bDealCenterFlag_;
    if (g_come_direction == DIRECTION_UNKNOWN || this->parationInfoLast_.iCenterX == 0)
    {
        // Direction not yet established (or first gap ever): just accumulate.
        PartionInfo parationInfo;
        this->makeParationInfo(parationInfo, pInferenceResultData, singleData);
        this->vecParationInfo_.push_back(parationInfo);
    }
    else if (g_come_direction == DIRECTION_LEFT)
    {
        if (iCenterCur < (IMAGE_WIDTH / 3))
        {
            // Gap has crossed the left third: flush the set once per carriage.
            PartionInfo parationInfo;
            this->makeParationInfo(parationInfo, pInferenceResultData, singleData);
            if (!this->bDealCenterFlag_)
            {
                this->vecParationInfo_.push_back(parationInfo);
                this->dealCenterSpace(this->vecParationInfo_, pInferenceResultData);
                this->bDealCenterFlag_ = true;
            }
        }
        else if ((this->parationInfoLast_.iCenterX < iCenterCur - this->identifyConfig_.iSplitFrameSpanPx) && bIntervalFlag)
        // This branch is entered exactly once, when the NEXT carriage's gap appears.
        {
            // The previous carriage's gap boxes may all have stayed right of the
            // 1/3 line, so flush them now that the new gap has appeared.
            if (!this->bDealCenterFlag_ && this->vecParationInfo_.size() > 0)
            {
                this->dealCenterSpace(this->vecParationInfo_, pInferenceResultData);
            }
            PartionInfo parationInfo;
            this->makeParationInfo(parationInfo, pInferenceResultData, singleData);
            this->vecParationInfo_.push_back(parationInfo);
            this->bDealCenterFlag_ = false;
        }
        else
        {
            PartionInfo parationInfo;
            this->makeParationInfo(parationInfo, pInferenceResultData, singleData);
            // Guard against a double split when frame N satisfied "< 1/3" and was
            // cut, but frame N+1's centre lands back above the 1/3 line; the
            // +80px margin absorbs boxes of differing size classes.
            if (!(this->bDealCenterFlag_ && !bIntervalFlag && (iCenterCur < (IMAGE_WIDTH / 3 + 80))))
            {
                this->vecParationInfo_.push_back(parationInfo);
            }
        }
    }
    else if (g_come_direction == DIRECTION_RIGHT)
    {
        // Mirror of the LEFT branch around the 2/3 line.
        if (iCenterCur > (IMAGE_WIDTH / 3 * 2))
        {
            PartionInfo parationInfo;
            this->makeParationInfo(parationInfo, pInferenceResultData, singleData);
            if (!this->bDealCenterFlag_)
            {
                this->vecParationInfo_.push_back(parationInfo);
                this->dealCenterSpace(this->vecParationInfo_, pInferenceResultData);
                this->bDealCenterFlag_ = true;
            }
        }
        else if ((this->parationInfoLast_.iCenterX - this->identifyConfig_.iSplitFrameSpanPx > iCenterCur) && bIntervalFlag)
        // This branch is entered exactly once, when the NEXT carriage's gap appears.
        {
            // The previous carriage's gap boxes may all have stayed left of the
            // 2/3 line, so flush them now that the new gap has appeared.
            if (!this->bDealCenterFlag_ && this->vecParationInfo_.size() > 0)
            {
                this->dealCenterSpace(this->vecParationInfo_, pInferenceResultData);
            }
            PartionInfo parationInfo;
            this->makeParationInfo(parationInfo, pInferenceResultData, singleData);
            this->vecParationInfo_.push_back(parationInfo);
            this->bDealCenterFlag_ = false;
        }
        else
        {
            PartionInfo parationInfo;
            this->makeParationInfo(parationInfo, pInferenceResultData, singleData);
            // Guard against a double split when frame N satisfied "> 2/3" and was
            // cut, but frame N+1's centre lands back below the 2/3 line; the
            // -80px margin absorbs boxes of differing size classes.
            if (!(this->bDealCenterFlag_ && !bIntervalFlag && (iCenterCur > (IMAGE_WIDTH / 3 * 2 - 80))))
            {
                this->vecParationInfo_.push_back(parationInfo);
            }
        }
    }
}
/**
 * Fallback split using the train-number (left travel) or property (right
 * travel) box track, for carriages whose gap was never detected.
 *
 * Two-phase detection over the chosen centre map (frameId -> centreX):
 *  phase 1: a large frame jump whose centre moved against the travel
 *           direction arms bFlag and records the candidate split frame;
 *  phase 2: the split is committed only once the tracked centre reaches the
 *           exit third of the image, at which point a synthetic gap box is
 *           written into that frame's JSON and divideTrain() is invoked.
 * Consumed map entries (<= the committed/requested split frame) are erased.
 *
 * inParam : partionInfo - the split about to be committed by the caller
 *           (scanning stops at its iSpaceFrame); pInferenceResultData - frame
 *           metadata for paths/timestamps
 * outParam: N/A (may emit extra splits; prunes the centre maps)
 * return  : N/A
 * NOTE(review): template arguments reconstructed from usage (map<uint32_t,int>,
 * shared_ptr<InferenceResultData>) — confirm against the header.
 */
void TrainDivideEngine::splitTrainByNumPro(PartionInfo &partionInfo, std::shared_ptr<InferenceResultData> &pInferenceResultData)
{
    // Travelling left: use the train-number positions; travelling right: use the property positions.
    std::map<uint32_t, int> *pMapCenterInfoTemp_ = nullptr;
    if (g_come_direction == DIRECTION_LEFT)
    {
        pMapCenterInfoTemp_ = &mapNumCenterInfo_;
        mapProCenterInfo_.clear();
    }
    else if (g_come_direction == DIRECTION_RIGHT)
    {
        pMapCenterInfoTemp_ = &mapProCenterInfo_;
        mapNumCenterInfo_.clear();
    }
    if (pMapCenterInfoTemp_ == nullptr || pMapCenterInfoTemp_->size() <= 0)
    {
        return;
    }
    auto iter = pMapCenterInfoTemp_->begin();
    int iCenterXPre = iter->second;
    uint32_t iFrameIdPre = iter->first;
    bool bFlag = false;          // armed when a missed-carriage boundary is suspected
    uint32_t iSplitFrameId = 0;  // candidate frame for the extra split
    while (++iter != pMapCenterInfoTemp_->end())
    {
        bool bIntervalFlag = (iter->first - iFrameIdPre) > this->identifyConfig_.iPartitionFrameSpan;
        // LogDebug
        //     << "iFrameIdPre:" << iFrameIdPre
        //     << " iCenterXPre:" << iCenterXPre
        //     << " iFrameid:" << iter->first
        //     << " iCenter:" << iter->second
        //     << " bIntervalFlag:" << bIntervalFlag;
        // Phase 1: frame jump AND centre moved against the travel direction -> a carriage was missed.
        if (bIntervalFlag && (
                (g_come_direction == DIRECTION_LEFT && iCenterXPre < iter->second - this->identifyConfig_.iSplitFrameSpanPx) ||
                (g_come_direction == DIRECTION_RIGHT && iCenterXPre - this->identifyConfig_.iSplitFrameSpanPx > iter->second)))
        {
            iSplitFrameId = iter->first;
            bFlag = true;
        }
        // Comparison done; roll the previous-frame snapshot forward.
        iCenterXPre = iter->second;
        iFrameIdPre = iter->first;
        if (bFlag)
        {
            // Phase 2: wait until the box reaches the exit third of the image.
            if ((g_come_direction == DIRECTION_LEFT && iter->second < (IMAGE_WIDTH / 3)) ||
                (g_come_direction == DIRECTION_RIGHT && iter->second > (IMAGE_WIDTH / 3 * 2)))
            {
                bFlag = false;
            }
            if (!bFlag)
            {
                PartionInfo parationInfo_new;
                parationInfo_new.iSpaceFrame = iSplitFrameId;
                parationInfo_new.strTrainDate = pInferenceResultData->strTrainDate;
                parationInfo_new.strTrainTime = pInferenceResultData->strTrainTime;
                parationInfo_new.iEndframe = iSplitFrameId;
                parationInfo_new.bIsEnd = false;       // a split produced here can never be the last carriage
                parationInfo_new.bSpaceDivide = false; // not derived from a detected gap box
                // Fabricate a gap record and write it into the split frame's JSON.
                std::string strFilePath;
                strFilePath = this->baseConfig_.strDebugResultPath + "/" + pInferenceResultData->strTrainDate + "/" +
                              StringUtil::getins()->replace_all_distinct(pInferenceResultData->strTrainTime, ":", "-") + "/" +
                              "jpg/" + std::to_string(iSplitFrameId) + ".json";
                Json::Value jvFrameInfo;
                FileUtil::getins()->readJsonInfo(jvFrameInfo, strFilePath);
                Json::Value jvOneSpace;
                jvOneSpace["target_type"] = 5;
                jvOneSpace["classid"] = 18;
                // NOTE(review): ltx and rbx receive the SAME value (centre ±50 with
                // identical sign), producing a zero-width synthetic box — looks like
                // a copy-paste slip (rbx was perhaps meant to be centre + 50 when
                // ltx is centre - 50). Confirm intended geometry before changing.
                jvOneSpace["ltx"] = pMapCenterInfoTemp_->at(iSplitFrameId) + (g_come_direction == DIRECTION_LEFT ? -50 : 50);
                jvOneSpace["lty"] = 0;
                jvOneSpace["rbx"] = pMapCenterInfoTemp_->at(iSplitFrameId) + (g_come_direction == DIRECTION_LEFT ? -50 : 50);
                jvOneSpace["rby"] = 0;
                jvFrameInfo["step1"].append(jvOneSpace);
                FileUtil::getins()->writeJsonInfo(jvFrameInfo, strFilePath);
                this->divideTrain(parationInfo_new);
                iPushSpaceFrameId_ = parationInfo_new.iSpaceFrame;
                bPushIsEnd_ = parationInfo_new.bIsEnd;
                LogDebug << " pushSpaceFrameId:" << iPushSpaceFrameId_ << " bPushIsEnd:" << bPushIsEnd_;
                // Drop every map entry up to and including the committed split frame.
                while (pMapCenterInfoTemp_->size() > 0)
                {
                    auto iterDel = pMapCenterInfoTemp_->begin();
                    if (iterDel->first > iPushSpaceFrameId_)
                    {
                        break;
                    }
                    LogDebug << "erase iFrameId:" << iterDel->first;
                    pMapCenterInfoTemp_->erase(iterDel);
                }
            }
        }
        // Never scan past the split the caller is about to commit.
        if (iter->first >= partionInfo.iSpaceFrame)
        {
            LogDebug << "frameid:" << iter->first << " >= pPartionInfo->iSpaceFrame:" << partionInfo.iSpaceFrame << " break";
            break;
        }
    }
    // Prune everything consumed by the caller's split frame.
    while (pMapCenterInfoTemp_->size() > 0)
    {
        auto iterDel = pMapCenterInfoTemp_->begin();
        if (iterDel->first > partionInfo.iSpaceFrame)
        {
            break;
        }
        // LogDebug << "erase iFrameId:" << iterDel->first;
        pMapCenterInfoTemp_->erase(iterDel);
    }
}
-50 : 50); jvOneSpace["rby"] = 0; jvFrameInfo["step1"].append(jvOneSpace); FileUtil::getins()->writeJsonInfo(jvFrameInfo, strFilePath); this->divideTrain(parationInfo_new); iPushSpaceFrameId_ = parationInfo_new.iSpaceFrame; bPushIsEnd_ = parationInfo_new.bIsEnd; LogDebug << " pushSpaceFrameId:" << iPushSpaceFrameId_ << " bPushIsEnd:" << bPushIsEnd_; while (pMapCenterInfoTemp_->size() > 0) { auto iterDel = pMapCenterInfoTemp_->begin(); if(iterDel->first > iPushSpaceFrameId_) { break; } LogDebug << "erase iFrameId:" << iterDel->first; pMapCenterInfoTemp_->erase(iterDel); } } } if (iter->first >= partionInfo.iSpaceFrame) { LogDebug << "frameid:" << iter->first << " >= pPartionInfo->iSpaceFrame:" << partionInfo.iSpaceFrame << " break"; break; } } while (pMapCenterInfoTemp_->size() > 0) { auto iterDel = pMapCenterInfoTemp_->begin(); if (iterDel->first > partionInfo.iSpaceFrame) { break; } // LogDebug << "erase iFrameId:" << iterDel->first; pMapCenterInfoTemp_->erase(iterDel); } } void TrainDivideEngine::divideTrain(PartionInfo &partionInfo) { partionInfo.iStartframe = this->vecTrainDivideInfo.empty() ? 
/**
 * Engine main loop: pops inference results from input port 0, accumulates
 * number/property/gap detections, persists a per-frame JSON record, and
 * drives the carriage-split state machine (dealTrainSpaceInfo).
 * Runs until isStop_ is set; always returns APP_ERR_OK.
 * NOTE(review): iRet receives each pop() result but is never checked.
 */
APP_ERROR TrainDivideEngine::Process()
{
    int iRet = APP_ERR_OK;
    while (!isStop_)
    {
        // Pop port 0; a null payload means "nothing yet" — back off 1 ms.
        std::shared_ptr<void> pVoidData0 = nullptr;
        iRet = inputQueMap_[strPort0_]->pop(pVoidData0);
        if (nullptr == pVoidData0)
        {
            usleep(1000);
            continue;
        }
        // NOTE(review): cast target reconstructed as InferenceResultData
        // (template args were lost in the dump) — confirm against the header.
        std::shared_ptr<InferenceResultData> pInferenceResultData = std::static_pointer_cast<InferenceResultData>(pVoidData0);
        if (pInferenceResultData->bIsEnd)
        {
            // End frame with no carriages ever seen: nothing to finalize.
            if (this->vecTrainDivideInfo.empty() && !this->iTrainIndex)
                continue;
            // Flush whatever gap records remain for the second-to-last carriage.
            if (!bDealCenterFlag_ && vecParationInfo_.size() > 0)
            {
                LogDebug << "lastFrameid:" << vecParationInfo_[0].iSpaceFrame << " frameid:" << pInferenceResultData->iFrameId;
                this->dealCenterSpace(vecParationInfo_, pInferenceResultData);
            }
            PartionInfo partionInfo;
            // NOTE(review): pPartionInfo is allocated but never used in this
            // branch (divideTrain builds its own shared copy) — dead allocation.
            std::shared_ptr<PartionInfo> pPartionInfo = std::make_shared<PartionInfo>();
            partionInfo.iSpaceFrame = pInferenceResultData->iFrameId;
            partionInfo.strTrainDate = pInferenceResultData->strTrainDate;
            partionInfo.strTrainTime = pInferenceResultData->strTrainTime;
            partionInfo.bIsEnd = pInferenceResultData->bIsEnd;
            // The gap between the last and second-to-last carriage may have gone
            // undetected; fall back to splitting by number/property positions.
            this->splitTrainByNumPro(partionInfo, pInferenceResultData);
            this->divideTrain(partionInfo);
            iPushSpaceFrameId_ = partionInfo.iSpaceFrame;
            if (!bPushIsEnd_)
            {
                bPushIsEnd_ = partionInfo.bIsEnd;
                // LogDebug << "pushSpaceFrameId:" << iPushSpaceFrameId_
                //          << " bPushIsEnd:" << bPushIsEnd_;
            }
            InitParam();
        }
        // Per-frame record: <debug_result>/<date>/<time>/jpg/<frameId>.json
        std::string strFilePath = "";
        strFilePath = this->baseConfig_.strDebugResultPath + "/" + pInferenceResultData->strTrainDate + "/" +
                      StringUtil::getins()->replace_all_distinct(pInferenceResultData->strTrainTime, ":", "-") + "/" +
                      "jpg/" + std::to_string(pInferenceResultData->iFrameId) + ".json";
        // Read the existing frame record, append the new info, write it back.
        Json::Value jvFrameInfo;
        if (!FileUtil::getins()->readJsonInfo(jvFrameInfo, strFilePath))
        {
            LogError << "read fail:" << strFilePath;
        }
        jvFrameInfo["isEnd"] = (pInferenceResultData->bIsEnd || jvFrameInfo["isEnd"].asBool());
        jvFrameInfo["train_status"] = pInferenceResultData->iTrainStatus;
        // Gap box selected for this frame: SPACE wins over TRAINSPACE.
        SingleData singleData_sapce;
        // Walk every detection of this frame.
        for (int i = 0; i < pInferenceResultData->vecSingleData.size(); i++)
        {
            SingleData singleData = pInferenceResultData->vecSingleData[i];
            float fCenter = singleData.fLTX + (singleData.fRBX - singleData.fLTX) / 2;
            // Positions are only tracked while the train is moving.
            if (pInferenceResultData->iTrainStatus != TRAINSTATUS_STOP)
            {
                switch (singleData.iTargetType)
                {
                case HEAD:
                    // The locomotive has no property box, so its number feeds both maps.
                    // It is recorded only once, to tolerate one head with two number
                    // boxes; two heads without a detected head gap remain unhandled.
                    if (!bHaveHeadFlag_)
                    {
                        bool bIntervalFlag = ((pInferenceResultData->iFrameId - headInfo_.iFrameId) > this->identifyConfig_.iPartitionFrameSpan && headInfo_.iFrameId != 0);
                        LogDebug << "车头帧:" << pInferenceResultData->iFrameId << " 中心:" << fCenter
                                 << " 上一帧:" << headInfo_.iFrameId << " 上个中心:" << headInfo_.fCenterX
                                 << " 是否满足帧跨度:" << bIntervalFlag;
                        // A large frame jump plus a large centre jump means a second
                        // head appeared: stop recording head positions.
                        if ((bIntervalFlag && abs((int)(headInfo_.fCenterX - fCenter)) > this->identifyConfig_.iSplitFrameSpanPx))
                        {
                            bHaveHeadFlag_ = true;
                        }
                        else
                        {
                            headInfo_.fCenterX = fCenter;
                            headInfo_.iFrameId = pInferenceResultData->iFrameId;
                            mapNumCenterInfo_.insert(std::make_pair(pInferenceResultData->iFrameId, fCenter));
                            mapProCenterInfo_.insert(std::make_pair(pInferenceResultData->iFrameId, fCenter));
                        }
                    }
                    break;
                case NUM:
                    mapNumCenterInfo_.insert(std::make_pair(pInferenceResultData->iFrameId, fCenter));
                    break;
                case PRO:
                    mapProCenterInfo_.insert(std::make_pair(pInferenceResultData->iFrameId, fCenter));
                    break;
                case SPACE:
                    singleData_sapce = singleData;
                    break;
                case TRAINSPACE:
                    // TRAINSPACE only stands in when no SPACE box was seen yet.
                    if (singleData_sapce.iTargetType != SPACE)
                    {
                        singleData_sapce = singleData;
                    }
                    break;
                case CONTAINER:
                    break;
                default:
                    break;
                }
            }
            // Every detection is appended to the frame record regardless of motion state.
            Json::Value jvInfo;
            jvInfo["target_type"] = singleData.iTargetType;
            jvInfo["classid"] = singleData.iClassId;
            jvInfo["score"] = singleData.fScore;
            jvInfo["clear"] = singleData.fClear;
            jvInfo["ltx"] = singleData.fLTX;
            jvInfo["lty"] = singleData.fLTY;
            jvInfo["rbx"] = singleData.fRBX;
            jvInfo["rby"] = singleData.fRBY;
            if (singleData.iTargetType == singleData_sapce.iTargetType)
            {
                jvFrameInfo["divide_space"] = jvInfo;
            }
            jvFrameInfo["step1"].append(jvInfo);
        }
        // NOTE(review): frames with zero detections skip the JSON write below
        // (the pre-image wrote first, then skipped), so "isEnd"/"train_status"
        // are never persisted for an empty frame — presumably intentional to cut
        // disk traffic, but an empty FINAL frame loses its isEnd marker; confirm.
        if (pInferenceResultData->vecSingleData.empty())
            continue;
        FileUtil::getins()->writeJsonInfo(jvFrameInfo, strFilePath);
        this->dealTrainSpaceInfo(pInferenceResultData, singleData_sapce);
    }
    return APP_ERR_OK;
}
CUDA_CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * outputSize * sizeof(float), cudaMemcpyDeviceToHost, stream)); cudaStreamSynchronize(stream); }