Compare commits

...

2 Commits

Author SHA1 Message Date
Mr.V 829d3e4790 Add new engines 2024-08-09 17:56:38 +08:00
Mr.V fe8de3ab8e Add new engines 2024-08-08 15:44:57 +08:00
27 changed files with 2250 additions and 127 deletions

View File

@ -2,9 +2,9 @@
cmake_minimum_required(VERSION 3.21)
# project information
project(train_RFID_Linux)
project(train)
project(${PROJECT_NAME} VERSION 0.1 DESCRIPTION "RFID识别程序 Linux版")
project(${PROJECT_NAME} VERSION 0.1 DESCRIPTION "视频车号识别对外接口模块")
add_definitions(-std=c++11)
add_definitions(-DAPI_EXPORTS)
@ -39,12 +39,12 @@ include_directories(${Boost_INCLUDE_DIRS})
message(STATUS "Using Boost ${Boost_VERSION}")
# opencv
#find_package(OpenCV REQUIRED)
#message(STATUS "Using OpenCV ${OpenCV_VERSION}")
find_package(OpenCV REQUIRED)
message(STATUS "Using OpenCV ${OpenCV_VERSION}")
# CUDA
#find_package(CUDA REQUIRED)
#message(STATUS "Using CUDA ${CUDA_VERSION}")
find_package(CUDA REQUIRED)
message(STATUS "Using CUDA ${CUDA_VERSION}")
#
set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g -ggdb")
@ -60,17 +60,17 @@ set(X86_LINUX_INCLUDE_DIR "/usr/include/x86_64-linux-gnu")
set(X86_LINUX_LIB_DIR "/usr/lib/x86_64-linux-gnu")
#OpenCV
#set(OPENCV_INCLUDE_DIR ${SYS_USR_LOCAL_INCLUDE_DIR}/opencv4)
#set(OPENCV_LIB_DIR ${SYS_USR_LOCAL_LIB_DIR})
set(OPENCV_INCLUDE_DIR ${SYS_USR_LOCAL_INCLUDE_DIR}/opencv4)
set(OPENCV_LIB_DIR ${SYS_USR_LOCAL_LIB_DIR})
#CUDA
#set(CUDA_DIR "/usr/local/cuda-11.7")
#set(CUDA_INCLUDE_DIR ${CUDA_DIR}/include)
#set(CUDA_LIB_DIR ${CUDA_DIR}/lib64)
set(CUDA_DIR "/usr/local/cuda-11.7")
set(CUDA_INCLUDE_DIR ${CUDA_DIR}/include)
set(CUDA_LIB_DIR ${CUDA_DIR}/lib64)
#TensorRT
#set(TENSORRT_INCLUDE_DIR ${X86_LINUX_INCLUDE_DIR}) #tensorrt/usr/include/aarch64-linux-gnu
#set(TENSORRT_LIB_DIR ${X86_LINUX_LIB_DIR}) #tensorrt/usr/lib/aarch64-linux-gnu
set(TENSORRT_INCLUDE_DIR ${X86_LINUX_INCLUDE_DIR}) #tensorrt/usr/include/aarch64-linux-gnu
set(TENSORRT_LIB_DIR ${X86_LINUX_LIB_DIR}) #tensorrt/usr/lib/aarch64-linux-gnu
#ai_matrix
set(ai_matrix_Folder ${PROJECT_SRC_ROOT}/ai_matrix)
@ -95,34 +95,30 @@ include_directories(${ai_matrix_Folder}/Utils)
include_directories(${ai_matrix_Folder}/Http)
include_directories(${ai_matrix_Folder}/Config)
# DealRfidEngine
include_directories(${PROJECT_SRC_ROOT}/engine/DealRfidEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/DealRfidEngine DealRfidEngine_SRC)
# ApiEngine
include_directories(${PROJECT_SRC_ROOT}/engine/ApiEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/ApiEngine ApiEngine_SRC)
# GetRfidEngine
include_directories(${PROJECT_SRC_ROOT}/engine/GetRfidEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/GetRfidEngine GetRfidEngine_SRC)
# DataSourceEngine
include_directories(${PROJECT_SRC_ROOT}/engine/DataSourceEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/DataSourceEngine DataSourceEngine_SRC)
# HttpUpResultEngine
include_directories(${PROJECT_SRC_ROOT}/engine/HttpUpResultEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/HttpUpResultEngine HttpUpResultEngine_SRC)
# VideoDecodeEngine
include_directories(${PROJECT_SRC_ROOT}/engine/DecodeEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/DecodeEngine DecodeEngine_SRC)
# SaveResultEngine
include_directories(${PROJECT_SRC_ROOT}/engine/SaveResultEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/SaveResultEngine SaveResultEngine_SRC)
# SaveRfidEngine
include_directories(${PROJECT_SRC_ROOT}/engine/SaveRfidEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/SaveRfidEngine SaveRfidEngine_SRC)
# SocketServerDemoEngine
include_directories(${PROJECT_SRC_ROOT}/engine/VideoAuxiliaryEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/VideoAuxiliaryEngine VideoAuxiliaryEngine_SRC)
# MoveEngine
include_directories(${PROJECT_SRC_ROOT}/engine/MoveEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/MoveEngine MoveEngine_SRC)
# SaveImageEngine
include_directories(${PROJECT_SRC_ROOT}/engine/SaveImageEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/SaveImageEngine SaveImageEngine_SRC)
#
include_directories(
#base include
${PROJECT_SOURCE_DIR}/base
${PROJECT_SOURCE_DIR}/base/BlockingQueue
${PROJECT_SOURCE_DIR}/base/CBase64
${PROJECT_SOURCE_DIR}/base/CommandParser
@ -131,21 +127,22 @@ include_directories(
${PROJECT_SOURCE_DIR}/base/ErrorCode
${PROJECT_SOURCE_DIR}/base/FileManager
${PROJECT_SOURCE_DIR}/base/Log
${PROJECT_SOURCE_DIR}/base/Yolo
#code include
${PROJECT_SOURCE_DIR}/code/common
# ${PROJECT_SOURCE_DIR}/code/cuda_utils
# ${PROJECT_SOURCE_DIR}/code/model
# ${PROJECT_SOURCE_DIR}/code/preprocess
# ${PROJECT_SOURCE_DIR}/code/inference
# ${PROJECT_SOURCE_DIR}/code/postprocess
${PROJECT_SOURCE_DIR}/code/BaseSocket
${PROJECT_SOURCE_DIR}/code/BaseComPort
${PROJECT_SOURCE_DIR}/base/common
${PROJECT_SOURCE_DIR}/base/cuda_utils
${PROJECT_SOURCE_DIR}/base/model
${PROJECT_SOURCE_DIR}/base/preprocess
${PROJECT_SOURCE_DIR}/base/inference
${PROJECT_SOURCE_DIR}/base/postprocess
${PROJECT_SOURCE_DIR}/base/BaseSocket
${PROJECT_SOURCE_DIR}/base/BaseComPort
#third party include
# ${CUDA_INCLUDE_DIR}
# ${TENSORRT_INCLUDE_DIR}
# ${OpenCV_DIR}
${CUDA_INCLUDE_DIR}
${TENSORRT_INCLUDE_DIR}
${OpenCV_DIR}
${X86_LINUX_INCLUDE_DIR}
${SYS_USR_LOCAL_INCLUDE_DIR}
)
@ -153,9 +150,9 @@ include_directories(
#
link_directories(${SYS_USR_LOCAL_LIB_DIR}
${X86_LINUX_LIB_DIR}
# ${OPENCV_LIB_DIR}
# ${CUDA_LIB_DIR}
# ${TENSORRT_LIB_DIR}
${OPENCV_LIB_DIR}
${CUDA_LIB_DIR}
${TENSORRT_LIB_DIR}
)
#
@ -170,23 +167,23 @@ file(
${PROJECT_SOURCE_DIR}/base/ErrorCode/*.cpp
${PROJECT_SOURCE_DIR}/base/FileManager/*.cpp
${PROJECT_SOURCE_DIR}/base/Log/*.cpp
${PROJECT_SOURCE_DIR}/base/Yolo/*.cpp
#code src
${PROJECT_SOURCE_DIR}/code/common/*.cpp
# ${PROJECT_SOURCE_DIR}/code/cuda_utils/*.cpp
# ${PROJECT_SOURCE_DIR}/code/preprocess/*.cu
# ${PROJECT_SOURCE_DIR}/code/inference/*.cu
# ${PROJECT_SOURCE_DIR}/code/postprocess/*.cpp
${PROJECT_SOURCE_DIR}/code/BaseSocket/*.cpp
${PROJECT_SOURCE_DIR}/code/BaseComPort/*.cpp
${PROJECT_SOURCE_DIR}/base/common/*.cpp
${PROJECT_SOURCE_DIR}/base/cuda_utils/*.cpp
${PROJECT_SOURCE_DIR}/base/preprocess/*.cu
${PROJECT_SOURCE_DIR}/base/inference/*.cu
${PROJECT_SOURCE_DIR}/base/postprocess/*.cpp
${PROJECT_SOURCE_DIR}/base/BaseSocket/*.cpp
${PROJECT_SOURCE_DIR}/base/BaseComPort/*.cpp
# engine
${DealRfidEngine_SRC}
${GetRfidEngine_SRC}
${HttpUpResultEngine_SRC}
${SaveResultEngine_SRC}
${SaveRfidEngine_SRC}
${VideoAuxiliaryEngine_SRC}
${ApiEngine_SRC}
${DataSourceEngine_SRC}
${DecodeEngine_SRC}
${MoveEngine_SRC}
${SaveImageEngine_SRC}
)
#
@ -198,19 +195,19 @@ add_executable(${PROJECT_NAME}
)
# TensorRT
#target_link_libraries(
# ${PROJECT_NAME}
# nvinfer
# nvonnxparser
# nvcaffe_parser
# nvinfer_plugin
#)
target_link_libraries(
${PROJECT_NAME}
nvinfer
nvonnxparser
nvcaffe_parser
nvinfer_plugin
)
target_link_libraries(
${PROJECT_NAME}
${Boost_LIBRARIES}
# ${CUDA_LIBRARIES}
# ${OpenCV_LIBS}
${CUDA_LIBRARIES}
${OpenCV_LIBS}
${_REFLECTION}
# FFmpeg
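These hunks switch OpenCV, CUDA and TensorRT from commented-out to required, so the target now links nvinfer and friends directly. The snippet below is only a smoke test of that toolchain, not part of the commit; it prints the versions reported by the packages the build now depends on:

#include <iostream>
#include <NvInfer.h>                 // TensorRT headers, found via the TENSORRT_INCLUDE_DIR above
#include <cuda_runtime_api.h>        // CUDA runtime, found via find_package(CUDA REQUIRED)
#include <opencv2/core/version.hpp>  // OpenCV, found via find_package(OpenCV REQUIRED)

int main()
{
    int cudaVer = 0;
    cudaRuntimeGetVersion(&cudaVer);                      // e.g. 11070 for CUDA 11.7
    std::cout << "TensorRT " << getInferLibVersion()      // needs libnvinfer at link time
              << ", CUDA runtime " << cudaVer
              << ", OpenCV " << CV_VERSION << std::endl;
    return 0;
}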

View File

@ -6,7 +6,7 @@ namespace ai_matrix
Config::GarbageCollector Config::gc_;
std::mutex Config::mx_;
Config *Config::GetIns()
Config *Config::getins()
{
//双层锁,确保线程安全
if (pInstance_ == nullptr)
@ -37,14 +37,41 @@ namespace ai_matrix
// 控制参数
this->baseConfig_.strTrackName = config_["base"]["track_name"].as<std::string>();
this->baseConfig_.bTestModel = config_["base"]["test_model"].as<bool>();
this->baseConfig_.iConnectType = config_["base"]["connect_type"].as<int>();
this->baseConfig_.iApiPort = config_["base"]["api_port"].as<int>();
this->baseConfig_.bUpResult = config_["base"]["up_result"].as<bool>();
this->baseConfig_.strLogPath = config_["base"]["log_path"].as<std::string>();
this->baseConfig_.strResultPath = config_["base"]["result_path"].as<std::string>();
this->baseConfig_.strDebugResultPath = config_["base"]["debug_result_path"].as<std::string>();
this->baseConfig_.iResultSaveDays = config_["base"]["result_save_days"].as<int>();
// 日志参数
this->logConfig_.strLevel = config_["log"]["level"].as<std::string>();
this->logConfig_.strOutLevel = config_["log"]["out_level"].as<std::string>();
this->logConfig_.strSaveLevel = config_["log"]["save_level"].as<std::string>();
// 数据源参数
this->dataSourceConfig_.strUrl = config_["data_source"]["url"].as<std::string>();
this->dataSourceConfig_.iSkipInterval = config_["data_source"]["skip_interval"].as<int>();
this->dataSourceConfig_.iDirection = config_["data_source"]["direction"].as<int>();
this->dataSourceConfig_.iLeftFirst = config_["data_source"]["left_first"].as<int>();
this->dataSourceConfig_.iRightFirst = config_["data_source"]["right_first"].as<int>();
// 识别参数
this->identifyConfig_.strRunModel = config_["identify"]["run_mode"].as<std::string>();
this->identifyConfig_.bNeedMoveDetectFlag = config_["identify"]["need_move_detect_flag"].as<bool>();
this->identifyConfig_.strIdentifyDirection = config_["identify"]["identify_direction"].as<std::string>();
this->identifyConfig_.iPartitionFrameSpan = config_["identify"]["partition_frame_span"].as<int>();
this->identifyConfig_.iSplitFrameSpanPx = config_["identify"]["split_frame_span_px"].as<int>();
this->identifyConfig_.iChkstopPx = config_["identify"]["chkstop_px"].as<int>();
this->identifyConfig_.iChkstopCount = config_["identify"]["chkstop_count"].as<int>();
this->identifyConfig_.iNumFrameHeight = config_["identify"]["num_frame_height"].as<int>();
this->identifyConfig_.iProFrameHeight = config_["identify"]["pro_frame_height"].as<int>();
this->identifyConfig_.iSpaceFrameWidth = config_["identify"]["space_frame_width"].as<int>();
this->identifyConfig_.bTrainHeardDetect = config_["identify"]["train_heard_detect"].as<bool>();
// websocket server 服务端参数
this->wSocketConfig_.bIsUse = config_["wsocket_server"]["is_use"].as<bool>();
this->wSocketConfig_.iPort = config_["wsocket_server"]["port"].as<int>();
this->wSocketConfig_.iMaxQueueLen = config_["wsocket_server"]["max_queue_len"].as<int>();
// http服务器参数
this->httpServerConfig_.bIsUse = config_["http_server"]["is_use"].as<bool>();
@ -52,6 +79,7 @@ namespace ai_matrix
this->httpServerConfig_.iPort = config_["http_server"]["http_port"].as<int>();
this->httpServerConfig_.strTokenUrl = config_["http_server"]["token_path"].as<std::string>();
this->httpServerConfig_.strUpResultUrl = config_["http_server"]["up_result_path"].as<std::string>();
this->httpServerConfig_.strUpDeviceStatusUrl = config_["http_server"]["device_status_url"].as<std::string>();
this->httpServerConfig_.strUserName = config_["http_server"]["username"].as<std::string>();
this->httpServerConfig_.strPassword = config_["http_server"]["password"].as<std::string>();
@ -63,6 +91,53 @@ namespace ai_matrix
return 0;
}
int Config::readModelYaml(std::string &strPath)
{
try
{
strConfigYamlPath_ = strPath;
config_ = YAML::LoadFile(strPath);
//退出程序
if (config_.IsNull())
{
printf("model yaml not found: %s\n", strPath.c_str());
return -1;
}
// 来车检测模型
this->modelConfig_move_.strModelPath = config_["move_model"]["model_path"].as<std::string>();
this->modelConfig_move_.fScoreThreshold = config_["move_model"]["score_threshold"].as<float>();
this->modelConfig_move_.vecClass = config_["move_model"]["class"].as<std::vector<std::string>>();
// 车厢第一步模型
this->modelConfig_trainStep1_.strModelPath = config_["train_step1_model"]["model_path"].as<std::string>();
this->modelConfig_trainStep1_.fScoreThreshold = config_["train_step1_model"]["score_threshold"].as<float>();
this->modelConfig_trainStep1_.vecClass = config_["train_step1_model"]["class"].as<std::vector<std::string>>();
// 车厢第二步模型
this->modelConfig_trainStep2_.strModelPath = config_["train_step2_model"]["model_path"].as<std::string>();
this->modelConfig_trainStep2_.fScoreThreshold = config_["train_step2_model"]["score_threshold"].as<float>();
this->modelConfig_trainStep2_.vecClass = config_["train_step2_model"]["class"].as<std::vector<std::string>>();
// 集装箱第一步模型
this->modelConfig_containerStep1_.strModelPath = config_["container_step1_model"]["model_path"].as<std::string>();
this->modelConfig_containerStep1_.fScoreThreshold = config_["container_step1_model"]["score_threshold"].as<float>();
this->modelConfig_containerStep1_.vecClass = config_["container_step1_model"]["class"].as<std::vector<std::string>>();
// 集装箱第二步模型
this->modelConfig_containerStep2_.strModelPath = config_["container_step2_model"]["model_path"].as<std::string>();
this->modelConfig_containerStep2_.fScoreThreshold = config_["container_step2_model"]["score_threshold"].as<float>();
this->modelConfig_containerStep2_.vecClass = config_["container_step2_model"]["class"].as<std::vector<std::string>>();
}
catch (...) //捕获所有异常
{
return -1;
}
return 0;
}
int Config::writeYaml()
{
try
@ -149,6 +224,16 @@ namespace ai_matrix
return strTmp;
}
RunConfig Config::getRunConfig() const
{
return this->runConfig_;
}
void Config::setRunConfig(const ai_matrix::RunConfig runConfig)
{
this->runConfig_ = runConfig;
}
BaseConfig Config::getBaseConfig() const
{
return this->baseConfig_;
@ -169,12 +254,93 @@ namespace ai_matrix
this->logConfig_ = logConfig;
}
DataSourceConfig Config::getDataSourceConfig() const
{
return this->dataSourceConfig_;
}
void Config::setDataSourceConfig(const ai_matrix::DataSourceConfig dataSourceConfig)
{
this->dataSourceConfig_ = dataSourceConfig;
}
IdentifyConfig Config::getIdentifyConfig() const
{
return this->identifyConfig_;
}
void Config::setIdentifyConfig(const ai_matrix::IdentifyConfig identifyConfig)
{
this->identifyConfig_ = identifyConfig;
}
WSocketConfig Config::getWSocketConfig() const
{
return this->wSocketConfig_;
}
void Config::setWSocketConfig(const ai_matrix::WSocketConfig wSocketConfig)
{
this->wSocketConfig_ = wSocketConfig;
}
HttpServerConfig Config::getHttpServerConfig() const
{
return this->httpServerConfig_;
}
void Config::setHttpServerConfig(const HttpServerConfig httpServerConfig) {
void Config::setHttpServerConfig(const HttpServerConfig httpServerConfig)
{
this->httpServerConfig_ = httpServerConfig;
}
ModelConfig Config::getModelByMoveConfig() const
{
return this->modelConfig_move_;
}
void Config::setModelByMoveConfig(const ai_matrix::ModelConfig modelConfig)
{
this->modelConfig_move_ = modelConfig;
}
ModelConfig Config::getModelByTrainStep1Config() const
{
return this->modelConfig_trainStep1_;
}
void Config::setModelByTrainStep1Config(const ai_matrix::ModelConfig modelConfig)
{
this->modelConfig_trainStep1_ = modelConfig;
}
ModelConfig Config::getModelByTrainStep2Config() const
{
return this->modelConfig_trainStep2_;
}
void Config::setModelByTrainStep2Config(const ai_matrix::ModelConfig modelConfig)
{
this->modelConfig_trainStep2_ = modelConfig;
}
ModelConfig Config::getModelByContainerStep1Config() const
{
return this->modelConfig_containerStep1_;
}
void Config::setModelByContainerStep1Config(const ai_matrix::ModelConfig modelConfig)
{
this->modelConfig_containerStep1_ = modelConfig;
}
ModelConfig Config::getModelByContainerStep2Config() const
{
return this->modelConfig_containerStep2_;
}
void Config::setModelByContainerStep2Config(const ai_matrix::ModelConfig modelConfig)
{
this->modelConfig_containerStep2_ = modelConfig;
}
}
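A minimal usage sketch of the expanded singleton (the two yaml paths are assumptions made for illustration; the commit does not show where they live):

#include <cstdio>
#include "Config.h"
using namespace ai_matrix;

int main()
{
    std::string cfgPath   = "./config/config.yaml";   // assumed path
    std::string modelPath = "./config/model.yaml";    // assumed path
    if (Config::getins()->readYaml(cfgPath) != 0 ||
        Config::getins()->readModelYaml(modelPath) != 0)
    {
        return -1;                                     // both readers return -1 on any parse error
    }
    IdentifyConfig idCfg = Config::getins()->getIdentifyConfig();
    ModelConfig    step1 = Config::getins()->getModelByTrainStep1Config();
    printf("run_mode=%s step1=%s score>=%.2f classes=%zu\n",
           idCfg.strRunModel.c_str(), step1.strModelPath.c_str(),
           step1.fScoreThreshold, step1.vecClass.size());
    return 0;
}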

View File

@ -20,6 +20,13 @@
namespace ai_matrix
{
struct RunConfig
{
// 识别状态
bool bRun;
};
// 基础控制参数
struct BaseConfig
{
@ -27,22 +34,75 @@ namespace ai_matrix
std::string strTrackName;
// 测试模式
bool bTestModel;
// 连接模式
int iConnectType;
// Api 监听端口
int iApiPort;
// 是否上传识别结果
bool bUpResult;
// 日志文件目录
std::string strLogPath;
// 识别结果目录
std::string strResultPath;
// 调试结果目录
std::string strDebugResultPath;
// 日志存储天数
int iResultSaveDays;
};
// 日志参数
struct LogConfig{
// 日志级别[DEBUG, INFO, WARN, ERROR, FATAL]
std::string strLevel;
struct LogConfig {
// 输出日志级别[DEBUG, INFO, WARN, ERROR, FATAL]
std::string strOutLevel;
// 保存日志级别[DEBUG, INFO, WARN, ERROR, FATAL]
std::string strSaveLevel;
};
// 数据源参数
struct DataSourceConfig {
// 数据源地址
std::string strUrl;
// 跳帧数
int iSkipInterval;
// 行驶方向 0-自动识别 1-向左 2-向右 (与“首位信息”成对存在,形成例如向左就编号在前,向右就属性在前的对应)
int iDirection;
// 0-向左编号在前 1-向左属性在前 (向右行驶的情况2-向右编号在前 3-向右属性在前)
int iLeftFirst;
// (向左行驶的情况0-向左编号在前 1-向左属性在前) 2-向右编号在前 3-向右属性在前
int iRightFirst;
};
// 识别参数
struct IdentifyConfig {
// 运行方式
std::string strRunModel;
// 是否开启动态检测
bool bNeedMoveDetectFlag;
// 识别方向 [LEFT,RIGHT,ALL]
std::string strIdentifyDirection;
// 大框帧跨度(比一个大框从出现到消失的跨度稍大一点, 跟跳帧有关系)
int iPartitionFrameSpan;
// 大框帧跨度的位置像素差异
int iSplitFrameSpanPx;
// 每帧大框位置差异最小值 (持续小于此值,则可能停车)
int iChkstopPx;
// 持续X次位置差异小于chkstop_px则判断为停车。
int iChkstopCount;
// 过滤最小大框高度(不需要的话就写个很小的值)
int iNumFrameHeight;
int iProFrameHeight;
// 过滤最大框宽度(不需要的话就写个很大的值)
int iSpaceFrameWidth;
// 是否识别车头
bool bTrainHeardDetect;
};
// websocket_server 的服务端参数
struct WSocketConfig {
// 是否启用
bool bIsUse;
// 端口
int iPort;
// 最大链接队列
int iMaxQueueLen;
};
// web服务器参数
@ -58,19 +118,30 @@ namespace ai_matrix
std::string strTokenUrl;
// 识别结果上传地址
std::string strUpResultUrl;
// 设备状态上传地址
std::string strUpDeviceStatusUrl;
// 接口用户名
std::string strUserName;
// 接口密码
std::string strPassword;
};
struct ModelConfig
{
std::string strModelPath;
float fScoreThreshold;
std::vector<std::string> vecClass;
};
class Config final
{
public:
static Config *GetIns();
static Config *getins();
// 读yaml文件
int readYaml(std::string &strPath);
int readModelYaml(std::string &strPath);
// 写yaml文件
int writeYaml();
std::string getStringValue(const char *pszKey, const YAML::Node *pConfig = nullptr) const;
@ -79,10 +150,9 @@ namespace ai_matrix
float getFloatValue(const char *pszKey, const YAML::Node *pConfig = nullptr) const;
std::string getPathValue(const char *pszKey, const YAML::Node *pConfig =nullptr) const;
// 初始化运行数据
// RunningData initRunningData();
// RunningData setRunningData(const RunningData runningData);
// RunningData getRunningData() const;
RunConfig getRunConfig() const;
void setRunConfig(const RunConfig runConfig);
// 获取控制参数
BaseConfig getBaseConfig() const;
@ -90,9 +160,34 @@ namespace ai_matrix
// 获取日志参数
LogConfig getLogConfig() const;
void setLogConfig(const LogConfig logConfig);
// 获取数据源参数
DataSourceConfig getDataSourceConfig() const;
void setDataSourceConfig(const DataSourceConfig dataSourceConfig);
// 获取识别参数
IdentifyConfig getIdentifyConfig() const;
void setIdentifyConfig(const IdentifyConfig identifyConfig);
// 获取websocket server 参数
WSocketConfig getWSocketConfig() const;
void setWSocketConfig(const WSocketConfig wSocketConfig);
// 获取web服务器参数
HttpServerConfig getHttpServerConfig() const;
void setHttpServerConfig(const HttpServerConfig httpServerConfig);
// 获取来车检测模型参数
ModelConfig getModelByMoveConfig() const;
void setModelByMoveConfig(const ModelConfig modelConfig);
// 获取车厢第一步模型参数
ModelConfig getModelByTrainStep1Config() const;
void setModelByTrainStep1Config(const ModelConfig modelConfig);
// 获取车厢第二步模型参数
ModelConfig getModelByTrainStep2Config() const;
void setModelByTrainStep2Config(const ModelConfig modelConfig);
// 获取集装箱第一步模型参数
ModelConfig getModelByContainerStep1Config() const;
void setModelByContainerStep1Config(const ModelConfig modelConfig);
// 获取集装箱第二步模型参数
ModelConfig getModelByContainerStep2Config() const;
void setModelByContainerStep2Config(const ModelConfig modelConfig);
YAML::Node config_;
@ -108,15 +203,39 @@ namespace ai_matrix
static std::mutex mx_; //锁,保证线程安全
std::string strConfigYamlPath_;
// (全局)运行实时变量
RunConfig runConfig_;
// 控制参数
BaseConfig baseConfig_;
// 日志参数
LogConfig logConfig_;
// 数据源参数
DataSourceConfig dataSourceConfig_;
// 识别参数
IdentifyConfig identifyConfig_;
// websocket server 服务端参数
WSocketConfig wSocketConfig_;
// web服务器参数
HttpServerConfig httpServerConfig_;
// 来车检测
ModelConfig modelConfig_move_;
// 车厢第一步识别
ModelConfig modelConfig_trainStep1_;
// 车厢第二步识别
ModelConfig modelConfig_trainStep2_;
// 集装箱第一步识别
ModelConfig modelConfig_containerStep1_;
// 集装箱第二步识别
ModelConfig modelConfig_containerStep2_;
//定义一个嵌套类,负责释放内存,操作系统自动完成,不用担心内存泄露
class GarbageCollector
{

View File

@ -30,7 +30,7 @@ namespace ai_matrix
* outParam: N/A
* return : true/false
*/
bool FileUtil::CreateDirPath(std::string strDirPath)
bool FileUtil::createDirPath(std::string strDirPath)
{
if (strDirPath.back() != '/')
{

View File

@ -33,7 +33,7 @@ namespace ai_matrix
//创建文件路径
std::string create_file_path(std::string root, std::string name, std::string suffix);
//创建文件夹路径
bool CreateDirPath(std::string strDirPath);
bool createDirPath(std::string strDirPath);
/**
*

View File

@ -39,43 +39,69 @@
#include <time.h>
#include <unistd.h>
#include "Log.h"
//#include <opencv2/opencv.hpp>
//#include <opencv2/core/core.hpp>
//#include <opencv2/highgui/highgui.hpp>
//#include <opencv2/imgproc/imgproc.hpp>
#include "json/json.h"
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
};
#endif
#include "CommonDataType.h"
#include "nvidia_acl_datatype.h"
#include "Config.h"
#include "TimeUtil.h"
#include "FileUtil.h"
#include "StringUtil.h"
#include "Utils.h"
#define IMAGE_WIDTH 1920 //1920 2560
#define IMAGE_HEIGHT 1080 //1080 1440
#define AEI_COMM_BUFFER_SIZE 2048
typedef struct
//来车检测模型结果
enum MonitorModelState
{
std::string strRfid;
std::string strTime;
std::string strTrainTime;
} RfidInfo;
//初始化状态
MONITOR_MODEL_INIT_STATE = -1,
//车头部分
MONITOR_MODEL_TRAIN_HEAD = 0,
//车头车体部分
MONITOR_MODEL_HEAD_FIRST = 1,
//无车
MONITOR_MODEL_NO_TRAIN = 2,
//车尾部分
MONITOR_MODEL_TRAIN_TAIL = 3,
//车体部分
MONITOR_MODEL_TRAIN_BODY = 4
};
typedef struct
// 车辆状态
enum TrainStatus
{
std::string strCarriageType;
std::string strCarriageNum;
std::string strOrder;
std::string strTrainTime;
std::string strRfidInfo;
std::string strNowTime;
} TrainInfo;
TRAINSTATUS_NO = 0, // 无车
TRAINSTATUS_RUN = 1, // 行驶
TRAINSTATUS_STOP = 2, // 停止
TRAINSTATUS_BACK = 3 // 倒车
};
typedef struct
{
@ -92,4 +118,215 @@ typedef struct Date
int day;
} Date;
//帧解码后数据
typedef struct
{
int iDataSource = 0; //数据来源标识
uint32_t iFrameId = 0;
uint32_t iWidth = 0; // Width of image
uint32_t iHeight = 0; // Height of image
uint32_t iWidthStride = 0; // Width after align up
uint32_t iHeightStride = 0; // Height after align up
// acldvppPixelFormat format = PIXEL_FORMAT_YUV_SEMIPLANAR_420; // Format of image
uint32_t iSize = 0; // Size of data in byte
std::shared_ptr<void> pData = nullptr; // Smart pointer of data
uint64_t i64TimeStamp = 0; //帧数据时间戳
bool bHostMemory = false; //数据解码后内存是否在Host侧
std::string strPicFilePath;
std::string strTrainDate; //过车日期 (格式YYYY-MM-DD)
std::string strTrainName; //车次 (格式HH-MM-SS)
bool bIsEnd = false; //列车结束标识
int iStatus = 0; //0-无车; 1-有车行驶; 2-有车停止
int iDirection = 0; //行驶方向(0-未知; 1-向左; 2-向右)
int iRate = 0; //帧率
} DecodedData;
//数据源基本信息
typedef struct
{
uint32_t iWidth = 0;
uint32_t iHeight = 0;
int iRate = 0; //帧率
} DataSourceInfo;
// 原始帧信息
typedef struct {
// 帧画面
std::shared_ptr<void> pData = nullptr;
// Size of memory, bytes
uint32_t iSize = 0;
// 帧数据时间戳
uint64_t i64TimeStamp = 0;
} SourceFrameData;
//识别处理数据
typedef struct
{
// int iDataSource = 0; //数据来源标识
std::shared_ptr<AVFormatContext*> ppFormatCtx_ = nullptr; // 解码器
DataSourceInfo dataSourceInfo; // 数据源信息
SourceFrameData sourceFrameData; // 原始帧数据
AVCodecParameters *pCodecParameters_ = nullptr;
std::shared_ptr<void> pVoidData = nullptr;
uint32_t iFrameId = 0; //帧号
uint32_t iDataNO = 0; //数据编号
std::string strTrainDate; //过车日期 (格式YYYY-MM-DD)
std::string strTrainTime; //时间 (格式HH-MM-SS)
int iDirection = 0; //行驶方向(0-未知; 1-向左; 2-向右)
int iStatus = 0; //车辆状态(0-无车; 1-有车行驶; 2-有车停止)
int iMonitorStatus = MONITOR_MODEL_INIT_STATE;
int iTrainIndex = 0; // 车厢序号
bool bIsCarriageEnd = false; //车厢结束标识
bool bIsTrainEnd = false; //列车结束标识
std::string strOrigTrainDate; //原过车日期 (格式YYYY-MM-DD)
std::string strOrigTrainName; //原车次 (格式HH-MM-SS)
uint32_t iOrigFrameId = 0; //原帧号
} ProcessData;
typedef struct
{
// 帧数据时间戳
uint64_t i64TimeStamp = 0;
// Size of memory, bytes
uint32_t iSize = 0;
// Width of image
uint32_t iWidth = 0;
// Height of image
uint32_t iHeight = 0;
// 帧率
int iRate = 0;
// 原始数据
std::shared_ptr<void> pData = nullptr;
AVCodecID avCodecID;
} VFrameData;
// 识别区域坐标
typedef struct {
// 字段代号(仅第二步使用)
int iLine = -1;
// 大小框的类别
int iClassId = -1;
// 框的名称
std::string strClassName;
// 分数
float fScore = 0;
// 清晰度
float fClear = 0;
// 左上X坐标 Left Top x
float fLTX = 0;
// 左上y坐标 Left Top y
float fLTY = 0;
// 右下y坐标 Right Bottom x
float fRBX = 0;
// 右下y坐标 Right Bottom y
float fRBY = 0;
} VCoordinate;
// 集装箱信息
typedef struct {
// 集装箱号
std::string strContainerNo;
// 集装箱最优图
std::string strImg;
// 集装箱编号坐标
VCoordinate coordinate;
} VContainer;
typedef struct {
// 股道编号
std::string strTrackNo = "0";
// 列车来车时间
std::string strTTime;
// 列车来车方向 (0:未知 1:左 2:右)
uint16_t iTDirection = 0;
// 车厢编号
uint16_t iCarOrder = 0;
// 车厢种类
uint16_t iCategory = 0;
// 车型
std::string strTType;
// 车厢编号
std::string strTNum;
// 车厢载重
std::string strTLoadWeight;
// 车厢自重(皮重)
std::string strTTareWeight;
// 车厢容积
std::string strTVolume;
// 车厢换长
std::string strTChangeLen;
// 车厢编号图片
std::string strTNum_image;
// 车厢属性图片
std::string strTPro_image;
// 车厢开始帧号
uint32_t iStartFrame = 0;
// 车厢结束帧号
uint32_t iEndFrame = 0;
// 跳帧数
uint32_t iSkipFrame = 0;
// 车厢开始时间
std::string strStartTime;
// 车厢结束时间
std::string strEndTime;
// 是否最后一节车厢
bool bIsTheLast = false;
// 车厢编号大框坐标
VCoordinate numCoordinate;
// 车厢属性大框坐标
VCoordinate proCoordinate;
// 集装箱信息
std::vector<VContainer> vecContainer;
} VTrainInfo;
typedef struct {
// 模型地址
std::string strModelPath;
// 图片地址
std::string strImagePath;
// 模型类型[0:检测 detect 1:分类 classify]
uint8_t iModel = 0;
// 模型版本["v5","v8"]
std::string strModelVersion;
// 类别模板地址
std::string strClassModelPath;
} VYoloTestInfo;
//动态检测结果
typedef struct
{
uint32_t iFrameId = 0; //帧号
uint64_t i64TimeStamp = 0; //帧数据时间戳
bool bHasTrain = false; //有无车标志
bool bIsEnd = false; //列车结束标识
std::string strTrainDate; //过车日期 (格式YYYY-MM-DD)
std::string strTrainName; //车次 (格式HH-MM-SS)
int iDirection = 0; //行驶方向(0-未知; 1-向左; 2-向右)
int iTrainStage = MONITOR_MODEL_INIT_STATE;
} MoveData;
// 存图数据
typedef struct
{
// 车厢位置状态
std::string strTrainStage;
// 行车方向
std::string strDirection;
// 车厢运动状态
std::string strTrainStatus;
// 帧图像数据
VFrameData frameData;
// 图片存储目录
std::string strImgPath;
// 图片名字
std::string strImgName;
// 图片需要标记的框坐标
std::vector<VCoordinate> vecCoordinate;
} SaveImgData;
// 0 = 停止识别; 1 = 开始识别
extern std::atomic<int> g_identify_type;
// -1 = 未知方向; 1 = 向左行驶; 2 = 向右行驶
extern std::atomic<int> g_come_direction;
#endif
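SaveImgData carries, in vecCoordinate, the boxes that should be burned into the picture before it is saved. A small illustrative helper (not part of the commit) showing how one VCoordinate maps onto OpenCV drawing calls:

#include <opencv2/imgproc.hpp>
#include "AppCommon.h"

// Draw every box carried in SaveImgData::vecCoordinate onto a BGR frame.
static void drawCoordinates(cv::Mat &frame, const std::vector<VCoordinate> &boxes)
{
    for (const VCoordinate &c : boxes)
    {
        cv::Rect r(cv::Point(static_cast<int>(c.fLTX), static_cast<int>(c.fLTY)),
                   cv::Point(static_cast<int>(c.fRBX), static_cast<int>(c.fRBY)));
        cv::rectangle(frame, r, cv::Scalar(0, 255, 0), 2);                    // box outline
        cv::putText(frame, c.strClassName, r.tl(), cv::FONT_HERSHEY_SIMPLEX,  // class label
                    0.5, cv::Scalar(0, 255, 0), 1);
    }
}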

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
#ifndef _YOLOV5_CLERAITY_INFERENCE_H_
#define _YOLOV5_CLERAITY_INFERENCE_H_

#include <string>
#include "logging.h"
#include <opencv2/opencv.hpp>
#include <NvCaffeParser.h>
#include <NvInfer.h>
#include <NvInferPlugin.h>
#include <NvOnnxParser.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>

typedef struct _YoloV5ModelParam {
    unsigned int uiClassNum;
    unsigned int uiClearNum;
    unsigned int uiDetSize;
    float fScoreThreshold;
    float fNmsThreshold;
} YoloV5ModelParam;

typedef struct _ModelCommonInfo {
    unsigned int uiModelWidth;
    unsigned int uiModelHeight;
    unsigned int uiInputSize;
    unsigned int uiOutputSize;
    unsigned int uiChannel;
    unsigned int uiBatchSize;
    std::string strInputBlobName;
    std::string strOutputBlobName;
} ModelCommonInfo;

typedef struct _YoloV5ModelInfo {
    YoloV5ModelParam yolov5ModelParam;
    ModelCommonInfo modelCommonInfo;
} YoloV5ModelInfo;

#define LOCATIONS 4
struct alignas(float) stDetection { //alignas(float)
    float bbox[LOCATIONS];
    float class_conf;
    float clear_conf;
    int class_id;
    int clear_id;
};

class YoloV5Inference {
public:
    YoloV5Inference();
    ~YoloV5Inference();
    int YoloV5InferenceInit(YoloV5ModelInfo &stYoloV5ModelInfo, std::string strModelName, std::string strEngineName);
    int YoloV5InferenceDeinit(void);
    int YoloV5InferenceModel(cv::Mat& frame, std::vector<stDetection>& res);
private:
    float GetResizeRatio(unsigned int img_width, unsigned int img_height, unsigned int model_width, unsigned int model_height);
    void doInference(nvinfer1::IExecutionContext& context, cudaStream_t& stream, void** buffers, unsigned int inputIndex, float* input, int inputSize, unsigned int ouputIndex, float* output, int outputSize, int batchSize);
    void doInferenceV2(nvinfer1::IExecutionContext& context, cudaStream_t& stream, void** buffers, unsigned int outputIndex, float* output, int outputSize, int batchSize);
    void decode_opencv_nms(std::vector<stDetection>& res, float *output, int outSize, unsigned int detSize, unsigned int classNum, unsigned int clearNum, float confThresh, float nmsThresh = 0.5);
    void CenterResetLocation(float fResizeRatio, unsigned int orig_width, unsigned int orig_height, unsigned int input_w, unsigned int input_h, stDetection &detection);
    void xywh2xyxy(float *xywh, float * xyxy);
    cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h);
private:
    YoloV5ModelInfo stYoloV5ModelInfo_;
    cudaStream_t* pImagePreprocessStream_ = nullptr; // 图像预处理CUDA流
    cudaStream_t* pInferenceModelStream_ = nullptr; // 模型推理CUDA流
    float* pfBuffers_[2];
    float* pfInputData_ = nullptr;
    float* pfOutputData_ = nullptr;
    uint8_t* pu8ImgHost_ = nullptr; // 相关内存分配
    uint8_t* pu8ImgDevice_ = nullptr;
    unsigned int uiInputIndex_ = 0, uiOutputIndex_ = 0;
    Logger* pGLogger_ = nullptr;
    nvinfer1::IRuntime* pRuntime_ = nullptr;
    nvinfer1::ICudaEngine* pEngine_ = nullptr;
    nvinfer1::IExecutionContext* pContext_ = nullptr;
};

#endif // END OF _YOLOV5_CLERAITY_INFERENCE_H_
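A hedged usage sketch for the wrapper declared above. The engine path matches the step-2 entry in model.yaml; the header name, the input size, the NMS threshold and the 0-on-success return convention are assumptions made only for this illustration:

#include <cstdio>
#include <vector>
#include "yolov5_inference.h"   // assumed header name for the file above

int runStep2(const cv::Mat &frame)
{
    YoloV5ModelInfo info{};
    info.yolov5ModelParam.fScoreThreshold = 0.7f;   // score_threshold from model.yaml
    info.yolov5ModelParam.fNmsThreshold   = 0.5f;   // assumed
    info.modelCommonInfo.uiModelWidth  = 640;       // assumed network input size
    info.modelCommonInfo.uiModelHeight = 640;
    info.modelCommonInfo.uiBatchSize   = 1;

    YoloV5Inference infer;
    if (infer.YoloV5InferenceInit(info, "step2", "./model/step2/step2.engine") != 0)
        return -1;                                  // assumed: 0 == success

    cv::Mat img = frame.clone();
    std::vector<stDetection> dets;
    infer.YoloV5InferenceModel(img, dets);
    for (const stDetection &d : dets)
        printf("class=%d conf=%.2f clear=%.2f\n", d.class_id, d.class_conf, d.clear_conf);

    infer.YoloV5InferenceDeinit();
    return 0;
}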

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
// Copyright (c) 2023 Shandong Matrix Software Engineering Co., Ltd All rights reserved.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

#ifndef _YOLOV8_INFERENCE_H_
#define _YOLOV8_INFERENCE_H_

#include <string>
#include "logging.h"
#include <opencv2/opencv.hpp>
#include <NvCaffeParser.h>
#include <NvInfer.h>
#include <NvInferPlugin.h>
#include <NvOnnxParser.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>

typedef struct _YoloV5ModelParam {
    unsigned int uiClassNum;
    unsigned int uiClearNum;
    unsigned int uiDetSize;
    float fScoreThreshold;
    float fNmsThreshold;
} YoloV5ModelParam;

typedef struct _ModelCommonInfo {
    unsigned int uiModelWidth;
    unsigned int uiModelHeight;
    unsigned int uiInputSize;
    unsigned int uiOutputSize;
    unsigned int uiChannel;
    unsigned int uiBatchSize;
    std::string strInputBlobName;
    std::string strOutputBlobName;
} ModelCommonInfo;

typedef struct _YoloV5ModelInfo {
    YoloV5ModelParam yolov5ModelParam;
    ModelCommonInfo modelCommonInfo;
} YoloV5ModelInfo;

#define LOCATIONS 4
struct alignas(float) stDetection { //alignas(float)
    float bbox[LOCATIONS];
    float class_conf;
    float clear_conf;
    int class_id;
    int clear_id;
};

class YoloV8Inference {
public:
    YoloV8Inference();
    ~YoloV8Inference();
    int YoloV8InferenceInit(YoloV5ModelInfo &pYoloV5ModelInfo, const std::string& strModelName, const std::string& strEngineName);
    int YoloV8InferenceDeinit(void);
    // 空重识别
    int EmptyHeavyCheck(cv::Mat& frame, bool& res);
    int YoloV8InferenceModelCommon(cv::Mat& frame, float& fResizeRatio);
    int YoloV8InferenceModelGetType(cv::Mat& frame, float* fResult, int nsize);
    cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h);
    float GetResizeRatio(unsigned int img_width, unsigned int img_height, unsigned int model_width, unsigned int model_height);
    void doInference(nvinfer1::IExecutionContext& context, cudaStream_t& stream, void** buffers, unsigned int inputIndex, float* input, int inputSize, unsigned int ouputIndex, float* output, int outputSize, int batchSize);
    void doInferenceV2(nvinfer1::IExecutionContext& context, cudaStream_t& stream, void** buffers, unsigned int outputIndex, float* output, int outputSize, int batchSize);
private:
    float* pfTransposeData_ = nullptr;
    YoloV5ModelInfo stYoloV5ModelInfo_;
    cudaStream_t* pImagePreprocessStream_ = nullptr; // 图像预处理CUDA流
    cudaStream_t* pInferenceModelStream_ = nullptr; // 模型推理CUDA流
    float* pfBuffers_[2];
    float* pfInputData_ = nullptr;
    float* pfOutputData_ = nullptr;
    uint8_t* pu8ImgHost_ = nullptr; // 相关内存分配
    uint8_t* pu8ImgDevice_ = nullptr;
    unsigned int uiInputIndex_ = 0, uiOutputIndex_ = 0;
    Logger* pGLogger_ = nullptr;
    nvinfer1::IRuntime* pRuntime_ = nullptr;
    nvinfer1::ICudaEngine* pEngine_ = nullptr;
    nvinfer1::IExecutionContext* pContext_ = nullptr;
};

#endif // END OF _YOLOV8_CLERAITY_INFERENCE_H_
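The YoloV8 wrapper adds an empty/loaded wagon check (空重识别) on top of the same model-info structs. A hedged call-site sketch follows; the header name, the 0-on-success return convention, and the meaning of the out-parameter (true = empty) are assumptions, since the diff does not show the implementation:

#include <cstdio>
#include <opencv2/core.hpp>
#include "yolov8_inference.h"   // assumed header name for the file above

// Illustrative only: classify a wagon crop as empty or loaded.
static void checkEmptyHeavy(YoloV8Inference &v8, cv::Mat &wagonRoi)
{
    bool bEmpty = false;
    if (v8.EmptyHeavyCheck(wagonRoi, bEmpty) == 0)      // assumed: 0 == success
        printf("wagon is %s\n", bEmpty ? "empty" : "loaded");
    else
        printf("empty/heavy check failed\n");
}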

src/base/Yolo/logging.h Normal file

File diff suppressed because one or more lines are too long

src/base/Yolo/macros.h Normal file
View File

@ -0,0 +1 @@
#ifndef __MACROS_H
#define __MACROS_H

#ifdef API_EXPORTS
#if defined(_MSC_VER)
#define API __declspec(dllexport)
#else
#define API __attribute__((visibility("default")))
#endif
#else
#if defined(_MSC_VER)
#define API __declspec(dllimport)
#else
#define API
#endif
#endif // API_EXPORTS

#if NV_TENSORRT_MAJOR >= 8
#define TRT_NOEXCEPT noexcept
#define TRT_CONST_ENQUEUE const
#else
#define TRT_NOEXCEPT
#define TRT_CONST_ENQUEUE
#endif

#endif // __MACROS_H

View File

@ -6,7 +6,7 @@ path_cur=$(cd `dirname $0`; pwd)
# 生成目录
app_path="app"
# 可执行程序名
appname="Matrix"
appname="train"
# 创建build目录
function prepare_path() {

View File

@ -1,24 +1,70 @@
# 基础控制参数
base:
# 股道名称
track_name: "001"
track_name: "1"
# 测试模式
test_model: false
# 连接模式 【0:网口1:串口】
connect_type: 0
# Api 监听端口
api_port: 7070
# 是否上传识别结果
up_result: false
# 日志文件目录
log_path: "./logs"
# 识别结果目录
result_path: "./result"
# 日志存储天数
# 调试结果目录
debug_result_path: "./debug_result"
# 结果存储天数
result_save_days: 10
# 日志参数
log:
# 日志级别[DEBUG, INFO, WARN, ERROR, FATAL]
level: "DEBUG"
# 输出日志级别[DEBUG, INFO, WARN, ERROR, FATAL]
out_level: "DEBUG"
# 保存日志级别
save_level: "DEBUG"
# 数据源参数
data_source:
url: "./vedio/buertai2.mp4"
# 跳帧数
skip_interval: 3
# 行驶方向 0-自动识别 1-向左 2-向右 (与“首位信息”成对存在,形成例如向左就编号在前,向右就属性在前的对应)
direction: 0
# 0-向左编号在前 1-向左属性在前 (向右行驶的情况2-向右编号在前 3-向右属性在前)
left_first: 0
# (向左行驶的情况0-向左编号在前 1-向左属性在前) 2-向右编号在前 3-向右属性在前
right_first: 3
# 识别参数
identify:
# 运行方式
run_mode: "always" #[always; command]
# 是否开启动态检测
need_move_detect_flag: true
# 识别方向 [LEFT,RIGHT,ALL]
identify_direction: "LEFT"
# 大框帧跨度(比一个大框从出现到消失的跨度稍大一点, 跟跳帧有关系)
partition_frame_span: 20
# 大框帧跨度的位置像素差异
split_frame_span_px: 200
# 每帧大框位置差异最小值 (持续小于此值,则可能停车)
chkstop_px: 15
# 持续X次位置差异小于chkstop_px则判断为停车。
chkstop_count: 10
# 过滤最小大框高度(不需要的话就写个很小的值)
num_frame_height: 150
pro_frame_height: 120
# 过滤最大框宽度(不需要的话就写个很大的值)
space_frame_width: 500
# 是否识别车头
train_heard_detect: true
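chkstop_px and chkstop_count drive the stop check described in the two comments above. The sketch below is my reading of that rule (big-box movement below chkstop_px for chkstop_count consecutive frames means the train has stopped), not the shipped implementation:

#include <cmath>

struct StopDetector
{
    int   thresholdPx   = 15;    // chkstop_px
    int   requiredCount = 10;    // chkstop_count
    int   belowCount    = 0;
    float lastX         = -1.0f;

    // Feed the big-box left edge of each processed frame; returns true once stopped.
    bool update(float boxX)
    {
        if (lastX >= 0.0f && std::fabs(boxX - lastX) < thresholdPx)
            ++belowCount;        // still (almost) where it was last frame
        else
            belowCount = 0;      // real movement resets the streak
        lastX = boxX;
        return belowCount >= requiredCount;
    }
};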
# websocket_server 的服务端参数
wsocket_server:
is_use: false
port: 7071
max_queue_len: 10
# http 接口
http_server:
@ -32,6 +78,8 @@ http_server:
token_path: "/api/blade-auth/oauth/token"
# 识别结果上传地址
up_result_path: "/api/train-carriage/identification/rfid-save"
# 设备状态上传地址
device_status_url: ""
# 接口用户名
username: "guest_01"
# 接口密码

View File

@ -4,18 +4,14 @@ use_deviceid:
#engine实例
engines:
GetRfidEngine: 0
DealRfidEngine: 0
SaveRfidEngine: 0
HttpUpResultEngine: 0
SaveResultEngine: 0
DelExpiredEngine: 0
VideoAuxiliaryEngine: 0
ApiEngine: 0
VideoEngine: 0
VideoDecodeEngine: 0
MoveEngine: 0
SaveImageEngine: 0
#engine连接
connects:
GetRfidEngine_0_0: "DealRfidEngine_0_0 1024"
VideoAuxiliaryEngine_0_0: "DealRfidEngine_0_1 1024"
DealRfidEngine_0_0: "SaveRfidEngine_0_0 1024"
DealRfidEngine_0_1: "HttpUpResultEngine_0_0 1024"
DealRfidEngine_0_2: "SaveResultEngine_0_0 1024"
VideoEngine_0_0: "VideoDecodeEngine_0_0 1024"
VideoDecodeEngine_0_0: "MoveEngine_0_0 1024"
MoveEngine_0_0: "SaveImageEngine_0_0 1024"
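By my reading of this format, each connects entry is "<source engine output port>: '<target engine input port> <queue length>'", chaining VideoEngine → VideoDecodeEngine → MoveEngine → SaveImageEngine with 1024-slot queues. A small parser sketch for that string format (illustrative only, not framework code):

#include <sstream>
#include <string>

struct ConnectSpec
{
    std::string srcPort;    // e.g. "VideoDecodeEngine_0_0"
    std::string dstPort;    // e.g. "MoveEngine_0_0"
    int queueLen = 0;       // e.g. 1024
};

// Parse one entry such as: VideoDecodeEngine_0_0: "MoveEngine_0_0 1024"
static ConnectSpec parseConnect(const std::string &key, const std::string &value)
{
    ConnectSpec spec;
    spec.srcPort = key;
    std::istringstream iss(value);      // quotes already stripped by the YAML loader
    iss >> spec.dstPort >> spec.queueLen;
    return spec;
}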

View File

@ -0,0 +1,47 @@
# 动态检测
move_model:
model_path: "./model/step0/step0.engine"
score_threshold: 0
class: []
# 关键区域识别
train_step1_model:
model_path: "./model/step1/step1.engine"
score_threshold: 0.6
class: [ "HEADNUM",
"PROPERTY",
"TYPENUM_K",
"TYPENUM_C",
"TYPENUM_P",
"TYPENUM_G",
"TYPENUM_N",
"MAINTENFLAG",
"TYPENUM_J",
"SPACE",
"SPACELK",
"SPACENX",
"SPACEG",
"SPACEP",
"SPACEJ",
"TYPENUM_W",
"SPACEW",
"SPACEM",
"SPACEU"
]
# 字符识别
train_step2_model:
model_path: "./model/step2/step2.engine"
score_threshold: 0.7
class: ["0","1","2","3","4","5","6","7","8","9",
"A","B","C","D","E","F","G","H","I","J","K","L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V","W", "X", "Y", "Z",
"change", "load", "m", "self", "t", "volume", "meter",")", "(", "?", "-"
]
# 集装箱关键区域识别
container_step1_model:
model_path: "./model/container_step1/con1.engine"
score_threshold: 0.6
class: []
# 集装箱字符识别
container_step2_model:
model_path: "./model/container_step2/con2.engine"
score_threshold: 0.7
class: []
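readModelYaml() loads each class list above into ModelConfig::vecClass; assuming a detection's class_id indexes that list directly (the usual YOLO convention, not confirmed by this diff), the label lookup is:

#include <string>
#include "Config.h"
using namespace ai_matrix;

// Map a step-1 detection class id to its label, e.g. 0 -> "HEADNUM", 1 -> "PROPERTY".
static std::string step1Label(int classId)
{
    const ModelConfig step1 = Config::getins()->getModelByTrainStep1Config();
    if (classId < 0 || classId >= static_cast<int>(step1.vecClass.size()))
        return "UNKNOWN";
    return step1.vecClass[classId];
}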

View File

@ -0,0 +1,610 @@
#include "ApiEngine.h"
#include "AppCommon.h"
using namespace ai_matrix;
ApiEngine::ApiEngine() {}
ApiEngine::~ApiEngine() {}
APP_ERROR ApiEngine::Init()
{
strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0";
strPort1_ = engineName_ + "_" + std::to_string(engineId_) + "_1";
LogInfo << "engineId_:" << engineId_ << " ApiEngine Init ok";
return APP_ERR_OK;
}
APP_ERROR ApiEngine::DeInit()
{
LogInfo << "ApiEngine engineId_:" << engineId_ << " DeInit ok";
return APP_ERR_OK;
}
APP_ERROR ApiEngine::Process()
{
int iRet = APP_ERR_OK;
httplib::Server svr;
svr.set_read_timeout(3, 0); // 3 seconds
svr.set_write_timeout(3, 0); // 3 seconds
svr.set_error_handler([](const httplib::Request& req, httplib::Response& res) {
auto fmt = "<p>Error Status: <span style='color:red;'>%d</span></p>";
char buf[BUFSIZ];
snprintf(buf, sizeof(buf), fmt, res.status);
res.set_content(buf, "text/html");
});
svr.set_exception_handler([](const httplib::Request& req, httplib::Response& res, std::exception_ptr ep) {
auto fmt = "<h1>Error 500</h1><p>%s</p>";
char buf[BUFSIZ];
try {
std::rethrow_exception(ep);
} catch (std::exception &e) {
snprintf(buf, sizeof(buf), fmt, e.what());
} catch (...) { // See the following NOTE
snprintf(buf, sizeof(buf), fmt, "Unknown Exception");
}
res.set_content(buf, "text/html");
res.status = httplib::StatusCode::InternalServerError_500;
});
// svr.set_logger([](const httplib::Request& req, const httplib::Response& res){
// LogInfo << "请求信息:" << req.body;
// LogInfo << "返回信息:" << res.body;
// });
if (!svr.set_mount_point("/public", Config::getins()->getBaseConfig().strResultPath))
{
LogError << "创建识别结果文件服务失败,请检查目录是否存在:" << Config::getins()->getBaseConfig().strResultPath;
}
if (!svr.set_mount_point("/logs", Config::getins()->getBaseConfig().strLogPath))
{
LogError << "创建日志文件服务失败,请检查目录是否存在:" << Config::getins()->getBaseConfig().strLogPath;
}
svr.Get("/hello", [](const httplib::Request& req, httplib::Response& res){
res.set_content("Hello, World!", "text/plain");
});
svr.Get("/stop_api", [&](const httplib::Request& req, httplib::Response& res){
svr.stop();
exit(1);
});
svr.Get("/queryBaseSetting", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->queryBaseSetting();
res.set_content(response.toStyledString(), "application/json");
});
svr.Post("/updateBaseSetting", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->updateBaseSetting(req.body);
res.set_content(response.toStyledString(), "application/json");
});
svr.Get("/queryLogSetting", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->queryLogSetting();
res.set_content(response.toStyledString(), "application/json");
});
svr.Post("/updateLogSetting", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->updateLogSetting(req.body);
res.set_content(response.toStyledString(), "application/json");
});
svr.Get("/queryDataSource", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->queryDataSource();
res.set_content(response.toStyledString(), "application/json");
});
svr.Post("/updateDataSource", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->updateDataSource(req.body);
res.set_content(response.toStyledString(), "application/json");
});
svr.Get("/queryIdentifySetting", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->queryIdentifySetting();
res.set_content(response.toStyledString(), "application/json");
});
svr.Post("/updateIdentifySetting", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->updateIdentifySetting(req.body);
res.set_content(response.toStyledString(), "application/json");
});
svr.Get("/queryHttpSetting", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->queryHttpSetting();
res.set_content(response.toStyledString(), "application/json");
});
svr.Post("/updateHttpSetting", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->updateHttpSetting(req.body);
res.set_content(response.toStyledString(), "application/json");
});
svr.Get("/identify_start", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->identifyStart();
res.set_content(response.toStyledString(), "application/json");
});
svr.Get("/identify_stop", [=](const httplib::Request& req, httplib::Response& res){
Json::Value response = this->identifyStop();
res.set_content(response.toStyledString(), "application/json");
});
int port = Config::getins()->getBaseConfig().iApiPort;
LogInfo << "开启API服务端口:" << port;
if (!svr.listen("0.0.0.0", port, 1))
{
LogError << "监听端口 " << port << " 失败API接口无法使用";
}
return APP_ERR_OK;
}
/**
*
* @return
*/
Json::Value ApiEngine::queryBaseSetting()
{
Json::Value response;
response["success"] = false;
try {
BaseConfig baseConfig = Config::getins()->getBaseConfig();
Json::Value data;
data["track_name"] = baseConfig.strTrackName;
data["test_model"] = baseConfig.bTestModel;
data["api_port"] = baseConfig.iApiPort;
data["up_result"] = baseConfig.bUpResult;
data["result_save_days"] = baseConfig.iResultSaveDays;
response["success"] = true;
response["msg"] = "";
response["data"] = data;
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败! \n异常内容:" + std::string(e.what());
}
return response;
}
/**
*
* @param req
* @return
*/
Json::Value ApiEngine::updateBaseSetting(const std::string &req)
{
Json::Value response;
response["success"] = false;
try {
Json::CharReaderBuilder readerBuilder;
std::istringstream iss(req);
Json::Value root;
std::string errs;
bool parsingSuccessful = Json::parseFromStream(readerBuilder, iss, &root, &errs);
if (!parsingSuccessful)
{
response["msg"] = "接口调用参数异常所传数据非json" + req;
return response;
}
if (root["track_name"].isString() && root["track_name"].asString() != Config::getins()->config_["base"]["track_name"].as<std::string>())
{
Config::getins()->config_["base"]["track_name"] = root["track_name"].asString();
}
if (root["test_model"].isBool())
{
Config::getins()->config_["base"]["test_model"] = root["test_model"].asBool();
}
if (root["api_port"].isInt())
{
Config::getins()->config_["base"]["api_port"] = root["api_port"].asInt();
}
if (root["up_result"].isBool())
{
Config::getins()->config_["base"]["up_result"] = root["up_result"].asBool();
}
if (root["result_save_days"].isInt())
{
Config::getins()->config_["base"]["result_save_days"] = root["result_save_days"].asInt();
}
Config::getins()->writeYaml();
response["success"] = true;
response["msg"] = "";
response["data"] = {};
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败!请求参数:" + req + "\n异常内容:" + std::string(e.what());
}
return response;
}
/**
*
* @return
*/
Json::Value ApiEngine::queryLogSetting()
{
Json::Value response;
response["success"] = false;
try {
LogConfig config = Config::getins()->getLogConfig();
Json::Value data;
data["out_level"] = config.strOutLevel;
data["save_level"] = config.strSaveLevel;
response["success"] = true;
response["msg"] = "";
response["data"] = data;
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败! \n异常内容:" + std::string(e.what());
}
return response;
}
/**
*
* @param req
* @return
*/
Json::Value ApiEngine::updateLogSetting(const std::string &req)
{
Json::Value response;
response["success"] = false;
try {
Json::CharReaderBuilder readerBuilder;
std::istringstream iss(req);
Json::Value root;
std::string errs;
bool parsingSuccessful = Json::parseFromStream(readerBuilder, iss, &root, &errs);
if (!parsingSuccessful)
{
response["msg"] = "接口调用参数异常所传数据非json" + req;
return response;
}
if (root["out_level"].isString() && root["out_level"].asString() != Config::getins()->config_["log"]["out_level"].as<std::string>())
{
Config::getins()->config_["log"]["out_level"] = root["out_level"].asString();
}
if (root["save_level"].isString() && root["save_level"].asString() != Config::getins()->config_["log"]["save_level"].as<std::string>())
{
Config::getins()->config_["log"]["save_level"] = root["save_level"].asString();
}
Config::getins()->writeYaml();
response["success"] = true;
response["msg"] = "";
response["data"] = {};
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败!请求参数:" + req + "\n异常内容:" + std::string(e.what());
}
return response;
}
/**
* 查询数据源配置
* @return
*/
Json::Value ApiEngine::queryDataSource()
{
Json::Value response;
response["success"] = false;
try {
DataSourceConfig dataSourceConfig = Config::getins()->getDataSourceConfig();
Json::Value data;
data["url"] = dataSourceConfig.strUrl;
data["skip_interval"] = dataSourceConfig.iSkipInterval;
data["direction"] = dataSourceConfig.iDirection;
data["left_first"] = dataSourceConfig.iLeftFirst;
data["right_first"] = dataSourceConfig.iRightFirst;
response["success"] = true;
response["msg"] = "";
response["data"] = data;
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败! \n异常内容:" + std::string(e.what());
}
return response;
}
/**
*
* @param req
* @return
*/
Json::Value ApiEngine::updateDataSource(const std::string &req)
{
Json::Value response;
response["success"] = false;
try {
Json::CharReaderBuilder readerBuilder;
std::istringstream iss(req);
Json::Value root;
std::string errs;
bool parsingSuccessful = Json::parseFromStream(readerBuilder, iss, &root, &errs);
if (!parsingSuccessful)
{
response["msg"] = "接口调用参数异常所传数据非json" + req;
return response;
}
if (root["url"].isString() && root["url"].asString() != Config::getins()->config_["data_source"]["url"].as<std::string>())
{
Config::getins()->config_["data_source"]["url"] = root["url"].asString();
}
if (root["skip_interval"].isInt())
{
Config::getins()->config_["data_source"]["skip_interval"] = root["skip_interval"].asInt();
}
if (root["direction"].isInt())
{
Config::getins()->config_["data_source"]["direction"] = root["direction"].asInt();
}
if (root["left_first"].isInt())
{
Config::getins()->config_["data_source"]["left_first"] = root["left_first"].asInt();
}
if (root["right_first"].isInt())
{
Config::getins()->config_["data_source"]["right_first"] = root["right_first"].asInt();
}
Config::getins()->writeYaml();
response["success"] = true;
response["msg"] = "";
response["data"] = {};
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败!请求参数:" + req + "\n异常内容:" + std::string(e.what());
}
return response;
}
/**
*
* @return
*/
Json::Value ApiEngine::queryIdentifySetting()
{
Json::Value response;
response["success"] = false;
try {
IdentifyConfig config = Config::getins()->getIdentifyConfig();
Json::Value data;
data["identify_direction"] = config.strIdentifyDirection;
data["train_heard_detect"] = config.bTrainHeardDetect;
response["success"] = true;
response["msg"] = "";
response["data"] = data;
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败! \n异常内容:" + std::string(e.what());
}
return response;
}
/**
*
* @param req
* @return
*/
Json::Value ApiEngine::updateIdentifySetting(const std::string &req)
{
Json::Value response;
response["success"] = false;
try {
Json::CharReaderBuilder readerBuilder;
std::istringstream iss(req);
Json::Value root;
std::string errs;
bool parsingSuccessful = Json::parseFromStream(readerBuilder, iss, &root, &errs);
if (!parsingSuccessful)
{
response["msg"] = "接口调用参数异常所传数据非json" + req;
return response;
}
if (root["identify_direction"].isString() && root["identify_direction"].asString() != Config::getins()->config_["identify"]["identify_direction"].as<std::string>())
{
Config::getins()->config_["identify"]["identify_direction"] = root["identify_direction"].asString();
}
if (root["train_heard_detect"].isBool() && root["train_heard_detect"].asBool() != Config::getins()->config_["identify"]["train_heard_detect"].as<bool>())
{
Config::getins()->config_["identify"]["train_heard_detect"] = root["train_heard_detect"].asBool();
}
Config::getins()->writeYaml();
response["success"] = true;
response["msg"] = "";
response["data"] = {};
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败!请求参数:" + req + "\n异常内容:" + std::string(e.what());
}
return response;
}
/**
* http服务器配置参数
* @return
*/
Json::Value ApiEngine::queryHttpSetting()
{
Json::Value response;
response["success"] = false;
try {
HttpServerConfig httpServerConfig = Config::getins()->getHttpServerConfig();
Json::Value data;
data["is_use"] = httpServerConfig.bIsUse;
data["http_ip"] = httpServerConfig.strIp;
data["http_port"] = httpServerConfig.iPort;
data["token_path"] = httpServerConfig.strTokenUrl;
data["up_result_path"] = httpServerConfig.strUpResultUrl;
data["device_status_url"] = httpServerConfig.strUpDeviceStatusUrl;
data["username"] = httpServerConfig.strUserName;
data["password"] = httpServerConfig.strPassword;
response["success"] = true;
response["msg"] = "";
response["data"] = data;
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败! \n异常内容:" + std::string(e.what());
}
return response;
}
/**
*
* @param req
* @return
*/
Json::Value ApiEngine::updateHttpSetting(const std::string &req)
{
Json::Value response;
response["success"] = false;
try {
Json::CharReaderBuilder readerBuilder;
std::istringstream iss(req);
Json::Value root;
std::string errs;
bool parsingSuccessful = Json::parseFromStream(readerBuilder, iss, &root, &errs);
if (!parsingSuccessful)
{
response["msg"] = "接口调用参数异常所传数据非json" + req;
return response;
}
if (root["is_use"].isBool() && root["is_use"].asBool() != Config::getins()->config_["http_server"]["is_use"].as<bool>())
{
Config::getins()->config_["http_server"]["is_use"] = root["is_use"].asBool();
}
if (root["http_ip"].isString() && root["http_ip"].asString() != Config::getins()->config_["http_server"]["http_ip"].as<std::string>())
{
Config::getins()->config_["http_server"]["http_ip"] = root["http_ip"].asString();
}
if (root["http_port"].isInt() && root["http_port"].asInt() != Config::getins()->config_["http_server"]["http_port"].as<int>())
{
Config::getins()->config_["http_server"]["http_port"] = root["http_port"].asInt();
}
if (root["token_path"].isString() && root["token_path"].asString() != Config::getins()->config_["http_server"]["token_path"].as<std::string>())
{
Config::getins()->config_["http_server"]["token_path"] = root["token_path"].asString();
}
if (root["up_result_path"].isString() && root["up_result_path"].asString() != Config::getins()->config_["http_server"]["up_result_path"].as<std::string>())
{
Config::getins()->config_["http_server"]["up_result_path"] = root["up_result_path"].asString();
}
if (root["device_status_url"].isString() && root["device_status_url"].asString() != Config::getins()->config_["http_server"]["device_status_url"].as<std::string>())
{
Config::getins()->config_["http_server"]["device_status_url"] = root["device_status_url"].asString();
}
if (root["username"].isString() && root["username"].asString() != Config::getins()->config_["http_server"]["username"].as<std::string>())
{
Config::getins()->config_["http_server"]["username"] = root["username"].asString();
}
if (root["password"].isString() && root["password"].asString() != Config::getins()->config_["http_server"]["password"].as<std::string>())
{
Config::getins()->config_["http_server"]["password"] = root["password"].asString();
}
Config::getins()->writeYaml();
response["success"] = true;
response["msg"] = "";
response["data"] = {};
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败!请求参数:" + req + "\n异常内容:" + std::string(e.what());
}
return response;
}
Json::Value ApiEngine::identifyStart()
{
Json::Value response;
response["success"] = false;
try {
if (g_identify_type)
{
response["success"] = true;
response["msg"] = "程序已处在“识别”状态,请勿重复操作";
response["data"] = {};
}
else
{
g_identify_type = 1;
response["success"] = true;
response["msg"] = "程序已开启“识别”";
response["data"] = {};
}
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败! \n异常内容:" + std::string(e.what());
}
return response;
}
Json::Value ApiEngine::identifyStop()
{
Json::Value response;
response["success"] = false;
try {
if (!g_identify_type)
{
response["success"] = true;
response["msg"] = "程序已处在“停止”状态,请勿重复操作";
response["data"] = {};
}
else
{
g_identify_type = 0;
response["success"] = true;
response["msg"] = "程序已停止“识别”";
response["data"] = {};
}
}
catch (std::exception &e)
{
response["msg"] = "接口调用失败! \n异常内容:" + std::string(e.what());
}
return response;
}
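ApiEngine is built on cpp-httplib, so the same library can exercise the new endpoints from a small test client. A sketch (host is illustrative; 7070 is the api_port default from config.yaml):

#include <iostream>
#include "httplib.h"

int main()
{
    httplib::Client cli("127.0.0.1", 7070);            // api_port from config.yaml
    if (auto res = cli.Get("/queryIdentifySetting"))   // returns the JSON envelope built above
        std::cout << res->status << "\n" << res->body << std::endl;
    else
        std::cerr << "request failed" << std::endl;
    return 0;
}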

View File

@ -0,0 +1,54 @@
/**
* API接口 engine
* */
#ifndef INC_APIENGINE_H
#define INC_APIENGINE_H
#include <arpa/inet.h>
#include "AppCommon.h"
#include "EngineBase.h"
#include "EngineFactory.h"
#include "json/json.h"
#include "httplib.h"
class ApiEngine : public ai_matrix::EngineBase
{
public:
ApiEngine();
~ApiEngine();
APP_ERROR Init() override;
APP_ERROR DeInit() override;
APP_ERROR Process() override;
protected:
private:
std::string strPort0_;
std::string strPort1_;
Json::Value queryBaseSetting();
Json::Value updateBaseSetting(const std::string &req);
Json::Value queryLogSetting();
Json::Value updateLogSetting(const std::string &req);
Json::Value queryDataSource();
Json::Value updateDataSource(const std::string &req);
Json::Value queryIdentifySetting();
Json::Value updateIdentifySetting(const std::string &req);
Json::Value queryHttpSetting();
Json::Value updateHttpSetting(const std::string &req);
Json::Value identifyStart();
Json::Value identifyStop();
};
ENGINE_REGIST(ApiEngine)
#endif

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,71 @@
/**
* engine
* */
#ifndef INC_VIDEOENGINE_H
#define INC_VIDEOENGINE_H
#include <arpa/inet.h>
#include "AppCommon.h"
//让编译器将这部分代码按C语言(而不是C++)的方式进行编译
#ifdef __cplusplus
extern "C"
{
#endif // #ifdef __cplusplus
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#ifdef __cplusplus
}
#endif // #ifdef __cplusplus
#include "EngineBase.h"
#include "EngineFactory.h"
typedef struct
{
int64_t i64Timeout;
} TimeoutContext;
//#define RTSP_PULL_CAMERA_VIDEO_STREAM
class VideoEngine : public ai_matrix::EngineBase
{
public:
VideoEngine();
~VideoEngine();
APP_ERROR Init() override;
APP_ERROR DeInit() override;
APP_ERROR Process() override;
protected:
AVFormatContext *CreateFormatContext();
APP_ERROR GetStreamInfo();
APP_ERROR ConnectCamera(); //连接相机
void ResetCamera(); //复位相机连接
//static int InterruptCallback(void *pData);
private:
AVFormatContext *pFormatCtx_ = nullptr;
AVCodecParameters *pCodecParameters_ = nullptr;
int iVideoStream_ = 0;
bool bIsAvc_ = false;
int iAudioStream_ = -1;
ai_matrix::DataSourceConfig dataSourceConfig_;
DataSourceInfo frameInfo_;
bool bConnectFlag_ = true; //默认不重连相机
std::string strPort0_;
std::string strPort1_;
};
ENGINE_REGIST(VideoEngine)
#endif

View File

@ -0,0 +1,170 @@
#include "HardDecode.h"
using namespace std;
HardDecode::HardDecode()
{
;
}
HardDecode::~HardDecode()
{
;
}
int HardDecode::hardDecoderInit(unsigned int uiWidth, unsigned int uiHeight, unsigned int uiFrameRate, AVCodecParameters *pCodecParameters)
{
uiWidth_ = uiWidth; uiHeight_ = uiHeight;
uiFrameRate_ = uiFrameRate;
this->pCodecParameters_ = pCodecParameters;
av_log_set_level(AV_LOG_ERROR);
// AVCodecID codec_id = AV_CODEC_ID_H264; //解码H264
// pCodec_ = avcodec_find_decoder(codec_id); //获取解码器
pCodec_ = avcodec_find_decoder(this->pCodecParameters_->codec_id);
if (pCodec_ == nullptr) {
LogError << "cannot find decoder !";
return 0;
}
//创建上下文
pCodecCtx_ = avcodec_alloc_context3(pCodec_);
if (!pCodecCtx_){
LogError << "Could not allocate video codec context";
return 0;
}
// 将AVCodecParameters结构体中码流参数拷贝到AVCodecContext结构体中,format, width, height, codec_type等
avcodec_parameters_to_context(pCodecCtx_, this->pCodecParameters_);
if (!pCodecCtx_) {
LogError << "Cannot alloc context.";
return 0;
}
//打开解码器
int iRet = 1;
if ((iRet = avcodec_open2(pCodecCtx_, pCodec_, nullptr)) < 0) {
LogError << "cannot open decoder, Ret:" << iRet;
return 0;
}
//分配packet
pPacket_ = av_packet_alloc();
if (!pPacket_){
LogError << "Could not allocate video packet";
return 0;
}
// av_init_packet(pPacket_);
//分配frame
pSrcFrame_ = av_frame_alloc();
if (!pSrcFrame_) {
LogError << "Could not allocate video src pFrame";
return 0;
}
pDstFrame_ = av_frame_alloc();
if (!pDstFrame_) {
LogError << "Could not allocate video dst pFrame";
return 0;
}
// printf("after align down video_width: %d, video_height: %d\n", uiWidth_, uiHeight_);
int bufferSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
pCodecCtx_->width,
pCodecCtx_->height, 1);
outBuffer_ = (unsigned char *) av_malloc(bufferSize);
av_image_fill_arrays(pDstFrame_->data,
pDstFrame_->linesize,
outBuffer_,
AV_PIX_FMT_YUV420P,
pCodecCtx_->width,
pCodecCtx_->height, 1);
pSwsContext_ = sws_getContext(pCodecCtx_->width, pCodecCtx_->height, pCodecCtx_->pix_fmt,
pCodecCtx_->width, pCodecCtx_->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, nullptr, nullptr, nullptr);
return 1;
}
int HardDecode::hardDecoderDeInit()
{
if (pCodecParameters_){
av_free(pCodecParameters_);
pCodecParameters_ = nullptr;
}
if(outBuffer_){
av_free(outBuffer_);
outBuffer_ = nullptr;
}
if(pSrcFrame_){
av_frame_free(&pSrcFrame_);
pSrcFrame_ = nullptr;
}
if(pDstFrame_){
av_frame_free(&pDstFrame_);
pDstFrame_ = nullptr;
}
if(pPacket_){
av_packet_free(&pPacket_);
pPacket_ = nullptr;
}
if(pCodecParserCtx_){
av_parser_close(pCodecParserCtx_);
pCodecParserCtx_ = nullptr;
}
if(pCodecCtx_){
avcodec_free_context(&pCodecCtx_); // closes the codec, frees the context and nulls the pointer
}
if(pSwsContext_){
sws_freeContext(pSwsContext_);
pSwsContext_ = nullptr;
}
return 1;
}
int HardDecode::hardDecoder(void* pOutputData,unsigned int* puiOutputDataSize)
{
int ret;
ret = avcodec_send_packet(this->pCodecCtx_, this->pPacket_); // send the packet to the decoder
if (ret < 0) {
LogError << "Error sending a packet for decoding";
return 0;
}
while (ret >= 0) {
ret = avcodec_receive_frame(this->pCodecCtx_, this->pSrcFrame_); // receive a decoded frame
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
LogWarn << "During decoding eof";
return 0;
}
else if (ret < 0) {
LogError << "Error during decoding";
return 0;
}
sws_scale(this->pSwsContext_,
(const uint8_t *const *)this->pSrcFrame_->data,
this->pSrcFrame_->linesize,
0,
this->pCodecCtx_->height,
this->pDstFrame_->data,
this->pDstFrame_->linesize);
fflush(stdout);
int iSize = this->pCodecCtx_->width * this->pCodecCtx_->height;
uint8_t *pOut = static_cast<uint8_t *>(pOutputData); // avoid non-standard arithmetic on void*
memcpy(pOut, this->pDstFrame_->data[0], iSize); //Y
memcpy(pOut + iSize, this->pDstFrame_->data[1], iSize/4); //U
memcpy(pOut + iSize + iSize/4, this->pDstFrame_->data[2], iSize/4); //V
*puiOutputDataSize = iSize*3/2;
return iSize*3/2;
}
return 0;
}
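hardDecoder() decodes whatever packet is currently held in the public pPacket_ member, so the caller has to fill that packet from the demuxer before each call. The following is a minimal usage sketch under that assumption; the demuxer setup, the 25 fps hint and the DecodeLoop helper are illustrative and not part of this repository (the real VideoDecodeEngine presumably receives packets through its input queue instead).

// Hypothetical read/decode loop around HardDecode. Assumes pFormatCtx and iVideoStream
// were obtained elsewhere (e.g. via avformat_open_input / av_find_best_stream).
#include <vector>
#include "HardDecode.h"

static void DecodeLoop(AVFormatContext *pFormatCtx, int iVideoStream)
{
    // Copy the stream's codec parameters so HardDecode owns the copy it frees in DeInit.
    AVCodecParameters *pPar = avcodec_parameters_alloc();
    if (!pPar || avcodec_parameters_copy(pPar, pFormatCtx->streams[iVideoStream]->codecpar) < 0)
        return;

    HardDecode decoder;
    if (decoder.hardDecoderInit(pPar->width, pPar->height, 25, pPar) != 1)
        return;

    // A YUV420P frame is width * height * 3 / 2 bytes.
    std::vector<uint8_t> yuvBuffer(static_cast<size_t>(pPar->width) * pPar->height * 3 / 2);

    while (av_read_frame(pFormatCtx, decoder.pPacket_) >= 0)
    {
        if (decoder.pPacket_->stream_index == iVideoStream)
        {
            unsigned int uiSize = 0;
            if (decoder.hardDecoder(yuvBuffer.data(), &uiSize) > 0)
            {
                // uiSize bytes of YUV420P data are now in yuvBuffer; hand them to the next stage.
            }
        }
        av_packet_unref(decoder.pPacket_); // release the packet buffer before the next read
    }
    decoder.hardDecoderDeInit();
}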

View File

@ -0,0 +1,91 @@
// Hardware H.264 decoding via FFmpeg
#ifndef _HARDDECODE_H
#define _HARDDECODE_H
#include <iostream>
#include <chrono>
#include <cmath>
#include <utility>
#include <thread>
#include <functional>
#include <atomic>
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#include <queue>
#include <mutex>
#include <semaphore.h>
#include <algorithm>
#include <string>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <vector>
#include <memory>
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
};
#endif
#define DUMP_FRAME(frame) { \
printf( "%s-%d AVFrame:format=%2d, key_frame=%d, pict_type=%d, width=%4d, height=%4d, data=(%d, %d, %d), linesize=(%4d, %4d, %4d)\n", \
__func__, __LINE__, \
frame->format, frame->key_frame, frame->pict_type, \
frame->width, frame->height, \
(frame->data[0] != NULL), \
(frame->data[1] != NULL), \
(frame->data[2] != NULL),\
frame->linesize[0], \
frame->linesize[1], \
frame->linesize[2] \
);}
#define NVIDIA_H264_DECODER "h264_cuvid"
// #define NVIDIA_H264_DECODER "h264_v4l2m2m"
#include "Log.h"
class HardDecode
{
public:
HardDecode();
~HardDecode();
int hardDecoderInit(unsigned int uiWidth, unsigned int uiHeight, unsigned int uiFrameRate = 30, AVCodecParameters *pCodecParameters = nullptr);
int hardDecoderDeInit();
int hardDecoder(void* pOutputData, unsigned int* puiOutputDataSize);
const AVCodec *pCodec_ = nullptr; // decoder
AVCodecContext *pCodecCtx_ = nullptr; // codec context
AVCodecParserContext *pCodecParserCtx_ = nullptr; // parser context
AVFrame *pSrcFrame_ = nullptr;
AVFrame *pDstFrame_ = nullptr;
AVPacket *pPacket_ = nullptr;
SwsContext *pSwsContext_ = nullptr;
uint8_t *outBuffer_ = nullptr;
private:
unsigned int uiWidth_, uiHeight_;
unsigned int uiFrameRate_;
AVCodecParameters *pCodecParameters_ = nullptr;
};
#endif

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,58 @@
/**
* Video decoding engine
* */
#ifndef VIDEODECODEENGINE_H
#define VIDEODECODEENGINE_H
#include <iostream>
#include <chrono>
#include <cmath>
#include <utility>
#include <thread>
#include <functional>
#include <atomic>
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#include <queue>
#include <mutex>
#include <semaphore.h>
#include <algorithm>
#include <string>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <vector>
#include <memory>
#include "EngineBase.h"
#include "EngineFactory.h"
#include "AppCommon.h"
#include "HardDecode.h"
class VideoDecodeEngine : public ai_matrix::EngineBase
{
public:
VideoDecodeEngine();
~VideoDecodeEngine();
APP_ERROR Init() override;
APP_ERROR DeInit() override;
APP_ERROR Process() override;
private:
std::string strPort0_;
HardDecode* harddecoder_ = nullptr;
ai_matrix::DataSourceConfig dataSourceConfig_;
bool bUseEngine_;
};
ENGINE_REGIST(VideoDecodeEngine)
#endif

View File

@ -0,0 +1,347 @@
#include "MoveEngine.h"
//#include <opencv2/opencv.hpp>
#include "myqueue.h"
using namespace ai_matrix;
extern bool g_bHaveTrainFlag;
extern bool g_bNoDealStepTwoFlag;
MoveEngine::MoveEngine() {}
MoveEngine::~MoveEngine() {}
APP_ERROR MoveEngine::Init()
{
strPort0_ = engineName_ + "_" + std::to_string(engineId_) + "_0";
modelConfig_ = Config::getins()->getModelByMoveConfig();
identifyConfig_ = Config::getins()->getIdentifyConfig();
baseConfig_ = Config::getins()->getBaseConfig();
if (identifyConfig_.bNeedMoveDetectFlag)
{
// load the model information
APP_ERROR ret = ReadModelInfo();
if (ret != APP_ERR_OK)
{
LogError << "Failed to read model info, ret = " << ret;
return ret;
}
ret = InitModel();
if (ret != APP_ERR_OK)
{
LogError << "Failed to read model info, ret = " << ret;
return ret;
}
}
InitParam();
LogInfo << "MoveEngine Init ok";
return APP_ERR_OK;
}
APP_ERROR MoveEngine::InitModel()
{
modelinfo.modelCommonInfo.uiModelWidth = model_width;
modelinfo.modelCommonInfo.uiModelHeight = model_height;
modelinfo.modelCommonInfo.uiInputSize = input_size;
modelinfo.modelCommonInfo.uiOutputSize = output_size;
modelinfo.modelCommonInfo.uiChannel = INPUT_CHANNEL;
modelinfo.modelCommonInfo.uiBatchSize = batch_size;
modelinfo.modelCommonInfo.strInputBlobName = INPUT_BLOB_NAME;
modelinfo.modelCommonInfo.strOutputBlobName = OUTPUT_BLOB_NAME;
std::string strModelName = "";
int nRet = yolov8model.YoloV8InferenceInit(modelinfo, strModelName, modelConfig_.strModelPath);
if (nRet != 0)
{
LogInfo << "YoloV5ClassifyInferenceInit nRet:" << nRet;
return APP_ERR_COMM_READ_FAIL;
}
return APP_ERR_OK;
}
APP_ERROR MoveEngine::ReadModelInfo()
{
// Check the validity of model path
int iFolderExist = access(modelConfig_.strModelPath.c_str(), R_OK);
if (iFolderExist == -1)
{
LogError << "ModelPath " << modelConfig_.strModelPath << " doesn't exist or read failed!";
return APP_ERR_COMM_NO_EXIST;
}
return APP_ERR_OK;
}
APP_ERROR MoveEngine::DeInit()
{
if (identifyConfig_.bNeedMoveDetectFlag)
{
yolov8model.YoloV8InferenceDeinit();
}
LogInfo << "MoveEngine DeInit ok";
return APP_ERR_OK;
}
/**
* InitParam(): reset the per-train bookkeeping parameters
* inParam : N/A
* outParam: N/A
* return : N/A
*/
void MoveEngine::InitParam()
{
iStepInter_ = 0;
iMoveDataNO_ = 1;
}
void MoveEngine::sendComeTrain() {
std::string message = "{\"cometime\":\"" + this->strTrainDate_ + " " + this->strTrainTime_ + "\",\"type\":\"1\"}";
outputQueMap_[engineName_ + "_" + std::to_string(engineId_) + "_1"]->push(std::static_pointer_cast<void>(std::make_shared<std::string>(message)));
}
void MoveEngine::sendEndTrain() {
std::string message = "{\"cometime\":\"" + this->strTrainDate_ + " " + this->strTrainTime_ + "\",\"type\":\"0\"}";
outputQueMap_[engineName_ + "_" + std::to_string(engineId_) + "_1"]->push(std::static_pointer_cast<void>(std::make_shared<std::string>(message)));
}
/**
* Process the frame on a single device
* inParam : std::shared_ptr<ProcessData> pProcessData
* outParam: N/A
* return : N/A
*/
void MoveEngine::SingleDeviceProcess(std::shared_ptr<ProcessData> pProcessData, int iType)
{
if(iMoveDataNO_ == 1)
{
strTrainDate_ = TimeUtil::getins()->getDate();
strTrainTime_ = TimeUtil::getins()->getTime();
std::string strDirTrainTime = StringUtil::getins()->replace_all_distinct(strTrainTime_, ":", "-");
// create this train's data storage directories up front (avoids errors when multiple threads try to create them later)
std::string strTrainPath = baseConfig_.strDebugResultPath + "/" + strTrainDate_ + "/" + strDirTrainTime + "/";
FileUtil::getins()->createDirPath(strTrainPath);
std::string strBestImgPath = baseConfig_.strResultPath + "/" + strTrainDate_ + "/" + strDirTrainTime + "/";
FileUtil::getins()->createDirPath(strBestImgPath);
}
pProcessData->strTrainDate = strTrainDate_;
pProcessData->strTrainTime = strTrainTime_;
pProcessData->iFrameId = iMoveDataNO_;
// assemble the data and push it to the other port (only two notifications per train: one on arrival, one at the end)
if (iMoveDataNO_ == 1 || pProcessData->bIsTrainEnd || (iPreMonitorState_ != iType))
{
if(iPreMonitorState_ != iType){
iPreMonitorState_ = iType;
}
LogDebug << "来车检测--> 日期:" << strTrainDate_ << " 来车时间:" << strTrainTime_
<< " 帧:" << pProcessData->iFrameId << " 结束状态:" << pProcessData->bIsTrainEnd;
std::shared_ptr<MoveData> pMoveData = std::make_shared<MoveData>();
pMoveData->iFrameId = iMoveDataNO_; // current frame number
pMoveData->i64TimeStamp = pProcessData->sourceFrameData.i64TimeStamp;
pMoveData->bHasTrain = true;
pMoveData->bIsEnd = pProcessData->bIsTrainEnd;
pMoveData->strTrainDate = strTrainDate_;
pMoveData->strTrainName = strTrainTime_;
pMoveData->iTrainStage = iType;
// notify step one to start recognition
// outputQueMap_[engineName_ + "_" + std::to_string(engineId_) + "_5"]->push(std::static_pointer_cast<void>(pMoveData));
}
pProcessData->iDataNO = iMoveDataNO_++;
// push to the save-image port
std::shared_ptr<SaveImgData> pSaveImgData = std::make_shared<SaveImgData>();
pSaveImgData->strImgPath = baseConfig_.strDebugResultPath + "/" + pProcessData->strTrainDate + "/"
+ StringUtil::getins()->replace_all_distinct(pProcessData->strTrainTime, ":", "-");
pSaveImgData->strImgName = std::to_string(pProcessData->iFrameId) + ".jpg";
// pSaveImgData->strTrainStage = intTrainStage_2_str(iType);
pSaveImgData->frameData.pData = pProcessData->sourceFrameData.pData;
pSaveImgData->frameData.iSize = pProcessData->sourceFrameData.iSize;
pSaveImgData->frameData.iWidth = pProcessData->dataSourceInfo.iWidth;
pSaveImgData->frameData.iHeight = pProcessData->dataSourceInfo.iHeight;
pSaveImgData->frameData.i64TimeStamp = pProcessData->sourceFrameData.i64TimeStamp;
outputQueMap_[strPort0_]->push(std::static_pointer_cast<void>(pSaveImgData));
}
APP_ERROR MoveEngine::Process()
{
int iRet = APP_ERR_OK;
while (!isStop_)
{
// pop from port 0
std::shared_ptr<void> pVoidData0 = nullptr;
inputQueMap_[strPort0_]->pop(pVoidData0);
if (nullptr == pVoidData0)
{
usleep(1000); //1ms
continue;
}
std::shared_ptr<ProcessData> pProcessData = std::static_pointer_cast<ProcessData>(pVoidData0);
int iType = MONITOR_MODEL_INIT_STATE;
// 1. motion detection is not required
if (!identifyConfig_.bNeedMoveDetectFlag)
{
SingleDeviceProcess(pProcessData, MONITOR_MODEL_INIT_STATE);
if (pProcessData->bIsTrainEnd)
{
InitParam();
}
continue;
}
bool bGetTrainExist = false;
if (pProcessData->sourceFrameData.pData != nullptr && pProcessData->sourceFrameData.iSize != 0)
{
cv::Mat img; // BGR
cv::Mat img_gray(pProcessData->dataSourceInfo.iHeight,
pProcessData->dataSourceInfo.iWidth,
CV_8UC1);
cv::cvtColor(cv::Mat(pProcessData->dataSourceInfo.iHeight,
pProcessData->dataSourceInfo.iWidth,
CV_8UC3,
static_cast<uint8_t*>(pProcessData->sourceFrameData.pData.get())),
img_gray, cv::COLOR_BGR2GRAY);
cv::cvtColor(img_gray, img, cv::COLOR_GRAY2BGR);
// auto start = std::chrono::system_clock::now(); // start timing
float fReturnVal[STEP0_OUTPUT_ARRAY]; // one score per monitor state
memset(fReturnVal, 0x00, sizeof(fReturnVal));
yolov8model.YoloV8InferenceModelGetType(img,
fReturnVal,
STEP0_OUTPUT_ARRAY * sizeof(float));
float fScore = 0.0f;
for (int n = 0; n < STEP0_OUTPUT_ARRAY; n++){
if(fReturnVal[n] > fScore){
fScore = fReturnVal[n];
iType = n;
}
}
// LogDebug <<"模型得分 车头:"<< fReturnVal[0] << " 车头车身交接:" << fReturnVal[1] <<" 无车:"<< fReturnVal[2]<<" 车尾:"<< fReturnVal[3]<<" 有车:"<< fReturnVal[4];
switch (iType) {
case MONITOR_MODEL_TRAIN_HEAD:
LogDebug << "Train state: head";
break;
case MONITOR_MODEL_HEAD_FIRST:
LogDebug << "Train state: between head and carriages";
break;
case MONITOR_MODEL_NO_TRAIN:
LogDebug << "Train state: no train";
break;
case MONITOR_MODEL_TRAIN_TAIL:
LogDebug << "Train state: tail";
break;
case MONITOR_MODEL_TRAIN_BODY:
LogDebug << "Train state: carriage";
break;
default:
LogDebug << "Train state: undefined";
break;
}
if (this->identifyConfig_.bTrainHeardDetect)
{
bGetTrainExist = (iType == MONITOR_MODEL_TRAIN_BODY
|| iType == MONITOR_MODEL_TRAIN_HEAD
|| iType == MONITOR_MODEL_HEAD_FIRST
|| iType == MONITOR_MODEL_TRAIN_TAIL);
}
else
{
bGetTrainExist = (iType == MONITOR_MODEL_TRAIN_BODY
|| iType == MONITOR_MODEL_TRAIN_TAIL
|| iType == MONITOR_MODEL_HEAD_FIRST);
}
// uint64_t i64Time = 0;
// auto end = std::chrono::system_clock::now();
// i64Time = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
}
if (bGetTrainExist)
{
iHasTrainNum_ = iHasTrainNum_ > 20 ? iHasTrainNum_ : iHasTrainNum_ + 1;
// if (iHasTrainNum_ > 0) LogDebug << "Train currently present, count:" << iHasTrainNum_;
}
else
{
iHasTrainNum_ = iHasTrainNum_ == 0 ? iHasTrainNum_ : iHasTrainNum_ - 1;
if (iHasTrainNum_ == 0) LogInfo << "----- no train present -----";
}
// g_bHaveTrainFlag = bGetTrainExist;
// a train is present: start recognition
if (iHasTrainNum_ > 0)
{
if (iStepInter_ != 1 && queProcessData_.size() < 3)
{
queProcessData_.push(pProcessData);
LogDebug << "iStepInter_: " << iStepInter_ << " queSize:" << queProcessData_.size() << " continue";
continue;
}
// if (iStepInter_ != 1) this->sendComeTrain();
iStepInter_ = 1;
}
// no train: stop recognition
else
{
if (iStepInter_ == 1)
{
iStepInter_ = 2;
// this->sendEndTrain();
}
while (!queProcessData_.empty())
{
LogDebug << "while iStepInter_: " << iStepInter_ << " queSize:" << queProcessData_.size();
queProcessData_.pop();
}
}
// recognition processing while a train is present
if (iStepInter_ != 0)
{
pProcessData->iStatus = TRAINSTATUS_RUN;
pProcessData->bIsTrainEnd = (iStepInter_ == 2); // motion detection reports no train: set the train-end flag
SingleDeviceProcess(pProcessData, iType);
if (iStepInter_ == 2)
{
// this->sendEndTrain();
InitParam();
}
}
iPreMonitorState_ = iType;
}
return iRet;
}
std::string MoveEngine::intTrainStage_2_str(int iTrainStage)
{
switch (iTrainStage) {
case MONITOR_MODEL_TRAIN_HEAD:
return "train_head";
case MONITOR_MODEL_HEAD_FIRST:
return "head_and_carriage";
case MONITOR_MODEL_NO_TRAIN:
return "no_train";
case MONITOR_MODEL_TRAIN_TAIL:
return "train_tail";
case MONITOR_MODEL_TRAIN_BODY:
return "carriage";
default:
return "";
}
}
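MoveEngine::Process() debounces the per-frame classifier result with a saturating counter (iHasTrainNum_): it climbs toward a cap while frames contain a train, and recognition only stops once enough train-free frames have drained it back to zero, so a few misclassified frames cannot end a train prematurely. The standalone sketch below reproduces just that counter with made-up frame results.

// Standalone sketch of the saturating presence counter used in MoveEngine::Process().
// The frame results below are made up; only the debouncing behaviour is illustrated.
#include <cstdio>

int main()
{
    const bool frameHasTrain[] = {true, true, false, true, true, false, false, false};
    int iHasTrainNum = 0;

    for (bool bGetTrainExist : frameHasTrain)
    {
        if (bGetTrainExist)
            iHasTrainNum = iHasTrainNum > 20 ? iHasTrainNum : iHasTrainNum + 1; // saturate around 20
        else
            iHasTrainNum = iHasTrainNum == 0 ? iHasTrainNum : iHasTrainNum - 1; // decay toward 0

        // Recognition runs while the counter is above zero; it stops only after
        // enough consecutive "no train" frames have drained the counter.
        std::printf("frame=%d counter=%d recognising=%s\n",
                    bGetTrainExist, iHasTrainNum, iHasTrainNum > 0 ? "yes" : "no");
    }
    return 0;
}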

View File

@ -0,0 +1,89 @@
/**
* Train motion-detection engine
* */
#ifndef MOVEENGINE_H
#define MOVEENGINE_H
#include "AppCommon.h"
#include "EngineBase.h"
#include "EngineFactory.h"
#include "YoloV8Inference.h"
#define STEP0_BATCH_SIZE 1
#define STEP0_INPUT_CHANNEL 3 // number of input channels
#define STEP0_INPUT_H 640 // input image height
#define STEP0_INPUT_W 640 // input image width
#define INPUT_CHANNEL 3 // number of input channels
#define STEP0_INPUT_SIZE STEP0_BATCH_SIZE*STEP0_INPUT_CHANNEL*STEP0_INPUT_W*STEP0_INPUT_H
#define STEP0_OUTPUT_ARRAY 5 // one classifier score per monitor state (head, head/body junction, no train, tail, carriage)
#define STEP0_OUTPUT_SIZE 1 * STEP0_OUTPUT_ARRAY * 2 // BOX
#define STEP0_CLEAR_NUM 0
#define STEP0_CLASS_NUM 4
class MoveEngine : public ai_matrix::EngineBase
{
public:
MoveEngine();
~MoveEngine();
APP_ERROR Init() override;
APP_ERROR DeInit() override;
APP_ERROR Process() override;
private:
// initialise the recognition model
APP_ERROR InitModel();
// load the model configuration
APP_ERROR ReadModelInfo();
// reset the per-train parameters
void InitParam();
// process the frame on a single device
void SingleDeviceProcess(std::shared_ptr<ProcessData> pProcessData, int nType);
std::string intTrainStage_2_str(int iTrainStage);
void sendComeTrain();
void sendEndTrain();
bool bNeedMoveDetectFlag_;
std::string strPort0_;
ai_matrix::ModelConfig modelConfig_;
ai_matrix::IdentifyConfig identifyConfig_;
ai_matrix::BaseConfig baseConfig_;
// std::string strResultPath_;
// std::string strBestPath_;
YoloV8Inference yolov8model;
int iStepInter_ = 0; // (0: not recognising; 1: recognition in progress; 2: recognition finished)
uint32_t iMoveDataNO_ = 1; // motion-detection data sequence number
int iHasTrainNum_ = 0; // count of recent frames with a train present
int iPreMonitorState_ = MONITOR_MODEL_INIT_STATE;
std::string strTrainDate_;
std::string strTrainTime_;
std::set<int> setPushPort_;
ai_matrix::DataSourceConfig dataSourceCfg_;
std::string INPUT_BLOB_NAME = "images"; // input layer name defined in the deploy file
std::string OUTPUT_BLOB_NAME = "output0"; // output layer name defined in the deploy file
unsigned int img_width = IMAGE_WIDTH;
unsigned int img_height = IMAGE_HEIGHT;
unsigned int model_width = STEP0_INPUT_W;
unsigned int model_height = STEP0_INPUT_H;
unsigned int clear_num = STEP0_CLEAR_NUM;
unsigned int class_num = STEP0_CLASS_NUM;
unsigned int input_size = STEP0_INPUT_SIZE;
unsigned int output_size = STEP0_OUTPUT_SIZE;
unsigned int det_size = STEP0_CLASS_NUM + STEP0_CLEAR_NUM + 5;
unsigned int batch_size = STEP0_BATCH_SIZE;
YoloV5ModelInfo modelinfo;
std::queue<std::shared_ptr<ProcessData>> queProcessData_;
};
ENGINE_REGIST(MoveEngine)
#endif

View File

@ -12,6 +12,7 @@
using namespace ai_matrix;
std::atomic_bool app_flag(true);
std::atomic<int> g_identify_type(0);
void SigHandler(int iSigno)
{
@ -23,26 +24,40 @@ void SigHandler(int iSigno)
// configuration file paths
std::string strConfigYamlPath = "./config/config.yaml";
std::string strModelConfigYamlPath = "./config/model_config.yaml";
std::string strEngineYamlPath = "./config/matrix.yaml";
int main(int argc, const char *argv[])
{
if (argc > 1)
{
strConfigYamlPath = argv[1];
printf("config_path:%s\n", strConfigYamlPath.c_str());
}
// load the configuration files
int iRetYaml = Config::GetIns()->readYaml(strConfigYamlPath);
int iRetYaml = Config::getins()->readYaml(strConfigYamlPath);
if (-1 == iRetYaml)
{
LogError << "read yaml file error";
LogError << "read config.yaml file error";
return -1;
}
ai_matrix::BaseConfig baseConfig = Config::GetIns()->getBaseConfig();
ai_matrix::LogConfig logConfig = Config::GetIns()->getLogConfig();
iRetYaml = Config::getins()->readModelYaml(strModelConfigYamlPath);
if (-1 == iRetYaml)
{
LogError << "read model_config.yaml file error";
return -1;
}
ai_matrix::BaseConfig baseConfig = Config::getins()->getBaseConfig();
ai_matrix::LogConfig logConfig = Config::getins()->getLogConfig();
// configure logging
MatrixAiLog::Log::SetLogLevel(logConfig.strLevel);
MatrixAiLog::Log::SetLogLevel(logConfig.strOutLevel);
MatrixAiLog::Log::SetLogFile(baseConfig.strLogPath);
if (!FileUtil::getins()->CreateDirPath("./logs"))
if (!FileUtil::getins()->createDirPath("./logs"))
{
LogError << "日志目录创建失败";
return -1;