模板项目

This commit is contained in:
Mr.V 2024-05-23 20:37:45 +08:00
parent 7b0865c89a
commit 097da26334
65 changed files with 19216 additions and 0 deletions

235
src/CMakeLists.txt Normal file
View File

@ -0,0 +1,235 @@
# CMake lowest version requirement
cmake_minimum_required(VERSION 3.21)

# Project information.
# Fix: the original called project() twice (once for the name, once for
# VERSION/DESCRIPTION); a single call does both.
project(train_RFID_Linux VERSION 0.1 DESCRIPTION "RFID识别程序 Linux版")

# C++ standard.
# Fix: the original passed -std=c++11 through add_definitions(), which is a
# preprocessor-definition command, not a flag mechanism; CMAKE_CXX_STANDARD
# below is the supported way and also drives the compile line.
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
add_compile_definitions(API_EXPORTS)
set(CMAKE_BUILD_TYPE Debug)
# Fix: option() is (name help default); the original passed OFF where the
# help string belongs, so the default was never actually set.
option(CUDA_USE_STATIC_CUDA_RUNTIME "Link the static CUDA runtime" OFF)

# Directory layout.
# Fix: no trailing '/' here — every use below appends its own separator, so
# the original produced double slashes like ".../src//CMake".
set(PROJECT_SRC_ROOT ${CMAKE_CURRENT_LIST_DIR})
set(CMAKE_MODULE_PATH ${PROJECT_SRC_ROOT}/CMake)
# Executables land in <src>/app.
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_SRC_ROOT}/app)
# RPATH: resolve shared libraries from ./lib relative to the binary.
set(CMAKE_SKIP_BUILD_RPATH FALSE)
set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
set(CMAKE_INSTALL_RPATH "./lib")

# Threading — linked below as Threads::Threads instead of a bare "pthread".
find_package(Threads REQUIRED)
# sftp
find_package(Libssh2)
# boost
set(BOOST_INCLUDEDIR /usr/local/include)
find_package(Boost 1.65.0 EXACT REQUIRED COMPONENTS system filesystem)
include_directories(${Boost_INCLUDE_DIRS})
message(STATUS "Using Boost ${Boost_VERSION}")
# opencv (currently disabled)
#find_package(OpenCV REQUIRED)
#message(STATUS "Using OpenCV ${OpenCV_VERSION}")
# CUDA (currently disabled)
#find_package(CUDA REQUIRED)
#message(STATUS "Using CUDA ${CUDA_VERSION}")

# Compiler flags. NOTE(review): directory-scoped flag strings are kept for
# behavioural parity; target_compile_options would be the modern form.
set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g -ggdb")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -O0 -Wfatal-errors -pthread -w -g")
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11 -O0 -Xcompiler -fPIC -g -w ${CUDA_GEN_CODE}")

# System include/lib locations.
set(SYS_USR_INCLUDE_DIR "/usr/include")
set(SYS_USR_LIB_DIR "/usr/lib")
set(SYS_USR_LOCAL_INCLUDE_DIR "/usr/local/include")
set(SYS_USR_LOCAL_LIB_DIR "/usr/local/lib")
set(X86_LINUX_INCLUDE_DIR "/usr/include/x86_64-linux-gnu")
set(X86_LINUX_LIB_DIR "/usr/lib/x86_64-linux-gnu")
#OpenCV
#set(OPENCV_INCLUDE_DIR ${SYS_USR_LOCAL_INCLUDE_DIR}/opencv4)
#set(OPENCV_LIB_DIR ${SYS_USR_LOCAL_LIB_DIR})
#CUDA
#set(CUDA_DIR "/usr/local/cuda-11.7")
#set(CUDA_INCLUDE_DIR ${CUDA_DIR}/include)
#set(CUDA_LIB_DIR ${CUDA_DIR}/lib64)
#TensorRT
#set(TENSORRT_INCLUDE_DIR ${X86_LINUX_INCLUDE_DIR})
#set(TENSORRT_LIB_DIR ${X86_LINUX_LIB_DIR})

# ai_matrix framework sources.
# Fix: patterns were "*cpp" (no dot); "*.cpp" matches only real C++ sources.
# CONFIGURE_DEPENDS makes the glob re-run when files are added/removed.
set(ai_matrix_Folder ${PROJECT_SRC_ROOT}/ai_matrix)
get_filename_component(ai_matrix_ABS_DIR ${ai_matrix_Folder} ABSOLUTE)
file(GLOB_RECURSE ai_matrix_SRC_FILES CONFIGURE_DEPENDS
    ${ai_matrix_ABS_DIR}/framework/*.cpp
    ${ai_matrix_ABS_DIR}/myqueue/*.cpp
    ${ai_matrix_ABS_DIR}/myshell/*.cpp
    ${ai_matrix_ABS_DIR}/mylog/*.cpp
    ${ai_matrix_ABS_DIR}/Utils/*.cpp
    ${ai_matrix_ABS_DIR}/Http/*.cpp
    ${ai_matrix_ABS_DIR}/Config/*.cpp
)
include_directories(${ai_matrix_Folder})
include_directories(${ai_matrix_Folder}/framework)
include_directories(${ai_matrix_Folder}/myqueue)
include_directories(${ai_matrix_Folder}/myshell)
include_directories(${ai_matrix_Folder}/mylog)
include_directories(${ai_matrix_Folder}/Utils)
include_directories(${ai_matrix_Folder}/Http)
include_directories(${ai_matrix_Folder}/Config)
# Per-engine include paths and sources.
include_directories(${PROJECT_SRC_ROOT}/engine/DealRfidEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/DealRfidEngine DealRfidEngine_SRC)
include_directories(${PROJECT_SRC_ROOT}/engine/GetRfidEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/GetRfidEngine GetRfidEngine_SRC)
include_directories(${PROJECT_SRC_ROOT}/engine/HttpUpResultEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/HttpUpResultEngine HttpUpResultEngine_SRC)
include_directories(${PROJECT_SRC_ROOT}/engine/SaveResultEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/SaveResultEngine SaveResultEngine_SRC)
include_directories(${PROJECT_SRC_ROOT}/engine/SaveRfidEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/SaveRfidEngine SaveRfidEngine_SRC)
include_directories(${PROJECT_SRC_ROOT}/engine/VideoAuxiliaryEngine)
aux_source_directory(${PROJECT_SRC_ROOT}/engine/VideoAuxiliaryEngine VideoAuxiliaryEngine_SRC)
# Shared include paths.
include_directories(
    #base include
    ${PROJECT_SOURCE_DIR}/base/BlockingQueue
    ${PROJECT_SOURCE_DIR}/base/CBase64
    ${PROJECT_SOURCE_DIR}/base/CommandParser
    ${PROJECT_SOURCE_DIR}/base/CommonDataType
    ${PROJECT_SOURCE_DIR}/base/ConfigParser
    ${PROJECT_SOURCE_DIR}/base/ErrorCode
    ${PROJECT_SOURCE_DIR}/base/FileManager
    ${PROJECT_SOURCE_DIR}/base/Log
    #code include
    ${PROJECT_SOURCE_DIR}/code/common
    ${PROJECT_SOURCE_DIR}/code/BaseSocket
    ${PROJECT_SOURCE_DIR}/code/BaseComPort
    #third party include
    ${X86_LINUX_INCLUDE_DIR}
    ${SYS_USR_LOCAL_INCLUDE_DIR}
)
link_directories(${SYS_USR_LOCAL_LIB_DIR}
    ${X86_LINUX_LIB_DIR}
)
# Remaining sources (same dot fix as above).
file(
    GLOB_RECURSE SRCS_LISTS CONFIGURE_DEPENDS
    ${ai_matrix_SRC_FILES}
    #base src
    ${PROJECT_SOURCE_DIR}/base/CommandParser/*.cpp
    ${PROJECT_SOURCE_DIR}/base/ConfigParser/*.cpp
    ${PROJECT_SOURCE_DIR}/base/ErrorCode/*.cpp
    ${PROJECT_SOURCE_DIR}/base/FileManager/*.cpp
    ${PROJECT_SOURCE_DIR}/base/Log/*.cpp
    #code src
    ${PROJECT_SOURCE_DIR}/code/common/*.cpp
    ${PROJECT_SOURCE_DIR}/code/BaseSocket/*.cpp
    ${PROJECT_SOURCE_DIR}/code/BaseComPort/*.cpp
    # engine
    ${DealRfidEngine_SRC}
    ${GetRfidEngine_SRC}
    ${HttpUpResultEngine_SRC}
    ${SaveResultEngine_SRC}
    ${SaveRfidEngine_SRC}
    ${VideoAuxiliaryEngine_SRC}
)
add_executable(${PROJECT_NAME}
    ${PROJECT_SRC_ROOT}/main.cpp
    ${SRCS_LISTS}
)
# Fix: explicit PRIVATE keyword (the keyword-less signature has legacy
# semantics); pthread/dl replaced by the portable Threads::Threads and
# ${CMAKE_DL_LIBS}; raw linker flags moved to target_link_options.
target_link_libraries(${PROJECT_NAME} PRIVATE
    ${Boost_LIBRARIES}
    ${_REFLECTION}      # NOTE(review): never set in this file — verify it is needed
    # FFmpeg
    avformat
    avcodec
    avutil
    avfilter
    swresample
    swscale
    postproc
    rt
    ${CMAKE_DL_LIBS}
    Threads::Threads
    yaml-cpp
    https_sn
    jsoncpp
    curl
    ssh2
    crypto
)
# Hardened-link flags (RELRO, non-executable stack, PIE, stripped binary).
target_link_options(${PROJECT_NAME} PRIVATE -Wl,-z,relro,-z,now,-z,noexecstack -pie -s)

View File

@ -0,0 +1,180 @@
#include "Config.h"
namespace ai_matrix
{
Config *Config::pInstance_ = nullptr;
Config::GarbageCollector Config::gc_;
std::mutex Config::mx_; // retained: declared in the header
/**
 * Singleton accessor.
 * Fix: the original double-checked lock read pInstance_ outside the mutex,
 * which is a data race (undefined behaviour) in C++11. std::call_once gives
 * race-free, exactly-once construction with the same external behaviour.
 */
Config *Config::GetIns()
{
    static std::once_flag s_onceFlag;
    std::call_once(s_onceFlag, []() { pInstance_ = new Config(); });
    return pInstance_;
}
/**
 * Load the YAML configuration at strPath and cache the base/log/http
 * sections into their typed structs.
 * @return 0 on success, -1 if the file is missing/empty or any key fails to parse.
 */
int Config::readYaml(std::string &strPath)
{
    try
    {
        strConfigYamlPath_ = strPath;
        config_ = YAML::LoadFile(strPath);
        if (config_.IsNull())
        {
            printf("config.yaml no find");
            return -1;
        }
        // Control parameters.
        const YAML::Node base = config_["base"];
        baseConfig_.strTrackName = base["track_name"].as<std::string>();
        baseConfig_.bTestModel = base["test_model"].as<bool>();
        baseConfig_.iConnectType = base["connect_type"].as<int>();
        baseConfig_.bUpResult = base["up_result"].as<bool>();
        baseConfig_.strLogPath = base["log_path"].as<std::string>();
        baseConfig_.strResultPath = base["result_path"].as<std::string>();
        baseConfig_.iResultSaveDays = base["result_save_days"].as<int>();
        // Log parameters.
        logConfig_.strLevel = config_["log"]["level"].as<std::string>();
        // HTTP server parameters.
        const YAML::Node http = config_["http_server"];
        httpServerConfig_.bIsUse = http["is_use"].as<bool>();
        httpServerConfig_.strIp = http["http_ip"].as<std::string>();
        httpServerConfig_.iPort = http["http_port"].as<int>();
        httpServerConfig_.strTokenUrl = http["token_path"].as<std::string>();
        httpServerConfig_.strUpResultUrl = http["up_result_path"].as<std::string>();
        httpServerConfig_.strUserName = http["username"].as<std::string>();
        httpServerConfig_.strPassword = http["password"].as<std::string>();
    }
    catch (...) // any YAML/conversion error maps to the same failure code
    {
        return -1;
    }
    return 0;
}
/**
 * Write the in-memory YAML tree back to the path it was loaded from.
 * Fix: std::ofstream does not throw on open failure by default, so the
 * original returned 0 (success) even when the file could not be opened;
 * now an unopened stream is reported as -1.
 * @return 0 on success, -1 on failure.
 */
int Config::writeYaml()
{
    try
    {
        std::ofstream of(strConfigYamlPath_);
        if (!of.is_open())
        {
            printf("yaml文件不存在\n");
            return -1;
        }
        of << config_;
        of.close();
    }
    catch (...) // catch any emitter/stream exception
    {
        printf("yaml文件不存在\n");
        return -1;
    }
    return 0;
}
/**
 * Look up a top-level key as a string.
 * Falls back to the cached root document when no node is supplied; warns
 * (but still attempts the conversion, which may throw) if the key is absent.
 */
std::string Config::getStringValue(const char *pszKey, const YAML::Node *pConfig/*=nullptr*/) const
{
    const YAML::Node &node = (pConfig != nullptr) ? *pConfig : config_;
    if (!node[pszKey].IsDefined())
    {
        printf("key:[%s] not exist!\n", pszKey);
    }
    return node[pszKey].as<std::string>();
}
/**
 * Look up a top-level key as an int (root document when pConfig is null).
 * Warns if the key is absent; the conversion itself may still throw.
 */
int Config::getIntValue(const char *pszKey, const YAML::Node *pConfig/*=nullptr*/) const
{
    const YAML::Node &node = (pConfig != nullptr) ? *pConfig : config_;
    if (!node[pszKey].IsDefined())
    {
        printf("key:[%s] not exist!\n", pszKey);
    }
    return node[pszKey].as<int>();
}
/**
 * Look up a top-level key as a bool (root document when pConfig is null).
 * Warns if the key is absent; the conversion itself may still throw.
 */
bool Config::getBoolValue(const char *pszKey, const YAML::Node *pConfig/*=nullptr*/) const
{
    const YAML::Node &node = (pConfig != nullptr) ? *pConfig : config_;
    if (!node[pszKey].IsDefined())
    {
        printf("key:[%s] not exist!\n", pszKey);
    }
    return node[pszKey].as<bool>();
}
/**
 * Look up a top-level key as a float (root document when pConfig is null).
 * Warns if the key is absent; the conversion itself may still throw.
 */
float Config::getFloatValue(const char *pszKey, const YAML::Node *pConfig/*=nullptr*/) const
{
    const YAML::Node &node = (pConfig != nullptr) ? *pConfig : config_;
    if (!node[pszKey].IsDefined())
    {
        printf("key:[%s] not exist!\n", pszKey);
    }
    return node[pszKey].as<float>();
}
/**
 * Look up a key as a filesystem path, guaranteeing a trailing '/'.
 * Fix: the original called strTmp.back() without an empty check —
 * undefined behaviour when the configured value is "". An empty value
 * now yields "/".
 */
std::string Config::getPathValue(const char *pszKey, const YAML::Node *pConfig /*=nullptr*/) const
{
    const YAML::Node &node = (pConfig != nullptr) ? *pConfig : config_;
    if (!node[pszKey].IsDefined())
    {
        printf("key:[%s] not exist!\n", pszKey);
    }
    std::string strTmp = node[pszKey].as<std::string>();
    if (strTmp.empty() || strTmp.back() != '/')
    {
        strTmp += "/";
    }
    return strTmp;
}
BaseConfig Config::getBaseConfig() const
{
return this->baseConfig_;
}
void Config::setBaseConfig(const BaseConfig baseConfig)
{
this->baseConfig_ = baseConfig;
}
LogConfig Config::getLogConfig() const
{
return this->logConfig_;
}
void Config::setLogConfig(const LogConfig logConfig)
{
this->logConfig_ = logConfig;
}
HttpServerConfig Config::getHttpServerConfig() const
{
return this->httpServerConfig_;
}
void Config::setHttpServerConfig(const HttpServerConfig httpServerConfig) {
this->httpServerConfig_ = httpServerConfig;
}
}

View File

@ -0,0 +1,137 @@
/*
* @Author: your name
* @Date: 2022-02-08 15:59:33
* @LastEditors: your name
* @LastEditTime: 2022-02-16 10:39:05
* @Description: file content
* @FilePath: \lirs\code\MyYaml\MyYaml.h
*
* Copyright © 2022 <Shandong Matrix Software Engineering Co., Ltd>
*/
#ifndef MYYAML_H_
#define MYYAML_H_
#include <mutex>
#include <fstream>
#include "yaml-cpp/yaml.h"
#include "Log.h"
namespace ai_matrix
{
// Basic control parameters (the "base" section of config.yaml).
struct BaseConfig
{
    // Track (rail siding) name
    std::string strTrackName;
    // Test mode on/off
    bool bTestModel;
    // Connection mode selector (integer code; semantics defined by callers)
    int iConnectType;
    // Whether to upload recognition results
    bool bUpResult;
    // Log file directory
    std::string strLogPath;
    // Recognition-result output directory
    std::string strResultPath;
    // Retention period in days for stored results
    int iResultSaveDays;
};
// Logging parameters (the "log" section of config.yaml).
struct LogConfig{
    // Log level: one of [DEBUG, INFO, WARN, ERROR, FATAL]
    std::string strLevel;
};
// Web/HTTP server parameters (the "http_server" section of config.yaml).
struct HttpServerConfig
{
    // Whether the HTTP server integration is enabled
    bool bIsUse;
    // Server IP address (original comment duplicated the token-URL note — copy/paste)
    std::string strIp;
    // Communication port
    int iPort;
    // URL path for obtaining the API authorization token
    std::string strTokenUrl;
    // URL path for uploading recognition results
    std::string strUpResultUrl;
    // API user name
    std::string strUserName;
    // API password
    std::string strPassword;
};
// Thread-safe singleton wrapper around the application's YAML configuration.
// Loads config.yaml, caches the base/log/http sections into typed structs,
// and offers ad-hoc typed key lookups on the raw document.
class Config final
{
public:
    // Singleton accessor.
    static Config *GetIns();
    // Load the YAML file at strPath and cache its sections; 0 on success, -1 on error.
    int readYaml(std::string &strPath);
    // Write the in-memory YAML tree back to the original path; 0 on success, -1 on error.
    int writeYaml();
    // Typed key lookups; a null pConfig means "use the cached root document".
    std::string getStringValue(const char *pszKey, const YAML::Node *pConfig = nullptr) const;
    int getIntValue(const char *pszKey, const YAML::Node *pConfig = nullptr) const;
    bool getBoolValue(const char *pszKey, const YAML::Node *pConfig = nullptr) const;
    float getFloatValue(const char *pszKey, const YAML::Node *pConfig = nullptr) const;
    // Like getStringValue but guarantees a trailing '/'.
    std::string getPathValue(const char *pszKey, const YAML::Node *pConfig =nullptr) const;
    // RunningData initRunningData();
    // RunningData setRunningData(const RunningData runningData);
    // RunningData getRunningData() const;
    // Control-parameter section accessors.
    BaseConfig getBaseConfig() const;
    void setBaseConfig(const BaseConfig baseConfig);
    // Log-parameter section accessors.
    LogConfig getLogConfig() const;
    void setLogConfig(const LogConfig logConfig);
    // HTTP-server-parameter section accessors.
    HttpServerConfig getHttpServerConfig() const;
    void setHttpServerConfig(const HttpServerConfig httpServerConfig);
    // Raw YAML document (public so callers can pass sub-nodes to the getters).
    YAML::Node config_;
private:
    Config() = default;
    Config(const Config &) = delete;
    Config(Config &&) = delete;
    Config &operator=(const Config &) = delete;
    Config &operator=(Config &&) = delete;
    ~Config() = default;
    static Config *pInstance_;
    static std::mutex mx_; // guards singleton construction
    // Path the configuration was loaded from (reused by writeYaml()).
    std::string strConfigYamlPath_;
    // Cached "base" section
    BaseConfig baseConfig_;
    // Cached "log" section
    LogConfig logConfig_;
    // Cached "http_server" section
    HttpServerConfig httpServerConfig_;
    // Nested helper whose static instance deletes the singleton at program
    // exit, so the heap allocation is not leaked.
    class GarbageCollector
    {
    public:
        ~GarbageCollector()
        {
            if (Config::pInstance_)
            {
                delete Config::pInstance_;
                Config::pInstance_ = nullptr;
            }
        }
    };
    static GarbageCollector gc_;
};
}
#endif

9388
src/ai_matrix/Http/httplib.h Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,190 @@
//
// Created by matrixai on 4/2/24.
//
#include "FileUtil.h"
namespace ai_matrix
{
FileUtil *FileUtil::ins = nullptr;
FileUtil::GarbageCollector FileUtil::gc;
std::mutex FileUtil::mx; // retained: declared in the header
/**
 * Singleton accessor.
 * Fix: the original double-checked lock read `ins` outside the mutex — a
 * data race in C++11. std::call_once provides race-free, exactly-once
 * construction with identical external behaviour.
 */
FileUtil *FileUtil::getins()
{
    static std::once_flag s_onceFlag;
    std::call_once(s_onceFlag, []() { ins = new FileUtil(); });
    return ins;
}
/**
 * Create a directory path recursively (like `mkdir -p`), chmod'ing each
 * newly created component to 0777.
 * inParam : std::string strDirPath : directory path to create
 * outParam: N/A
 * return : true on success (or if the path already exists), false on mkdir failure
 */
bool FileUtil::CreateDirPath(std::string strDirPath)
{
    // NOTE(review): back() on an empty string is undefined behaviour —
    // confirm callers never pass "".
    // A trailing '/' guarantees the final component is visited by the
    // '/'-scanning loop below.
    if (strDirPath.back() != '/')
    {
        strDirPath += "/";
    }
    // Fast path: the whole path already exists.
    if (access(strDirPath.c_str(), F_OK) == 0)
    {
        return true;
    }
    // Walk the path one '/' at a time, creating each missing prefix.
    std::string::size_type pos = strDirPath.find('/');
    while (pos != std::string::npos)
    {
        std::string strCur = strDirPath.substr(0, pos);
        if (!strCur.empty() && access(strCur.c_str(), F_OK) != 0)
        {
            if (mkdir(strCur.c_str(), 0755) != 0) // fails if the parent is missing
            {
                perror("mkdir fail");
                return false;
            }
            if (chmod(strCur.c_str(), 0777) != 0) // best-effort; failure only logged
            {
                perror("chmod fail");
            }
        }
        pos = strDirPath.find('/', pos + 1);
    }
    return true;
}
/**
 * Create (if absent) the directory <root>/<name>.
 * @return the directory path, or "" when mkdir fails.
 */
std::string FileUtil::create_dir_name(std::string root, std::string name)
{
    const std::string target = root + "/" + name;
    // Only attempt creation when the directory is not already present.
    if (access(target.c_str(), F_OK) != 0 && mkdir(target.c_str(), 0755) != 0)
    {
        perror("mkdir fail"); // creation fails if the parent does not exist
        return "";
    }
    return target;
}
/**
 * Create (if absent) a date-named directory <root>/YYYY-MM-DD.
 * Fix: localtime() returns a process-wide static buffer and is not
 * thread-safe; use localtime_r() with a local tm. sprintf replaced by
 * bounds-checked snprintf.
 * @return the directory path, or "" when mkdir fails.
 */
std::string FileUtil::create_dir_date(std::string root)
{
    time_t timep = time(NULL);
    struct tm tmBuf;
    localtime_r(&timep, &tmBuf);
    char tmp_date[20] = {0};
    snprintf(tmp_date, sizeof(tmp_date), "%04d-%02d-%02d",
             1900 + tmBuf.tm_year, 1 + tmBuf.tm_mon, tmBuf.tm_mday);
    std::string dir_path = root + "/" + std::string(tmp_date);
    if (access(dir_path.c_str(), F_OK) != 0)
    {
        if (mkdir(dir_path.c_str(), 0755) != 0) // fails if the parent is missing
        {
            perror("mkdir fail");
            return "";
        }
    }
    return dir_path;
}
/**
 * Create <root>/YYYY-MM-DD/<name>_hh-mm-ss (both levels created if absent).
 * Fix: thread-unsafe localtime() replaced by localtime_r(); sprintf
 * replaced by bounds-checked snprintf.
 * @return the inner directory path, or "" when either mkdir fails.
 */
std::string FileUtil::create_dir_date_name_time(std::string root, std::string name)
{
    time_t timep = time(NULL);
    struct tm tmBuf;
    localtime_r(&timep, &tmBuf);
    char tmp_date[20] = {0};
    snprintf(tmp_date, sizeof(tmp_date), "%04d-%02d-%02d",
             1900 + tmBuf.tm_year, 1 + tmBuf.tm_mon, tmBuf.tm_mday);
    char tmp_time[20] = {0};
    snprintf(tmp_time, sizeof(tmp_time), "%02d-%02d-%02d",
             tmBuf.tm_hour, tmBuf.tm_min, tmBuf.tm_sec);
    // Date-level directory.
    std::string dir_path = root + "/" + std::string(tmp_date);
    if (access(dir_path.c_str(), F_OK) != 0)
    {
        if (mkdir(dir_path.c_str(), 0755) != 0) // fails if the parent is missing
        {
            perror("mkdir fail");
            return "";
        }
    }
    // Inner <name>_<time> directory.
    dir_path = dir_path + "/" + name + "_" + std::string(tmp_time);
    if (access(dir_path.c_str(), F_OK) != 0)
    {
        if (mkdir(dir_path.c_str(), 0755) != 0)
        {
            perror("mkdir fail");
            return "";
        }
    }
    return dir_path;
}
/**
 * Build a file path <root>/YYYY-MM-DD/<name>_hh-mm-ss<suffix>, creating the
 * date directory if absent (the file itself is not created).
 * Fix: thread-unsafe localtime() replaced by localtime_r(); sprintf
 * replaced by bounds-checked snprintf.
 * @return the file path, or "" when the date directory cannot be created.
 */
std::string FileUtil::create_file_path(std::string root, std::string name, std::string suffix)
{
    time_t timep = time(NULL);
    struct tm tmBuf;
    localtime_r(&timep, &tmBuf);
    char tmp_date[20] = {0};
    snprintf(tmp_date, sizeof(tmp_date), "%04d-%02d-%02d",
             1900 + tmBuf.tm_year, 1 + tmBuf.tm_mon, tmBuf.tm_mday);
    char tmp_time[20] = {0};
    snprintf(tmp_time, sizeof(tmp_time), "%02d-%02d-%02d",
             tmBuf.tm_hour, tmBuf.tm_min, tmBuf.tm_sec);
    std::string file_path = root + "/" + std::string(tmp_date);
    if (access(file_path.c_str(), F_OK) != 0)
    {
        if (mkdir(file_path.c_str(), 0755) != 0) // fails if the parent is missing
        {
            perror("mkdir fail");
            return "";
        }
    }
    file_path = file_path + "/" + name + "_" + std::string(tmp_time) + suffix;
    return file_path;
}
/**
 * Copy the file at filePath to savePath (binary, overwrite).
 * Fixes over the original:
 *  - the destination handle leaked (and fwrite crashed on NULL) when either
 *    fopen failed — both failures are now checked and cleaned up;
 *  - the `while (!feof(fp))` loop wrote one stale extra byte after EOF
 *    (feof only turns true after a failed read) — now loops on fread's count;
 *  - bytes were read into the pointer variable itself one at a time — now a
 *    real 4 KiB buffer is used.
 * @return true on a complete copy, false otherwise.
 */
bool FileUtil::copyFile(std::string filePath, std::string savePath) {
    FILE *fp = fopen(filePath.c_str(), "rb");
    if (!fp) {
        return false;
    }
    FILE *sp = fopen(savePath.c_str(), "w+b");
    if (!sp) {
        fclose(fp);
        return false;
    }
    char buffer[4096];
    bool ok = true;
    size_t n;
    while ((n = fread(buffer, 1, sizeof(buffer), fp)) > 0) {
        if (fwrite(buffer, 1, n, sp) != n) {
            ok = false; // short write (e.g. disk full)
            break;
        }
    }
    if (ferror(fp)) {
        ok = false; // read error, not EOF
    }
    fclose(fp);
    fclose(sp);
    return ok;
}
}

View File

@ -0,0 +1,78 @@
//
// Created by matrixai on 4/2/24.
//
#ifndef TRAIN_RFID_LINUX_FILEUTIL_H
#define TRAIN_RFID_LINUX_FILEUTIL_H
#include <string>
#include <mutex>
#include <fstream>
#include <dirent.h>
#include <sys/stat.h>
#include <unistd.h>
#include <string>
#include <vector>
#include <algorithm>
#include <set>
#include <map>
#include <memory>
namespace ai_matrix
{
// Thread-safe singleton with helpers for creating date/time-stamped
// directories and copying files.
class FileUtil final
{
public:
    // Singleton accessor.
    static FileUtil *getins();
    // Create <root>/<name>; returns the path or "" on failure.
    std::string create_dir_name(std::string root, std::string name);
    // Create <root>/YYYY-MM-DD/<name>_hh-mm-ss; returns the path or "" on failure.
    std::string create_dir_date_name_time(std::string root, std::string name);
    // Create <root>/YYYY-MM-DD; returns the path or "" on failure.
    std::string create_dir_date(std::string root);
    // Build <root>/YYYY-MM-DD/<name>_hh-mm-ss<suffix>, creating the date dir.
    std::string create_file_path(std::string root, std::string name, std::string suffix);
    // Recursively create a directory path (like `mkdir -p`).
    bool CreateDirPath(std::string strDirPath);
    /**
     * Binary-copy filePath to savePath.
     * @param filePath source file
     * @param savePath destination file (overwritten)
     * @return true on success
     */
    bool copyFile(std::string filePath, std::string savePath);
private:
    FileUtil() = default;
    FileUtil(const FileUtil &) = delete;
    FileUtil(FileUtil &&) = delete;
    FileUtil &operator=(const FileUtil &) = delete;
    FileUtil &operator=(FileUtil &&) = delete;
    ~FileUtil() = default;
    // Nested helper whose static instance deletes the singleton at program
    // exit, so the heap allocation is not leaked.
    class GarbageCollector
    {
    public:
        ~GarbageCollector()
        {
            if (FileUtil::ins)
            {
                delete FileUtil::ins;
                FileUtil::ins = nullptr;
            }
        }
    };
    static GarbageCollector gc;
    static FileUtil *ins;
    static std::mutex mx; // guards singleton construction
};
}
#endif //TRAIN_RFID_LINUX_FILEUTIL_H

View File

@ -0,0 +1,171 @@
//
// Created by matrixai on 4/2/24.
//
#include "StringUtil.h"
namespace ai_matrix
{
StringUtil *StringUtil::ins = nullptr;
StringUtil::GarbageCollector StringUtil::gc;
std::mutex StringUtil::mx; // retained: declared in the header
/**
 * Singleton accessor.
 * Fix: the original double-checked lock read `ins` outside the mutex — a
 * data race in C++11. std::call_once provides race-free, exactly-once
 * construction with identical external behaviour.
 */
StringUtil *StringUtil::getins()
{
    static std::once_flag s_onceFlag;
    std::call_once(s_onceFlag, []() { ins = new StringUtil(); });
    return ins;
}
/**
 * Split `str` on every occurrence of `pattern`.
 * Consecutive delimiters produce empty-string entries; a trailing delimiter
 * is handled by the sentinel append below.
 * @param str input string (taken by value; the copy is mutated)
 * @param pattern delimiter substring
 * @return vector<std::string> of the pieces
 */
std::vector<std::string> StringUtil::split(std::string str, std::string pattern)
{
    std::string::size_type pos;
    std::vector<std::string> result;
    // Append a sentinel delimiter so the final piece is emitted by the
    // same loop logic as the others.
    str += pattern;
    int size = str.size();
    for (int i = 0; i < size; i++)
    {
        pos = str.find(pattern, i);
        if (pos < size) // npos compares greater, so "not found" falls through
        {
            std::string s = str.substr(i, pos - i);
            result.push_back(s);
            // Jump past the delimiter (the loop's i++ supplies the last step).
            i = pos + pattern.size() - 1;
        }
    }
    return result;
}
/**
 * Strip leading and trailing whitespace (space, \t, \n, \r, \f, \v).
 * @param str input (unmodified)
 * @return trimmed copy; an all-whitespace input yields "".
 */
std::string StringUtil::trim(const std::string& str) {
    const char *kWhitespace = " \t\n\r\f\v";
    std::string result(str);
    // Drop the leading run of whitespace, then the trailing run.
    result.erase(0, result.find_first_not_of(kWhitespace));
    result.erase(result.find_last_not_of(kWhitespace) + 1);
    return result;
}
/**
 * Replace every occurrence of old_value in str with new_value, in place.
 * Advancing by new_value.length() after each replacement means text inside
 * a freshly inserted new_value is never re-matched (so e.g. "a"->"aa" terminates).
 * @param str string to modify (also returned)
 * @param old_value substring to replace
 * @param new_value replacement text
 * @return reference to the modified str
 */
std::string& StringUtil::replace_all_distinct(std::string &str, const std::string &old_value, const std::string &new_value)
{
    for (std::string::size_type pos(0); pos != std::string::npos; pos += new_value.length()) {
        if ((pos = str.find(old_value, pos)) != std::string::npos)
            str.replace(pos, old_value.length(), new_value);
        else break;
    }
    return str;
}
/**
 * Split an anchor line into floats on any of the given delimiter characters.
 * inParam : std::string &strLine : one line of anchor text
 *         : const std::set<char> &setDelimiters : characters treated as separators
 * outParam: N/A
 * return : the parsed float values (empty input -> empty vector)
 */
std::vector<float> StringUtil::SplitAnchor(std::string &strLine, const std::set<char> &setDelimiters)
{
    std::vector<float> result;
    if (strLine.empty())
    {
        return result;
    }
    // Scan with two pointers: `start` marks the beginning of the current
    // token, `pch` advances over every character.
    char const *pch = strLine.c_str();
    char const *start = pch;
    for (; *pch; ++pch)
    {
        if (setDelimiters.find(*pch) != setDelimiters.end())
        {
            if (start != pch) // skip zero-length tokens between adjacent delimiters
            {
                std::string tmp(start, pch);
                result.push_back(atof(tmp.c_str())); // parse as float
            }
            start = pch + 1;
        }
    }
    // The trailing token (after the last delimiter); note atof("") == 0, so a
    // line ending in a delimiter appends a 0.
    result.push_back(atof(start));
    return result;
}
/**
 * Convert a float to text via stream formatting (default precision).
 * @param f value to convert
 * @return its string form
 */
std::string StringUtil::getStringFromFloat(float f)
{
    std::ostringstream stream;
    stream << f;
    return stream.str();
}
/**
 * Convert a bool to text via stream formatting ("1"/"0" — no boolalpha).
 * @param b value to convert
 * @return its string form
 */
std::string StringUtil::getStringFromBool(bool b)
{
    std::ostringstream stream;
    stream << b;
    return stream.str();
}
/**
 * Parse a (possibly whitespace-padded) string as an int.
 * @param s text to parse
 * @param b out-flag: true when parsing succeeded, false otherwise
 * @return the parsed value, or 0 when parsing failed
 */
int StringUtil::string2int(const std::string s, bool &b) {
    b = true;
    try {
        return std::stoi(this->trim(s));
    } catch (...) {
        // stoi throws invalid_argument / out_of_range; report via the flag.
        b = false;
    }
    return 0;
}
/**
 * Check whether str contains any character that is neither alphanumeric
 * nor whitespace.
 * Fix: passing a plain (possibly negative) char to std::isalnum/std::isspace
 * is undefined behaviour for bytes outside 0..127 (e.g. UTF-8 input); the
 * value is now cast through unsigned char first.
 * @return true when such a character exists, false otherwise
 */
bool StringUtil::containsNonAlphaNum(const std::string &str) {
    for (char c : str) {
        const unsigned char uc = static_cast<unsigned char>(c);
        if (!std::isalnum(uc) && !std::isspace(uc)) {
            return true;
        }
    }
    return false;
}
/**
 * Check whether str consists solely of decimal digits.
 * Fixes: `all_of` is now explicitly std-qualified (the unqualified call
 * relied on implementation details to resolve), and each char is cast to
 * unsigned char before isdigit — passing negative values is undefined
 * behaviour. Note: an empty string still yields true, as before.
 */
bool StringUtil::is_digits(const std::string &str) {
    return std::all_of(str.begin(), str.end(),
                       [](unsigned char c) { return std::isdigit(c) != 0; });
}
}

View File

@ -0,0 +1,74 @@
//
// Created by matrixai on 4/2/24.
//
#ifndef TRAIN_RFID_LINUX_STRINGUTIL_H
#define TRAIN_RFID_LINUX_STRINGUTIL_H
#include <string>
#include <vector>
#include <mutex>
#include <set>
#include <sstream>
#include <bits/stdc++.h>
namespace ai_matrix
{
// Thread-safe singleton collecting small string-manipulation helpers.
class StringUtil final
{
public:
    // Singleton accessor.
    static StringUtil *getins();
    // Split str on every occurrence of pattern.
    std::vector<std::string> split(std::string str, std::string pattern);
    // Strip leading/trailing whitespace.
    std::string trim(const std::string& str);
    // Replace every occurrence of old_value with new_value, in place.
    std::string& replace_all_distinct(std::string &str, const std::string &old_value, const std::string &new_value);
    // Split an anchor line into floats on any of the delimiter characters.
    std::vector<float> SplitAnchor(std::string &strLine, const std::set<char> &setDelimiters);
    // float -> string (stream formatting)
    std::string getStringFromFloat(float f);
    // bool -> string ("1"/"0")
    std::string getStringFromBool(bool b);
    // string -> int; b reports success, 0 returned on failure
    int string2int(const std::string s, bool &b);
    // True when str has a character that is neither alphanumeric nor whitespace.
    bool containsNonAlphaNum(const std::string &str);
    // True when str is all decimal digits (empty string included).
    bool is_digits(const std::string& str);
private:
    StringUtil() = default;
    StringUtil(const StringUtil &) = delete;
    StringUtil(StringUtil &&) = delete;
    StringUtil &operator=(const StringUtil &) = delete;
    StringUtil &operator=(StringUtil &&) = delete;
    ~StringUtil() = default;
    // Nested helper whose static instance deletes the singleton at program
    // exit, so the heap allocation is not leaked.
    class GarbageCollector
    {
    public:
        ~GarbageCollector()
        {
            if (StringUtil::ins)
            {
                delete StringUtil::ins;
                StringUtil::ins = nullptr;
            }
        }
    };
    static GarbageCollector gc;
    static StringUtil *ins;
    static std::mutex mx; // guards singleton construction
};
}
#endif //TRAIN_RFID_LINUX_STRINGUTIL_H

View File

@ -0,0 +1,211 @@
#include <cstring>
#include "TimeUtil.h"
namespace ai_matrix
{
// Fixed UTC+8 offset in seconds (8 hours), applied before formatting with
// gmtime so the output is Beijing time regardless of the host timezone.
const int TIME_DIFF = 28800;
TimeUtil *TimeUtil::ins = nullptr;
TimeUtil::GarbageCollector TimeUtil::gc;
std::mutex TimeUtil::mx; // retained: declared in the header
/**
 * Singleton accessor.
 * Fix: the original double-checked lock read `ins` outside the mutex — a
 * data race in C++11. std::call_once provides race-free, exactly-once
 * construction with identical external behaviour.
 */
TimeUtil *TimeUtil::getins()
{
    static std::once_flag s_onceFlag;
    std::call_once(s_onceFlag, []() { ins = new TimeUtil(); });
    return ins;
}
/**
 * Millisecond-resolution timestamp suitable for file names:
 * "YYYY-MM-DD-hh-mm-ss-mmm" in local time.
 * Fixes: localtime() uses a shared static buffer (not thread-safe) ->
 * localtime_r(); seconds and milliseconds now come from the same
 * gettimeofday() call (the original mixed time(NULL) with gettimeofday(),
 * which could disagree across a second boundary); sprintf -> snprintf.
 */
std::string TimeUtil::get_timestamp_file()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    time_t timep = tv.tv_sec;
    struct tm tmBuf;
    localtime_r(&timep, &tmBuf);
    int msec = tv.tv_usec / 1000;
    char tmp[30] = {0};
    snprintf(tmp, sizeof(tmp), "%04d-%02d-%02d-%02d-%02d-%02d-%03d",
             1900 + tmBuf.tm_year, 1 + tmBuf.tm_mon, tmBuf.tm_mday,
             tmBuf.tm_hour, tmBuf.tm_min, tmBuf.tm_sec, msec);
    return std::string(tmp);
}
/**
 * Local date-time with milliseconds: "YYYY-MM-DD hh:mm:ss.mmm".
 * Fixes: thread-unsafe localtime() -> localtime_r(); seconds and
 * milliseconds now come from the same gettimeofday() call; sprintf ->
 * snprintf.
 */
std::string TimeUtil::getDateTime_usec()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    time_t timep = tv.tv_sec;
    struct tm tmBuf;
    localtime_r(&timep, &tmBuf);
    int msec = tv.tv_usec / 1000;
    char tmp[30] = {0};
    snprintf(tmp, sizeof(tmp), "%04d-%02d-%02d %02d:%02d:%02d.%03d",
             1900 + tmBuf.tm_year, 1 + tmBuf.tm_mon, tmBuf.tm_mday,
             tmBuf.tm_hour, tmBuf.tm_min, tmBuf.tm_sec, msec);
    return std::string(tmp);
}
/**
 * Current Beijing (UTC+8) date: "yyyy-mm-dd".
 * Fix: gmtime() uses a shared static buffer (not thread-safe) -> gmtime_r();
 * sprintf -> snprintf.
 */
std::string TimeUtil::getDate()
{
    struct timeval time = {0, 0};
    gettimeofday(&time, nullptr);
    time_t timep = time.tv_sec + TIME_DIFF; // shift to UTC+8, format as UTC
    struct tm tmBuf;
    gmtime_r(&timep, &tmBuf);
    char szTmp[12] = {0};
    snprintf(szTmp, sizeof(szTmp), "%04d-%02d-%02d",
             1900 + tmBuf.tm_year, 1 + tmBuf.tm_mon, tmBuf.tm_mday);
    return std::string(szTmp);
}
/**
 * Current Beijing (UTC+8) time of day: "hh:mm:ss".
 * Fix: thread-unsafe gmtime() -> gmtime_r(); sprintf -> snprintf.
 */
std::string TimeUtil::getTime()
{
    struct timeval time = {0, 0};
    gettimeofday(&time, nullptr);
    time_t timep = time.tv_sec + TIME_DIFF; // shift to UTC+8, format as UTC
    struct tm tmBuf;
    gmtime_r(&timep, &tmBuf);
    char szTmp[10] = {0};
    snprintf(szTmp, sizeof(szTmp), "%02d:%02d:%02d",
             tmBuf.tm_hour, tmBuf.tm_min, tmBuf.tm_sec);
    return std::string(szTmp);
}
/**
 * Current Beijing (UTC+8) date + time: "YYYY-MM-DD hh:mm:ss".
 * Fixes: thread-unsafe gmtime() -> gmtime_r(); sprintf -> snprintf; the
 * original also computed an msec value it never used — removed.
 */
std::string TimeUtil::getDateTime()
{
    struct timeval time = {0, 0};
    gettimeofday(&time, nullptr);
    time_t timep = time.tv_sec + TIME_DIFF; // shift to UTC+8, format as UTC
    struct tm tmBuf;
    gmtime_r(&timep, &tmBuf);
    char szTmp[32] = {0};
    snprintf(szTmp, sizeof(szTmp), "%04d-%02d-%02d %02d:%02d:%02d",
             1900 + tmBuf.tm_year, 1 + tmBuf.tm_mon, tmBuf.tm_mday,
             tmBuf.tm_hour, tmBuf.tm_min, tmBuf.tm_sec);
    return std::string(szTmp);
}
/**
 * Local time of day formatted for file names: "hh-mm-ss".
 * Fixes: thread-unsafe localtime() -> localtime_r(); sprintf -> snprintf;
 * the original also computed an unused msec value via gettimeofday —
 * removed.
 */
std::string TimeUtil::getTime_file()
{
    time_t timep = time(NULL);
    struct tm tmBuf;
    localtime_r(&timep, &tmBuf);
    char tmp[10] = {0};
    snprintf(tmp, sizeof(tmp), "%02d-%02d-%02d",
             tmBuf.tm_hour, tmBuf.tm_min, tmBuf.tm_sec);
    return std::string(tmp);
}
/**
 * Milliseconds (or, with usec=false, whole seconds) since the Unix epoch.
 * Fix: tv.tv_sec * 1000 was computed in time_t before widening — on
 * platforms with 32-bit time_t that multiplication overflows; the operands
 * are now widened to uint64_t first.
 * @param usec true -> milliseconds, false -> seconds
 *             (NOTE(review): the name says "usec" but the unit is ms — confirm)
 * @return the elapsed count in the selected unit
 */
uint64_t TimeUtil::getCurrentTimeMillis(bool usec)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    if (usec)
        return (uint64_t)tv.tv_sec * 1000 + (uint64_t)(tv.tv_usec / 1000);
    else
        return (uint64_t)tv.tv_sec;
}
/**
 * Milliseconds since the Unix epoch for a fixed-format timestamp.
 * inParam : std::string &strDateTime : exactly 19 chars, "YYYY-MM-DD hh-mm-ss"
 * outParam: N/A
 * return : epoch milliseconds, or 0 when the input length is wrong
 */
uint64_t TimeUtil::getParamTimeMilliSeconds(std::string &strDateTime)
{
    if (strDateTime.length() != 19)
    {
        return 0;
    }
    // Extract a fixed-width numeric field starting at `pos`.
    auto field = [&strDateTime](size_t pos, size_t len) {
        return atoi(strDateTime.substr(pos, len).c_str());
    };
    struct tm stm;
    memset(&stm, 0, sizeof(stm));
    stm.tm_year = field(0, 4) - 1900;
    stm.tm_mon = field(5, 2) - 1;
    stm.tm_mday = field(8, 2);
    stm.tm_hour = field(11, 2);
    stm.tm_min = field(14, 2);
    stm.tm_sec = field(17, 2);
    // mktime interprets stm as *local* time — NOTE(review): confirm callers
    // supply local-time strings.
    return (uint64_t)mktime(&stm) * 1000;
}
/**
 * Format an epoch-millisecond value as Beijing (UTC+8) date-time:
 * "YYYY-MM-DD hh-mm-ss".
 * Fix: thread-unsafe gmtime() -> gmtime_r(); sprintf -> snprintf.
 * inParam : uint64_t i64MilliSeconds : milliseconds since the Unix epoch
 * return : the formatted string
 */
std::string TimeUtil::getDateTimeByMilliSeconds(uint64_t i64MilliSeconds)
{
    time_t timep = i64MilliSeconds / 1000 + TIME_DIFF; // shift to UTC+8
    struct tm tmBuf;
    gmtime_r(&timep, &tmBuf);
    char szTmp[32] = {0};
    snprintf(szTmp, sizeof(szTmp), "%04d-%02d-%02d %02d-%02d-%02d",
             1900 + tmBuf.tm_year, 1 + tmBuf.tm_mon, tmBuf.tm_mday,
             tmBuf.tm_hour, tmBuf.tm_min, tmBuf.tm_sec);
    return std::string(szTmp);
}
}

View File

@ -0,0 +1,71 @@
#ifndef TimeUtil_H_
#define TimeUtil_H_
#include <mutex>
#include <sstream>
#include <time.h>
#include <sys/time.h>
#include <string>
#include <vector>
#include <algorithm>
#include <set>
#include <map>
#include <memory>
namespace ai_matrix
{
// Thread-safe singleton collecting date/time formatting helpers.
// Several helpers add a fixed UTC+8 offset to express Beijing time.
class TimeUtil final
{
public:
    // Singleton accessor.
    static TimeUtil *getins();
    // Millisecond timestamp for file names: "YYYY-MM-DD-hh-mm-ss-mmm".
    std::string get_timestamp_file();
    // Local date-time with milliseconds: "YYYY-MM-DD hh:mm:ss.mmm".
    std::string getDateTime_usec();
    // std::string get_date();
    // Current Beijing date: "yyyy-mm-dd".
    std::string getDate();
    // Current Beijing time of day: "hh:mm:ss".
    std::string getTime();
    // Current Beijing date + time: "YYYY-MM-DD hh:mm:ss".
    std::string getDateTime();
    // Local time of day for file names: "hh-mm-ss".
    std::string getTime_file();
    // Milliseconds since epoch (seconds when usec=false).
    uint64_t getCurrentTimeMillis(bool usec = false);
    // Epoch ms for a "YYYY-MM-DD hh-mm-ss" string (0 on bad length).
    uint64_t getParamTimeMilliSeconds(std::string &strDateTime);
    // Beijing date-time string for an epoch-millisecond value.
    std::string getDateTimeByMilliSeconds(uint64_t i64MilliSeconds);
private:
    TimeUtil() = default;
    TimeUtil(const TimeUtil &) = delete;
    TimeUtil(TimeUtil &&) = delete;
    TimeUtil &operator=(const TimeUtil &) = delete;
    TimeUtil &operator=(TimeUtil &&) = delete;
    ~TimeUtil() = default;
    // Nested helper whose static instance deletes the singleton at program
    // exit, so the heap allocation is not leaked.
    class GarbageCollector
    {
    public:
        ~GarbageCollector()
        {
            if (TimeUtil::ins)
            {
                delete TimeUtil::ins;
                TimeUtil::ins = nullptr;
            }
        }
    };
    static GarbageCollector gc;
    static TimeUtil *ins;
    static std::mutex mx; // guards singleton construction
};
}
#endif

View File

@ -0,0 +1,180 @@
//
// Created by matrixai on 4/2/24.
//
#include "Utils.h"
namespace ai_matrix
{
Utils *Utils::ins = nullptr;
Utils::GarbageCollector Utils::gc;
std::mutex Utils::mx; // retained: declared in the header
/**
 * Singleton accessor.
 * Fix: the original double-checked lock read `ins` outside the mutex — a
 * data race in C++11. std::call_once provides race-free, exactly-once
 * construction with identical external behaviour.
 */
Utils *Utils::getins()
{
    static std::once_flag s_onceFlag;
    std::call_once(s_onceFlag, []() { ins = new Utils(); });
    return ins;
}
/**
 * Arithmetic mean of the values; 0 for an empty vector.
 */
float Utils::getMean(const std::vector<float> &data)
{
    if (data.empty())
    {
        return 0;
    }
    float sum = 0;
    for (float value : data)
    {
        sum += value;
    }
    return sum / data.size();
}
/**
 * Largest value in the vector; 0 for an empty vector.
 */
float Utils::getMax(const std::vector<float> &data)
{
    if (data.empty())
    {
        return 0;
    }
    // Seed with the first element, then scan the rest.
    float best = data[0];
    for (std::size_t i = 1; i < data.size(); ++i)
    {
        if (data[i] > best)
        {
            best = data[i];
        }
    }
    return best;
}
/**
 * Smallest value in the vector; 0 for an empty vector.
 */
float Utils::getMin(const std::vector<float> &data)
{
    if (data.empty())
    {
        return 0;
    }
    // Seed with the first element, then scan the rest.
    float best = data[0];
    for (std::size_t i = 1; i < data.size(); ++i)
    {
        if (data[i] < best)
        {
            best = data[i];
        }
    }
    return best;
}
/**
 * Median of the values. Vectors with fewer than two elements return 0
 * (preserved from the original — note a single-element vector also yields 0).
 */
float Utils::getMedian(const std::vector<float> &data)
{
    if (data.size() < 2)
    {
        return 0;
    }
    // Sort a copy; the input stays untouched.
    std::vector<float> sorted(data);
    std::sort(sorted.begin(), sorted.end(), std::less<float>());
    const int n = sorted.size();
    if (n % 2 == 0)
    {
        // Even count: mean of the two middle elements.
        return (sorted[n / 2] + sorted[n / 2 - 1]) / 2;
    }
    return sorted[(n - 1) / 2];
}
/**
 * True when x occurs at least once in vec.
 */
bool Utils::contains_vec(const std::vector<std::string> &vec, std::string x) {
    for (const std::string &item : vec) {
        if (item == x) {
            return true;
        }
    }
    return false;
}
// /**
// * 读取json格式文件内容
// * inParam : std::string &strFilePath :文件路径
// * outParam: Json::Value &jvFileInfo :json格式内容
// * return : true/false
// */
// bool Utils::ReadJsonInfo(Json::Value &jvFileInfo, std::string &strFilePath)
// {
// std::ifstream ifs(strFilePath.c_str());
// if (!ifs.is_open())
// {
//// LogWarn << "txt:" << strFilePath << " open fail";
// return false;
// }
//
// std::string strContent;
// getline(ifs, strContent);
// ifs.close();
//
// Json::CharReaderBuilder jsrBuilder;
// std::shared_ptr<Json::CharReader> reader(jsrBuilder.newCharReader());
// JSONCPP_STRING errs;
// if (!reader->parse(strContent.data(), strContent.data() + strContent.size(), &jvFileInfo, &errs))
// {
//// LogWarn << "json parse fail content:" << strContent;
// return false;
// }
// return true;
// }
//
// /**
// * json格式内容写入文件
// * inParam : Json::Value &jvFileInfo :json格式内容
// : std::string &strFilePath :文件路径
// * outParam: N/A
// * return : true/false
// */
// bool Utils::WriteJsonInfo(Json::Value &jvFileInfo, std::string &strFilePath)
// {
// Json::StreamWriterBuilder jswBuilder;
// // 紧密型存储
// jswBuilder["indentation"] = "";
// std::string strFrameInfo = Json::writeString(jswBuilder, jvFileInfo);
//
// std::ofstream ofs(strFilePath.c_str());
// if (!ofs.is_open())
// {
// LogWarn << "txt:" << strFilePath << " open fail";
// return false;
// }
// ofs.write(strFrameInfo.c_str(), strFrameInfo.length());
// ofs.close();
// return true;
// }
}

View File

@ -0,0 +1,71 @@
//
// Created by matrixai on 4/2/24.
//
#ifndef TRAIN_RFID_LINUX_UTILS_H
#define TRAIN_RFID_LINUX_UTILS_H
#include <mutex>
#include <sstream>
#include <fstream>
#include <dirent.h>
#include <string>
#include <vector>
#include <algorithm>
#include <set>
#include <map>
#include <memory>
#include <cstring>
namespace ai_matrix
{
// Thread-safe singleton collecting small numeric/container helpers.
class Utils final
{
public:
    // Singleton accessor.
    static Utils *getins();
    // Arithmetic mean (0 for empty input).
    float getMean(const std::vector<float> &data);
    // Maximum value (0 for empty input).
    float getMax(const std::vector<float> &data);
    // Minimum value (0 for empty input).
    float getMin(const std::vector<float> &data);
    // Median (0 for inputs with fewer than two elements).
    float getMedian(const std::vector<float> &data);
    // True when x occurs in vec.
    bool contains_vec(const std::vector<std::string> &vec, std::string x);
    // //读取json格式文件内容
    // bool ReadJsonInfo(Json::Value &jvFileInfo, std::string &strFilePath);
    // //json格式内容写入文件
    // bool WriteJsonInfo(Json::Value &jvFileInfo, std::string &strFilePath);
private:
    Utils() = default;
    Utils(const Utils &) = delete;
    Utils(Utils &&) = delete;
    Utils &operator=(const Utils &) = delete;
    Utils &operator=(Utils &&) = delete;
    ~Utils() = default;
    // Nested helper whose static instance deletes the singleton at program
    // exit, so the heap allocation is not leaked.
    class GarbageCollector
    {
    public:
        ~GarbageCollector()
        {
            if (Utils::ins)
            {
                delete Utils::ins;
                Utils::ins = nullptr;
            }
        }
    };
    static GarbageCollector gc;
    static Utils *ins;
    static std::mutex mx; // guards singleton construction
};
}
#endif //TRAIN_RFID_LINUX_UTILS_H

View File

@ -0,0 +1,71 @@
#include "EngineBase.h"
namespace ai_matrix
{
// Store the engine's identity (device id, name, instance id) from initArgs.
void EngineBase::AssignInitArgs(const EngineInitArguments &initArgs)
{
    deviceId_ = initArgs.deviceId;
    engineName_ = initArgs.engineName;
    engineId_ = initArgs.engineId;
}
// Thread entry point: delegates to the engine-specific Process().
void EngineBase::ProcessThread()
{
    Process();
}
// Register an input queue under the given engine address (overwrites).
void EngineBase::SetInputMap(std::string engineAddress, std::shared_ptr<MyQueue<std::shared_ptr<void>>> inputQueue)
{
    inputQueMap_[engineAddress] = inputQueue;
}
// Look up the input queue registered under engineAddress; nullptr if absent.
std::shared_ptr<MyQueue<std::shared_ptr<void>>> EngineBase::GetInputMap(std::string engineAddress)
{
    auto found = inputQueMap_.find(engineAddress);
    if (found == inputQueMap_.end())
    {
        return nullptr;
    }
    return found->second;
}
// Register an output queue under the given engine address (overwrites).
void EngineBase::SetOutputMap(std::string engineAddress, std::shared_ptr<MyQueue<std::shared_ptr<void>>> outputQue)
{
    outputQueMap_[engineAddress] = outputQue;
}
// Start the engine: clear the stop flag and launch the worker thread.
APP_ERROR EngineBase::Run()
{
    LogInfo << engineName_ << "[" << engineId_ << "] Run";
    isStop_ = false;
    processThr_ = std::thread(&EngineBase::ProcessThread, this);
    return APP_ERR_OK;
}
// Stop the engine: set the stop flag, wake every input queue so a blocked
// Process() can return, join the worker thread, then run engine cleanup.
APP_ERROR EngineBase::Stop()
{
    isStop_ = true;
    for (auto &entry : inputQueMap_)
    {
        entry.second->stop();
    }
    if (processThr_.joinable())
    {
        processThr_.join();
    }
    return DeInit();
}
}

View File

@ -0,0 +1,80 @@
/*
 * Engine base class: every processing stage derives from EngineBase and
 * implements Init/DeInit/Process; the base provides the worker thread and
 * the named input/output queue wiring.
 */
#ifndef INC_ENGINE_BASE_H
#define INC_ENGINE_BASE_H
#include <unistd.h>
#include <thread>
#include <vector>
#include <map>
#include <atomic>
#include <sys/time.h>
#include "ErrorCode.h"
#include "Log.h"
#include "FileManager.h"
#include "CommonDataType.h"
#include "myqueue.h"
#include "myshell.h"
namespace ai_matrix
{
// Arguments handed to an engine at initialization time.
struct EngineInitArguments
{
    int32_t deviceId = 0;
    std::string engineName = {};
    int engineId = -1;
    void *userData = nullptr; // opaque user payload; not read by EngineBase itself
};
class EngineBase
{
public:
    EngineBase() {}
    virtual ~EngineBase() {} // must be virtual: instances are deleted via base pointers
    void AssignInitArgs(const EngineInitArguments &initArgs);
    // Implemented by each concrete engine.
    virtual APP_ERROR Init(void) = 0;
    virtual APP_ERROR DeInit(void) = 0;
    APP_ERROR Run(void);
    APP_ERROR Stop(void);
    // Register an input queue under an engine address.
    void SetInputMap(std::string engineAddress, std::shared_ptr<MyQueue<std::shared_ptr<void>>> inputQueue);
    // Fetch the input queue for an engine address (nullptr when absent).
    std::shared_ptr<MyQueue<std::shared_ptr<void>>> GetInputMap(std::string engineAddress);
    // Register an output queue under an engine address.
    void SetOutputMap(std::string engineAddress, std::shared_ptr<MyQueue<std::shared_ptr<void>>> outputQue);
    // Worker-thread entry function.
    void ProcessThread();
    // Per-engine processing loop, run on the worker thread.
    virtual APP_ERROR Process() = 0;
protected:
    std::atomic_bool isStop_ = {true}; // thread not started by default
    int32_t deviceId_ = -1;
    // input queues keyed by engine address
    std::map<std::string, std::shared_ptr<MyQueue<std::shared_ptr<void>>>> inputQueMap_ = {};
    // output queues keyed by engine address
    std::map<std::string, std::shared_ptr<MyQueue<std::shared_ptr<void>>>> outputQueMap_ = {};
    int engineId_ = -1;           // instance id
    std::string engineName_ = {}; // class name
    std::thread processThr_ = {};
};
}
#endif

View File

@ -0,0 +1,62 @@
/*
 * Engine factory: maps class names to constructor callables and creates
 * engine instances by name (simple C++ self-registration pattern).
 */
#ifndef INC_ENGINE_FACTORY_H
#define INC_ENGINE_FACTORY_H
#include <string>
#include <map>
#include <functional>
// Engine registration macro: declares a helper class whose file-scope static
// instance registers class_name's constructor with the factory at start-up.
#define ENGINE_REGIST(class_name) \
namespace ai_matrix \
{ \
    class class_name##Helper \
    { \
    public: \
        class_name##Helper() \
        { \
            EngineFactory::RegisterEngine(#class_name, class_name##Helper::CreatObjFunc); \
        } \
        static void *CreatObjFunc() \
        { \
            return new class_name; \
        } \
    }; \
    static class_name##Helper class_name##helper; \
}
namespace ai_matrix
{
using Constructor = std::function<void *()>;
class EngineFactory
{
public:
    // Register a constructor under className (overwrites a previous entry).
    static void RegisterEngine(std::string className, Constructor constructor)
    {
        Constructors()[className] = constructor;
    }
    // Create a new instance registered under className; returns nullptr when
    // the name is unknown.  The caller owns the returned object.
    static void *MakeEngine(const std::string &className)
    {
        auto itr = Constructors().find(className);
        if (itr == Constructors().end())
        {
            return nullptr;
        }
        // Invoke the stored std::function directly; the old C-style cast
        // ((Constructor)itr->second)() was redundant and copied the functor.
        return itr->second();
    }
private:
    // Meyers-style function-local static registry: avoids static
    // initialization order problems across translation units.
    inline static std::map<std::string, Constructor> &Constructors()
    {
        static std::map<std::string, Constructor> instance;
        return instance;
    }
};
}
#endif

View File

@ -0,0 +1,250 @@
/**
* engine管理实现
* */
#include "EngineManager.h"
#include "Config.h"
#include "TimeUtil.h"
#include <curl/curl.h>
namespace ai_matrix
{
EngineManager::EngineManager() {}
EngineManager::~EngineManager() {}
// Initialize the manager: currently this only discovers usable device ids.
// (The original comment mentioned "acl", but no ACL runtime call is made.)
APP_ERROR EngineManager::Init()
{
    if (!InitDeviceIds())
    {
        LogError << "InitDeviceIds err";
        return APP_ERR_COMM_INVALID_PARAM;
    }
    return APP_ERR_OK;
}
// De-initialize the manager: nothing to release at present.
APP_ERROR EngineManager::DeInit(void)
{
    return APP_ERR_OK;
}
// Load configuration from a yaml file: per-engine device ids, engine
// instances ("Name: id"), and queue connections ("From_port: To_port [size]").
// Returns APP_ERR_OK on success, APP_ERR_COMM_INVALID_PARAM on any error
// (all yaml exceptions are swallowed into that error code).
APP_ERROR EngineManager::load_yaml_config(std::string path)
{
    try
    {
        YAML::Node config = YAML::LoadFile(path);
        // abort if the file is empty / unreadable
        if (config.IsNull())
        {
            LogError << "matrix.yaml err";
            return APP_ERR_COMM_INVALID_PARAM;
        }
        // device id used by each engine
        // NOTE(review): assumes InitDeviceIds() already ran — dereferencing
        // begin() of an empty init_deviceIds_ would be undefined behavior.
        mapUseDevice_["ALL"] = *init_deviceIds_.begin(); // default: every engine uses the smallest initialized device id
        if(config["use_deviceid"].IsDefined())
        {
            for (YAML::const_iterator it = config["use_deviceid"].begin(); it != config["use_deviceid"].end(); it++)
            {
                std::string engineInfo = it->first.as<std::string>();
                int deviceid = it->second.as<int>();
                // the device id must be one that was initialized
                if (init_deviceIds_.count(deviceid) == 0)
                {
                    LogError << "use_deviceid set err value:" << deviceid;
                    return APP_ERR_COMM_INVALID_PARAM;
                }
                mapUseDevice_[engineInfo] = deviceid;
            }
        }
        // engine instances
        for (YAML::const_iterator it = config["engines"].begin(); it != config["engines"].end(); it++)
        {
            std::string engine_name = it->first.as<std::string>();
            int engine_id = it->second.as<int>();
            // skip duplicate engine entries
            std::string engine_unique = engine_name + "_" + std::to_string(engine_id);
            auto iter = engine_map_.find(engine_unique);
            if (iter != engine_map_.end())
            {
                continue;
            }
            // instantiate the engine through the factory
            std::shared_ptr<EngineBase> engineInstance = nullptr;
            EngineBase* base = (static_cast<EngineBase*>(EngineFactory::MakeEngine(engine_name)));
            if (base == nullptr)
            {
                continue;
            }
            engineInstance.reset(base);
            // initialize the engine
            APP_ERROR ret = InitEngineInstance(engineInstance, engine_name, engine_id);
            if (ret != APP_ERR_OK)
            {
                continue;
            }
            // store it in the map
            engine_map_[engine_unique] = engineInstance;
        }
        // engine connections
        for (YAML::const_iterator it = config["connects"].begin(); it != config["connects"].end(); it++)
        {
            std::string from = it->first.as<std::string>();
            std::string to = it->second.as<std::string>();
            // optional queue size after a space, e.g. "Engine_0 128"
            // NOTE(review): iPos is int but find() returns std::size_t; the
            // comparison with npos only works via integer conversion — a
            // std::size_t would be the type-correct choice.
            int iPos = to.find(" ");
            int iQueueSize = 0;
            if (iPos != std::string::npos)
            {
                iQueueSize = atoi(to.substr(iPos+1, to.length()).c_str());
                to = to.substr(0, iPos);
//                LogInfo << "iQueueSize:" << iQueueSize;
            }
            LogInfo << "Add Connect,send:" << from << ",to:" << to;
            // strip the trailing "_port" to recover the engine key
            std::size_t pos = from.find_last_of("_");
            if (pos == std::string::npos)
            {
                continue;
            }
            std::string src_engine = from.substr(0, pos);
            pos = to.find_last_of("_");
            if (pos == std::string::npos)
            {
                continue;
            }
            std::string dst_engine = to.substr(0, pos);
            auto iterSend = engine_map_.find(src_engine);
            auto iterRecv = engine_map_.find(dst_engine);
            if (iterSend == engine_map_.end() || iterRecv == engine_map_.end())
            {
                LogError << "Cann't find engine " << src_engine << " or " << dst_engine;
                continue;
            }
            // reuse the receiver's existing queue for this address, or create one
            std::shared_ptr<MyQueue<std::shared_ptr<void>>> dataQueue = iterRecv->second->GetInputMap(to);
            if (dataQueue == nullptr)
            {
                dataQueue = std::make_shared<MyQueue<std::shared_ptr<void>>>();
                if (iQueueSize > 0)
                {
                    dataQueue->setMaxSize(iQueueSize);
                }
                // register as the receiver's input queue
                iterRecv->second->SetInputMap(to, dataQueue);
                // register as the sender's output queue
                iterSend->second->SetOutputMap(from, dataQueue);
            }
            else
            {
                // register as the sender's output queue
                iterSend->second->SetOutputMap(from, dataQueue);
            }
        }
    }
    catch (...) // swallow all exceptions (yaml parse/convert errors)
    {
        return APP_ERR_COMM_INVALID_PARAM;
    }
    return APP_ERR_OK;
}
// Initialize one engine instance: resolve which device id it should use
// (exact "Name_id" match first, then a plain "id" match, then the "ALL"
// default), fill in the init arguments, and call the engine's own Init().
// Returns the engine's Init() result.
APP_ERROR EngineManager::InitEngineInstance(std::shared_ptr<EngineBase> engineInstance, std::string engineName, int engineId)
{
    LogInfo << "EngineManager: begin to init engine instance,name=" << engineName << ", engine id = " << engineId << ".";
    // pick the device id configured for this engine
    std::string engineInfo = engineName + "_" + std::to_string(engineId);
    int deviceid;
    if(mapUseDevice_.count(engineInfo) > 0)
    {
        deviceid = mapUseDevice_[engineInfo];
    }
    else if(mapUseDevice_.count(std::to_string(engineId)) > 0)
    {
        deviceid = mapUseDevice_[std::to_string(engineId)];
    }
    else
    {
        deviceid = mapUseDevice_["ALL"];
    }
    EngineInitArguments initArgs;
    // BUGFIX: previously this assigned the manager's deviceId_ member,
    // silently discarding the per-engine device id computed above.
    initArgs.deviceId = deviceid;
    initArgs.engineName = engineName;
    initArgs.engineId = engineId;
    engineInstance->AssignInitArgs(initArgs); // fill in the arguments
    APP_ERROR ret = engineInstance->Init();   // run engine-specific init
    if (ret != APP_ERR_OK)
    {
        LogError << "EngineManager: fail to init engine, name = " << engineName << ", engine id = " << engineId << ".";
        return ret;
    }
    LogInfo << "EngineManager: engine " << engineName << "[" << engineId << "] init success.";
    return ret;
}
// Start every registered engine's worker thread.
APP_ERROR EngineManager::RunAllEngine()
{
    LogInfo << "begin to run engine.";
    for (auto &entry : engine_map_)
    {
        entry.second->Run();
    }
    return APP_ERR_OK;
}
// Stop every registered engine (flag, queue wake-up, thread join).
APP_ERROR EngineManager::StopAllEngine()
{
    LogInfo << "begin to stop engine.";
    for (auto &entry : engine_map_)
    {
        entry.second->Stop();
    }
    return APP_ERR_OK;
}
// Return a raw pointer to the engine registered under engineName, or
// nullptr when no such engine exists.  Ownership stays with engine_map_.
EngineBase *EngineManager::get_engine(std::string engineName)
{
    auto found = engine_map_.find(engineName);
    return (found != engine_map_.end()) ? found->second.get() : nullptr;
}
/**
 * Discover/initialize the usable device ids.
 * inParam : N/A
 * outParam: N/A
 * return  : true (success) / false (failure)
 */
bool EngineManager::InitDeviceIds()
{
    // Only device 0 is assumed present; extend here for multi-device hosts.
    init_deviceIds_.insert(0);
    return true;
}
}

View File

@ -0,0 +1,73 @@
/*
 * Engine manager: instantiates engines from yaml configuration, wires their
 * queues together, and starts/stops them as a group.
 */
#ifndef INC_ENGINE_MANAGER_H
#define INC_ENGINE_MANAGER_H
#include "EngineBase.h"
#include "EngineFactory.h"
namespace ai_matrix
{
// Engine description
struct EngineDesc
{
    std::string engine_name; // engine name
    int engine_id;           // engine id
};
// Description of a connection between two engines' ports
struct EngineConnectDesc
{
    std::string engine_send_name; // sending engine name
    int engine_send_id;           // sending engine id
    int engine_send_port;         // sending engine port
    std::string engine_recv_name; // receiving engine name
    int engine_recv_id;           // receiving engine id
    int engine_recv_port;         // receiving engine port
};
class EngineManager
{
public:
    EngineManager();
    ~EngineManager();
    // Initialize (device id discovery).
    APP_ERROR Init();
    // De-initialize.
    APP_ERROR DeInit(void);
    // Register engine instances and connections from a yaml file.
    APP_ERROR load_yaml_config(std::string path);
    // Initialize a single engine instance.
    APP_ERROR InitEngineInstance(std::shared_ptr<EngineBase> engineInstance, std::string engineName, int engineId);
    // Run all engines.
    APP_ERROR RunAllEngine();
    // Stop all engines.
    APP_ERROR StopAllEngine();
    // Get a pointer to one engine (keyed by "Name_id"; nullptr if absent).
    EngineBase *get_engine(std::string engineName);
private:
    int32_t deviceId_ = 0;
    // engine key ("Name_id") -> instance
    std::map<std::string, std::shared_ptr<EngineBase>> engine_map_ = {};
    // device ids that were initialized
    std::set<int> init_deviceIds_ = {};
    // engine key -> device id it should use
    std::map<std::string, int> mapUseDevice_ = {};
    // discover usable device ids
    bool InitDeviceIds();
};
}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,74 @@
#include "mylog.h"
namespace ai_matrix
{
MyLog::~MyLog()
{
    if (of_.is_open())
    {
        of_.close(); // close the file
    }
}
// Returns the name of the currently opened log file.
std::string MyLog::get_name() const
{
    return name_;
}
// Open (append mode) a log file, closing any previously opened one.
// Asserts on failure — in release builds a bad path is silently ignored.
void MyLog::open_file(std::string filename)
{
    if (of_.is_open())
    {
        of_.close(); // close the previous file
    }
    of_.open(filename.c_str(), std::ios_base::out | std::ios_base::app);
    assert(of_.is_open() && "file create failed, please check the file's name and path.");
    name_ = filename;
}
// Close the log file if open.
void MyLog::close()
{
    if (of_.is_open())
    {
        of_.close(); // close the file
    }
}
// True while a log file is open.
bool MyLog::is_open()
{
    return of_.is_open();
}
// Append one timestamped line to the log file; silently drops the message
// when no file is open.
void MyLog::write(std::string value)
{
    if (of_.is_open() == false)
    {
        return;
    }
    long int usValue = 0;
    struct timeval time = {0, 0};
    gettimeofday(&time, nullptr);
    time_t timep = time.tv_sec;
    // NOTE(review): gmtime yields UTC, not local time — confirm this is intended.
    struct tm *ptm = gmtime(&timep); // GMT time as a tm structure
    char timeString[32] = {0};
    // %F year-month-day, %X standard time string
    strftime(timeString, 32, "[%F %X:", ptm);
    usValue = time.tv_usec;
    // timestamp with millisecond suffix
    of_.fill('0');
    of_ << timeString << std::setw(3) << usValue / 1000 << "]";
    // NOTE(review): __FILE__/__FUNCTION__/__LINE__ expand HERE, so every
    // entry reports mylog.cpp/write, not the caller's location.
    std::string file = __FILE__;
    std::string fileName = file.substr(file.rfind('/') + 1);
    of_ << "[" << fileName << " " << __FUNCTION__ << ":" << __LINE__ << "] ";
    // message body
    of_ << value << std::endl;
}
}

View File

@ -0,0 +1,48 @@
#ifndef MYLOG_H_
#define MYLOG_H_
#include <assert.h>
#include <unistd.h>
#include <string>
#include <map>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <vector>
#include <sys/time.h>
namespace ai_matrix
{
// Minimal file logger: opens one file in append mode and writes timestamped
// lines.  Non-copyable, non-movable.
class MyLog final
{
public:
    MyLog() = default;
    MyLog(const MyLog &) = delete;
    MyLog(MyLog &&) = delete;
    MyLog &operator=(const MyLog &) = delete;
    MyLog &operator=(MyLog &&) = delete;
    ~MyLog();
    // Get the current log file name.
    std::string get_name() const;
    // Open/create the log file (append mode).
    void open_file(std::string filename);
    // Close the log file.
    void close();
    // True while the log file is open.
    bool is_open();
    // Append one line to the log file.
    void write(std::string value);
private:
    std::ofstream of_;
    std::string name_; // log file name
};
}
#endif

View File

@ -0,0 +1,111 @@
#ifndef MYQUEUE_H_
#define MYQUEUE_H_
#include <iostream>
#include <queue>
#include <list>
#include <mutex>
#include <condition_variable>
#include <cstdio>
namespace ai_matrix
{
/*
 * Bounded, thread-safe blocking queue.
 * push/pop return 0 on success, 1 when the queue has been stopped, and 2
 * when the queue is full (push) / empty (pop) and waiting was not requested.
 */
template <typename T>
class MyQueue final
{
public:
    MyQueue(int max = 1024) : max_size_(max), is_stoped_(false) {}
    MyQueue(const MyQueue &) = delete;
    MyQueue(MyQueue &&) = delete;
    MyQueue &operator=(const MyQueue &) = delete;
    MyQueue &operator=(MyQueue &&) = delete;
    ~MyQueue() = default;
    // Enqueue value; optionally block while the queue is full.
    int push(const T &value, bool isWait = true)
    {
        std::unique_lock<std::mutex> lk(mutex_);
        while (queue_.size() >= max_size_ && isWait && !is_stoped_)
        {
            printf("myqueue full\n"); // BUGFIX: added newline so the warning is flushed per line
            cond_not_full_.wait(lk);
        }
        if (is_stoped_)
        {
            return 1;
        }
        if (queue_.size() >= max_size_)
        {
            return 2;
        }
        queue_.push(value);
        cond_not_empty_.notify_one();
        return 0;
    }
    // Dequeue into value; optionally block while the queue is empty.
    int pop(T &value, bool isWait = false)
    {
        std::unique_lock<std::mutex> lk(mutex_);
        while (queue_.empty() && isWait && !is_stoped_)
        {
            cond_not_empty_.wait(lk);
        }
        if (is_stoped_)
        {
            return 1;
        }
        if (queue_.empty())
        {
            return 2;
        }
        value = queue_.front();
        queue_.pop();
        cond_not_full_.notify_one();
        return 0;
    }
    // Stop the queue and wake every blocked producer/consumer.
    void stop()
    {
        {
            std::unique_lock<std::mutex> lk(mutex_);
            is_stoped_ = true;
        }
        cond_not_full_.notify_all();
        cond_not_empty_.notify_all();
    }
    // Number of queued items.
    // BUGFIX: previously read queue_.size() without the mutex (data race).
    int getSize()
    {
        std::unique_lock<std::mutex> lk(mutex_);
        return static_cast<int>(queue_.size());
    }
    // Change the capacity.
    // BUGFIX: previously wrote max_size_ without the mutex; also wakes
    // blocked producers so they can re-check against the new capacity.
    void setMaxSize(int iMaxSize)
    {
        {
            std::unique_lock<std::mutex> lk(mutex_);
            max_size_ = iMaxSize;
        }
        cond_not_full_.notify_all();
    }
private:
    std::queue<T> queue_; // underlying FIFO
    std::condition_variable cond_not_empty_;
    std::condition_variable cond_not_full_;
    std::mutex mutex_;
    int max_size_;
    bool is_stoped_;
};
}
#endif

View File

@ -0,0 +1,84 @@
#include "myshell.h"
namespace ai_matrix
{
const int TIME_SIZE = 32;
const int TIME_DIFF = 28800; // 8 hour
std::mutex MyShell::mutex;
uint32_t MyShell::shellLevel = SHELL_LEVEL_INFO;
std::vector<std::string> MyShell::levelString{"[Debug]", "[Info ]", "[Warn ]", "[Error]", "[Fatal]"};
// One MyShell per log statement: Stream() collects the text, the destructor
// prints it, so a whole statement is emitted atomically under the mutex.
MyShell::MyShell(std::string file, std::string function, int line, uint32_t level)
    : myLevel_(level), file_(file), function_(function), line_(line)
{
}
MyShell::~MyShell()
{
    if (myLevel_ >= shellLevel)
    {
        std::lock_guard<std::mutex> locker(mutex);
        // cout to screen
        std::cout << ss_.str() << std::endl;
    }
};
// Build the "[level][timestamp][file func:line]" prefix and return the
// stream the caller appends the message to.
std::ostringstream &MyShell::Stream()
{
    if (myLevel_ >= shellLevel)
    {
        struct timeval time = {0, 0};
        gettimeofday(&time, nullptr);
        // NOTE(review): hard-coded +8h (CST) applied to gmtime instead of
        // using localtime — breaks on hosts in other time zones.
        time_t timep = time.tv_sec + TIME_DIFF; // add 8 hours
        struct tm *ptm = gmtime(&timep);        // GMT time as a tm structure
        char timeString[TIME_SIZE] = {0};
        // %F year-month-day, %X standard time string
        strftime(timeString, TIME_SIZE, "[%F %X:", ptm);
        long int usValue = time.tv_usec;
        date_ = timeString;
        ss_.fill('0');
        ss_ << levelString[myLevel_] << timeString << std::setw(3) << usValue / 1000 << "]";
        std::string fileName = file_.substr(file_.rfind('/') + 1);
        ss_ << "[" << fileName << " " << function_ << ":" << line_ << "] ";
    }
    return ss_;
}
// The setters below change the global threshold; messages at or above it are printed.
void MyShell::ShellDebugOn()
{
    shellLevel = SHELL_LEVEL_DEBUG;
    return;
}
void MyShell::ShellInfoOn()
{
    shellLevel = SHELL_LEVEL_INFO;
    return;
}
void MyShell::ShellWarnOn()
{
    shellLevel = SHELL_LEVEL_WARN;
    return;
}
void MyShell::ShellErrorOn()
{
    shellLevel = SHELL_LEVEL_ERROR;
    return;
}
void MyShell::ShellFatalOn()
{
    shellLevel = SHELL_LEVEL_FATAL;
    return;
}
void MyShell::ShellAllOn()
{
    shellLevel = SHELL_LEVEL_DEBUG;
    return;
}
void MyShell::ShellAllOff()
{
    shellLevel = SHELL_LEVEL_NONE;
    return;
}
} // namespace ai_matrix

View File

@ -0,0 +1,61 @@
#ifndef MYSHELL_H
#define MYSHELL_H
#include <mutex>
#include <sstream>
#include <string>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sys/time.h>
#include <vector>
namespace ai_matrix
{
// shell (console) log levels
enum ShellLevels
{
    SHELL_LEVEL_DEBUG = 0,
    SHELL_LEVEL_INFO = 1,
    SHELL_LEVEL_WARN = 2,
    SHELL_LEVEL_ERROR = 3,
    SHELL_LEVEL_FATAL = 4,
    SHELL_LEVEL_NONE
};
// Streaming console logger: one temporary per statement (see the macros
// below); the destructor prints the accumulated message.
class MyShell final
{
public:
    MyShell(std::string file, std::string function, int line, uint32_t level);
    ~MyShell();
    // Stream to append the message to (prefix is written here).
    std::ostringstream &Stream();
    // Global threshold setters.
    static void ShellDebugOn();
    static void ShellInfoOn();
    static void ShellWarnOn();
    static void ShellErrorOn();
    static void ShellFatalOn();
    static void ShellAllOn();
    static void ShellAllOff();
private:
    std::ostringstream ss_;  // per-statement buffer
    uint32_t myLevel_;       // level of this statement
    std::string date_;
    std::string file_;
    std::string function_;
    int line_;
    static uint32_t shellLevel;                  // global threshold
    static std::vector<std::string> levelString; // level name prefixes
    static std::mutex mutex;                     // serializes console output
};
} // namespace ai_matrix
#define MyShellDebug ai_matrix::MyShell(__FILE__, __FUNCTION__, __LINE__, ai_matrix::SHELL_LEVEL_DEBUG).Stream()
#define MyShellInfo ai_matrix::MyShell(__FILE__, __FUNCTION__, __LINE__, ai_matrix::SHELL_LEVEL_INFO).Stream()
#define MyShellWarn ai_matrix::MyShell(__FILE__, __FUNCTION__, __LINE__, ai_matrix::SHELL_LEVEL_WARN).Stream()
#define MyShellError ai_matrix::MyShell(__FILE__, __FUNCTION__, __LINE__, ai_matrix::SHELL_LEVEL_ERROR).Stream()
#define MyShellFatal ai_matrix::MyShell(__FILE__, __FUNCTION__, __LINE__, ai_matrix::SHELL_LEVEL_FATAL).Stream()
#endif

View File

@ -0,0 +1,155 @@
#ifndef _NVIDIA_ACL_DATATYPE_H_
#define _NVIDIA_ACL_DATATYPE_H_
// ACL-style type/format/log enums (presumably mirroring Huawei ACL's values
// for use on NVIDIA — TODO confirm against the original ACL headers).
typedef enum {
    ACL_DT_UNDEFINED = -1,
    ACL_FLOAT = 0,
    ACL_FLOAT16 = 1,
    ACL_INT8 = 2,
    ACL_INT32 = 3,
    ACL_UINT8 = 4,
    ACL_INT16 = 6,
    ACL_UINT16 = 7,
    ACL_UINT32 = 8,
    ACL_INT64 = 9,
    ACL_UINT64 = 10,
    ACL_DOUBLE = 11,
    ACL_BOOL = 12,
    ACL_STRING = 13,
} nvidia_aclDataType;
// Tensor memory layouts.
typedef enum {
    ACL_FORMAT_UNDEFINED = -1,
    ACL_FORMAT_NCHW = 0,
    ACL_FORMAT_NHWC = 1,
    ACL_FORMAT_ND = 2,
    ACL_FORMAT_NC1HWC0 = 3,
    ACL_FORMAT_FRACTAL_Z = 4,
    ACL_FORMAT_NC1HWC0_C04 = 12,
    ACL_FORMAT_FRACTAL_NZ = 29,
} nvidia_aclFormat;
// ACL log levels.
typedef enum {
    ACL_DEBUG = 0,
    ACL_INFO = 1,
    ACL_WARNING = 2,
    ACL_ERROR = 3,
} nvidia_aclLogLevel;
// DVPP (image/video processing) definitions.
// Supported Pixel Format
enum nvidia_acldvppPixelFormat {
    PIXEL_FORMAT_YUV_400 = 0, // 0
    PIXEL_FORMAT_YUV_SEMIPLANAR_420 = 1, // 1
    PIXEL_FORMAT_YVU_SEMIPLANAR_420 = 2, // 2
    PIXEL_FORMAT_YUV_SEMIPLANAR_422 = 3, // 3
    PIXEL_FORMAT_YVU_SEMIPLANAR_422 = 4, // 4
    PIXEL_FORMAT_YUV_SEMIPLANAR_444 = 5, // 5
    PIXEL_FORMAT_YVU_SEMIPLANAR_444 = 6, // 6
    PIXEL_FORMAT_YUYV_PACKED_422 = 7, // 7
    PIXEL_FORMAT_UYVY_PACKED_422 = 8, // 8
    PIXEL_FORMAT_YVYU_PACKED_422 = 9, // 9
    PIXEL_FORMAT_VYUY_PACKED_422 = 10, // 10
    PIXEL_FORMAT_YUV_PACKED_444 = 11, // 11
    PIXEL_FORMAT_RGB_888 = 12, // 12
    PIXEL_FORMAT_BGR_888 = 13, // 13
    PIXEL_FORMAT_ARGB_8888 = 14, // 14
    PIXEL_FORMAT_ABGR_8888 = 15, // 15
    PIXEL_FORMAT_RGBA_8888 = 16, // 16
    PIXEL_FORMAT_BGRA_8888 = 17, // 17
    PIXEL_FORMAT_YUV_SEMI_PLANNER_420_10BIT = 18, // 18
    PIXEL_FORMAT_YVU_SEMI_PLANNER_420_10BIT = 19, // 19
    PIXEL_FORMAT_YVU_PLANAR_420 = 20, // 20
    PIXEL_FORMAT_YVU_PLANAR_422,
    PIXEL_FORMAT_YVU_PLANAR_444,
    PIXEL_FORMAT_RGB_444 = 23,
    PIXEL_FORMAT_BGR_444,
    PIXEL_FORMAT_ARGB_4444,
    PIXEL_FORMAT_ABGR_4444,
    PIXEL_FORMAT_RGBA_4444,
    PIXEL_FORMAT_BGRA_4444,
    PIXEL_FORMAT_RGB_555,
    PIXEL_FORMAT_BGR_555,
    PIXEL_FORMAT_RGB_565,
    PIXEL_FORMAT_BGR_565,
    PIXEL_FORMAT_ARGB_1555,
    PIXEL_FORMAT_ABGR_1555,
    PIXEL_FORMAT_RGBA_1555,
    PIXEL_FORMAT_BGRA_1555,
    PIXEL_FORMAT_ARGB_8565,
    PIXEL_FORMAT_ABGR_8565,
    PIXEL_FORMAT_RGBA_8565,
    PIXEL_FORMAT_BGRA_8565,
    PIXEL_FORMAT_RGB_BAYER_8BPP = 50,
    PIXEL_FORMAT_RGB_BAYER_10BPP,
    PIXEL_FORMAT_RGB_BAYER_12BPP,
    PIXEL_FORMAT_RGB_BAYER_14BPP,
    PIXEL_FORMAT_RGB_BAYER_16BPP,
    PIXEL_FORMAT_BGR_888_PLANAR = 70,
    PIXEL_FORMAT_HSV_888_PACKAGE,
    PIXEL_FORMAT_HSV_888_PLANAR,
    PIXEL_FORMAT_LAB_888_PACKAGE,
    PIXEL_FORMAT_LAB_888_PLANAR,
    PIXEL_FORMAT_S8C1,
    PIXEL_FORMAT_S8C2_PACKAGE,
    PIXEL_FORMAT_S8C2_PLANAR,
    PIXEL_FORMAT_S16C1,
    PIXEL_FORMAT_U8C1,
    PIXEL_FORMAT_U16C1,
    PIXEL_FORMAT_S32C1,
    PIXEL_FORMAT_U32C1,
    PIXEL_FORMAT_U64C1,
    PIXEL_FORMAT_S64C1,
    PIXEL_FORMAT_YUV_SEMIPLANAR_440 = 1000,
    PIXEL_FORMAT_YVU_SEMIPLANAR_440,
    PIXEL_FORMAT_FLOAT32,
    PIXEL_FORMAT_BUTT,
    PIXEL_FORMAT_UNKNOWN = 10000
};
// Stream Format
enum nvidia_acldvppStreamFormat {
    H265_MAIN_LEVEL = 0,
    H264_BASELINE_LEVEL,
    H264_MAIN_LEVEL,
    H264_HIGH_LEVEL
};
// Supported Channel Mode
enum nvidia_acldvppChannelMode {
    DVPP_CHNMODE_VPC = 1,
    DVPP_CHNMODE_JPEGD = 2,
    DVPP_CHNMODE_JPEGE = 4
};
// Supported Border Type
enum nvidia_acldvppBorderType {
    BORDER_CONSTANT = 0,
    BORDER_REPLICATE,
    BORDER_REFLECT,
    BORDER_REFLECT_101
};
// Venc parameter type
enum nvidia_aclvencChannelDescParamType {
    ACL_VENC_THREAD_ID_UINT64 = 0,
    ACL_VENC_CALLBACK_PTR,
    ACL_VENC_PIXEL_FORMAT_UINT32,
    ACL_VENC_ENCODE_TYPE_UINT32,
    ACL_VENC_PIC_WIDTH_UINT32,
    ACL_VENC_PIC_HEIGHT_UINT32,
    ACL_VENC_KEY_FRAME_INTERVAL_UINT32,
    ACL_VENC_BUF_ADDR_PTR,
    ACL_VENC_BUF_SIZE_UINT32,
    ACL_VENC_RC_MODE_UINT32,
    ACL_VENC_SRC_RATE_UINT32,
    ACL_VENC_MAX_BITRATE_UINT32,
    ACL_VENC_MAX_IP_PROP_UINT32
};
#endif

95
src/base/AppCommon.h Normal file
View File

@ -0,0 +1,95 @@
#ifndef APP_COMMON_H
#define APP_COMMON_H
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <numeric>
#include <queue>
#include <stack>
#include <sstream>
#include <string>
#include <thread>
#include <utility>
#include <unordered_map>
#include <vector>
#include <dirent.h>
#include <fcntl.h>
#include <linux/fb.h>
#include <semaphore.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include "Log.h"
//#include <opencv2/opencv.hpp>
//#include <opencv2/core/core.hpp>
//#include <opencv2/highgui/highgui.hpp>
//#include <opencv2/imgproc/imgproc.hpp>
// FFmpeg headers are C; wrap in extern "C" for C++ linkage.
#ifdef __cplusplus
extern "C"
{
#endif
#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>
#include <libavformat/avformat.h>
#ifdef __cplusplus
};
#endif
#include "CommonDataType.h"
#include "nvidia_acl_datatype.h"
// Shared receive-buffer size for serial/socket I/O (bytes).
#define AEI_COMM_BUFFER_SIZE 2048
// One RFID tag reading and its timestamps.
typedef struct
{
    std::string strRfid;
    std::string strTime;
    std::string strTrainTime;
} RfidInfo;
// Information about one train carriage derived from RFID data.
typedef struct
{
    std::string strCarriageType;
    std::string strCarriageNum;
    std::string strOrder;
    std::string strTrainTime;
    std::string strRfidInfo;
    std::string strNowTime;
} TrainInfo;
// Train-arrival event: time, direction and whether a train is present.
typedef struct
{
    std::string strComeTime;
    int iDirection;
    bool bComeTrain;
} ComeTrainInfo;
// Calendar date.
typedef struct Date
{
    int year;
    int month;
    int day;
} Date;
#endif

View File

@ -0,0 +1,250 @@
//
// Created by matrixai on 3/26/24.
//
#include "BaseComPort.h"
// Constructor.
// Zeroes the receive buffer and builds the parity-letter -> code table used
// by ttySetBaud ("n/N" none, "o/O" odd, "e/E" even, "s/S" space).
BaseComPort::BaseComPort()
{
    // BUGFIX: was sizeof(AEI_COMM_BUFFER_SIZE) — i.e. sizeof(int), only 4
    // bytes — leaving the rest of the buffer untouched by this memset.
    memset(chbuffer, 0x00, sizeof(chbuffer));
    m_map_parity = {
        {"n",0},
        {"N",1},
        {"o",2},
        {"O",3},
        {"e",4},
        {"E",5},
        {"s",6},
        {"S",7}
    };
}
// Destructor.
// Closes the serial port if it is still open.
BaseComPort::~BaseComPort()
{
    ttyClose();
}
// Open the serial device strName.
// O_RDWR: read/write; O_NOCTTY: do not become the controlling terminal;
// O_NDELAY: non-blocking open.  Returns the descriptor, or -1 on failure.
int32_t BaseComPort::ttyOpen(std::string strName)
{
    LogInfo<<"m_serial_data.name.c_str() is: "<<strName;
    fd = open(strName.c_str(), O_RDWR | O_NOCTTY | O_NDELAY);
    LogInfo<<"fd value is: "<<fd;
    if (fd < 0)
    {
        LogError<<"open device failure.";
        return -1;
    }
    /* verify the device really is a tty */
    if (isatty(fd) == 0)
    {
        LogError<<"not tty device.";
        // BUGFIX: close the descriptor instead of leaking it; otherwise the
        // object would also report bRuning() == true after this failure.
        close(fd);
        fd = -1;
        return -1;
    }
    LogInfo<<"tty device is ok";
    return fd;
}
// Close the serial port and reset the descriptor.  Always returns -1.
int32_t BaseComPort::ttyClose()
{
    if (fd >= 0)
    {
        close(fd); // avoid close(-1) when the port was never opened
    }
    fd = -1;
    return fd;
}
// True while a descriptor is open (port considered "running").
bool BaseComPort::bRuning()
{
    return (fd >= 0);
}
// Configure baud rate, data bits, parity and stop bits on the open port.
// Returns 0 on success, -1 on any unsupported parameter.
// NOTE(review): on an unsupported baud rate ret is set to -1 but the
// function keeps configuring and still calls tcsetattr before returning -1.
int32_t BaseComPort::ttySetBaud(int nbaud, int ndatabits, std::string strparitybits, int nstopbits)
{
    int32_t ret = 0;
    bzero(&ntm, sizeof(ntm)); // zero the struct (immediately overwritten by tcgetattr below)
    // fetch current terminal attributes; returns 0 on success
    if(tcgetattr(fd,&ntm) != 0)
    {
        LogError<<"setup serial failure.";
        return -1;
    }
//    ntm.c_cflag = CS8 | CLOCAL | CREAD; // 8 data bits, ignore modem control lines, enable receiver
    ntm.c_cflag &= ~CSIZE; // clear the data-bits mask
    // baud rate (input and output set to the same speed)
    switch(nbaud)
    {
        case 300:
            //ntm.c_cflag |= B300;
            cfsetispeed(&ntm, B300);
            cfsetospeed(&ntm, B300);
            break;
        case 1200:
            cfsetispeed(&ntm, B1200);
            cfsetospeed(&ntm, B1200);
            break;
        case 2400:
            cfsetispeed(&ntm, B2400);
            cfsetospeed(&ntm, B2400);
            break;
        case 4800:
            cfsetispeed(&ntm, B4800);
            cfsetospeed(&ntm, B4800);
            break;
        case 9600:
            cfsetispeed(&ntm, B9600);
            cfsetospeed(&ntm, B9600);
            break;
        case 19200:
            cfsetispeed(&ntm, B19200);
            cfsetospeed(&ntm, B19200);
            break;
        case 38400:
            cfsetispeed(&ntm, B38400);
            cfsetospeed(&ntm, B38400);
            break;
        case 115200:
            cfsetispeed(&ntm, B115200);
            cfsetospeed(&ntm, B115200);
            break;
        default:
            LogError<<"the value of m_serial_data.baud is error, please check.";
            ret = -1;
            break;
    }
    // data bits
    switch (ndatabits)
    {
        case 5:
            ntm.c_cflag |= CS5;
            break;
        case 6:
            ntm.c_cflag |= CS6;
            break;
        case 7:
            ntm.c_cflag |= CS7;
            break;
        case 8:
            ntm.c_cflag |= CS8;
            break;
        default:
            LogError<<"the data_bits in config.yaml is error, please check.";
            return -1;
            break;
    }
    // parity: map the configured letter to a code (see m_map_parity)
    int32_t parity = -1;
    auto findResult = m_map_parity.find(strparitybits);
    if(findResult != m_map_parity.end())
    {
        parity = m_map_parity[strparitybits];
    }
    switch (parity)
    {
        case 0: // 'n'/'N': no parity
        case 1:
            ntm.c_cflag &= ~PARENB;
            ntm.c_iflag &= ~INPCK;
            break;
        case 2: // 'o'/'O': odd parity
        case 3:
            ntm.c_cflag |= (PARODD|PARENB);
            ntm.c_iflag |= INPCK;
            break;
        case 4: // 'e'/'E': even parity
        case 5:
            ntm.c_cflag |= PARENB;
            ntm.c_cflag &= ~PARODD;
            ntm.c_iflag |= INPCK;
            break;
        case 6: // 's'/'S': treated as no parity
        case 7:
            // NOTE(review): this also clears CSTOPB, which belongs to the
            // stop-bits setting below — confirm this is intended.
            ntm.c_cflag &= ~PARENB;
            ntm.c_cflag &= ~CSTOPB;
            break;
        default:
            LogError<<"the parity_bits in config.yaml is error, please check.";
            return -1;
            break;
    }
    // stop bits
    switch (nstopbits)
    {
        case 1:
            ntm.c_cflag &= ~CSTOPB;
            break;
        case 2:
            ntm.c_cflag |= CSTOPB;
            break;
        default:
            LogError<<"the stop_bits in config.yaml is error, please check.";
            return -1;
            break;
    }
    //ntm.c_lflag = 0;
    ntm.c_lflag &= ~(ICANON | ECHO | ECHOE | ISIG); // raw (non-canonical) mode, no echo/signals
    ntm.c_cc[VTIME] = 10; // read timeout in tenths of a second (i.e. 1s per unit scheme; original comment said 10s)
    ntm.c_cc[VMIN] = 1;   // block until at least 1 byte (or set 0 for pure timeout reads)
    tcflush(fd, TCIFLUSH); // discard pending, unread input
    // apply attributes; TCSANOW = take effect immediately
    if (tcsetattr(fd,TCSANOW,&ntm) != 0)
    {
        LogError<<"setup serial failure";
        return -1;
    }
    return ret;
}
// Read from the serial port into the internal buffer (NUL-padded; one byte
// reserved for a terminator).  Returns read()'s result: bytes read, 0, or -1.
int32_t BaseComPort::ttyRead()
{
    memset(chbuffer, 0x00, sizeof(chbuffer));
    int nRev = read(fd, chbuffer, (sizeof(chbuffer) - 1));
    //LogInfo<<fd<<":"<<chbuffer<<":"<<nRev;
    return nRev;
}
// Write count bytes from buf to the serial port.
// Returns write()'s result: bytes written or -1.
int32_t BaseComPort::ttyWrite(char *buf, int32_t count)
{
    int nRev = write(fd, buf, count);
    return nRev;
}
// Copy the buffered data (treated as a C string) into chOutBuff, truncated
// to nBufferSize bytes; returns the number of bytes copied.
int32_t BaseComPort::vGetBuffer(char *chOutBuff, int32_t nBufferSize)
{
    chbuffer[sizeof(chbuffer) - 1] = 0x00; // guarantee termination before strlen
    int nBufferLength = strlen(chbuffer);
    int nOutLength = nBufferLength;
    if (nOutLength > nBufferSize)
    {
        nOutLength = nBufferSize;
    }
    memset(chOutBuff, 0x00, nBufferSize);
    memcpy(chOutBuff, chbuffer, nOutLength);
    return nOutLength;
}
// Capacity of the internal receive buffer, in bytes.
int32_t BaseComPort::vGetMaxBufferSize()
{
    return AEI_COMM_BUFFER_SIZE;
}

View File

@ -0,0 +1,65 @@
//
// Created by matrixai on 3/26/24.
//
#ifndef TRAIN_RFID_LINUX_BASECOMPORT_H
#define TRAIN_RFID_LINUX_BASECOMPORT_H
#include <string>
#include <stdio.h>
#include <ctype.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>
#include <fcntl.h>
#include <termios.h>
#include "AppCommon.h"
#define SOCKET_TIME_OUT_SETTING 10
// Thin wrapper around a POSIX serial (tty) device: open, configure
// (baud/data/parity/stop), read into an internal buffer, write.
class BaseComPort
{
public:
    BaseComPort();
    ~BaseComPort();
    // Open the comport device (returns fd, -1 on failure).
    int32_t ttyOpen(std::string strName);
    // Close the comport.
    int32_t ttyClose();
    // Configure baud rate / data bits / parity / stop bits.
    int32_t ttySetBaud(int nbaud, int ndatabits, std::string nparitybits, int nstopbits);
    // Read comport data into the internal buffer.
    int32_t ttyRead();
    // Write comport data.
    int32_t ttyWrite(char *buf, int32_t count);
    // True while the port is open.
    bool bRuning();
    // Copy out the buffered data.
    int32_t vGetBuffer(char *chOutBuff, int32_t nSize);
    int32_t vGetMaxBufferSize();
private:
    int32_t fd = -1;
    // running state (NOTE(review): set nowhere; bRuning() checks fd instead)
    bool bRun = false;
    char chbuffer[AEI_COMM_BUFFER_SIZE] = {0};
    struct termios ntm; // serial device options
    std::map<std::string, int32_t> m_map_parity;
};
#endif //TRAIN_RFID_LINUX_BASECOMPORT_H

View File

@ -0,0 +1,141 @@
//
// Created by matrixai on 3/26/24.
//
#include "BaseSocket.h"
// Constructor — nothing to initialize (members have in-class defaults).
BaseSocket::BaseSocket()
{
}
// Destructor — NOTE(review): does not call Close(); the caller is
// responsible for closing the socket.
BaseSocket::~BaseSocket()
{
}
// Create the TCP socket and set fixed 3-second send/receive timeouts.
// On failure only bRun is cleared; no error is reported to the caller.
void BaseSocket::Open()
{
    nSocketId = socket(AF_INET, SOCK_STREAM, 0); //SOCK_NONBLOCK
    if(nSocketId == -1)
    {
        bRun = false;
    } else {
        struct timeval recvTimersetting;
        // receive timeout fixed at 3 seconds
        recvTimersetting.tv_sec = 3;
        recvTimersetting.tv_usec = 0;
        // apply the receive timeout
        setsockopt(nSocketId, SOL_SOCKET, SO_RCVTIMEO, &recvTimersetting, sizeof(recvTimersetting));
        struct timeval sendTimersetting;
        // send timeout fixed at 3 seconds
        sendTimersetting.tv_sec = 3;
        sendTimersetting.tv_usec = 0;
        // apply the send timeout
        setsockopt(nSocketId, SOL_SOCKET, SO_SNDTIMEO, &sendTimersetting, sizeof(sendTimersetting));
    }
}
// Fill in the peer address (IP + port) this socket will connect to.
// inParam : port  - TCP port number
//           strIp - dotted-quad IP address of the peer
void BaseSocket::SetPort(int port, std::string strIp)
{
    memset(&serv_addr, 0, sizeof(serv_addr));
    serv_addr.sin_family = AF_INET;
    serv_addr.sin_port = htons(port);
    serv_addr.sin_addr.s_addr = inet_addr(strIp.c_str());
}
// Connect to the peer set by SetPort().  (The commented-out code is the old
// server-side bind/listen/accept path.)
// NOTE(review): connect() returns 0/-1, not a descriptor, so nConnectId is a
// status flag here — unlike the accept() fd the old code stored.
void BaseSocket::Init()
{
    /*
    bind(nSocketId, (struct sockaddr*)&serv_addr, sizeof(serv_addr));
    // maximum pending-connection backlog fixed at 64
    listen(nSocketId, 64);
    struct sockaddr_in clien_addr;
    nConnectId = accept(nSocketId, (struct sockaddr*)&clien_addr, &clien_len);
    */
    socklen_t serv_len = sizeof(serv_addr);
    nConnectId = connect(nSocketId, (struct sockaddr*)&serv_addr, serv_len);
    if(nConnectId >= 0){
        bRun = true;
    }
}
// Read data from the socket into chbuffer (NUL-padded, one byte reserved).
// return: number of bytes received; 0 (peer closed) marks the socket
//         not-running.
int BaseSocket::Read()
{
    memset(chbuffer, 0x00, sizeof(chbuffer));
    int len = recv(nSocketId, chbuffer, (sizeof(chbuffer) - 1), 0);
    if(nConnectId < 0 || len == 0) bRun = false;
    return len;
}
// Send a small heartbeat payload to probe that the connection is alive.
// Returns send()'s result; 0 (or a failed connect) marks the socket
// not-running.
int BaseSocket::Monitor()
{
    std::string heart = "matrixai";
    // BUGFIX: was sizeof(heart) - 1, i.e. sizeof(std::string) - 1 (the size
    // of the string OBJECT), which sent bytes past the end of the 8-char
    // payload buffer.  Use the actual text length.
    int len = send(nSocketId, heart.c_str(), heart.size(), 0);
    if(nConnectId < 0 || len == 0) bRun = false;
    return len;
}
// Report whether the socket is connected.
// return: bRun — true while running, false when not connected.
bool BaseSocket::bRuning()
{
    return bRun;
}
// Close the socket and clear the running flag.
// NOTE(review): nConnectId holds connect()'s 0/-1 status, so the != 0 check
// at most calls close(-1), which is harmless; the real descriptor is
// nSocketId.
void BaseSocket::Close()
{
    if(nConnectId != 0){
        close(nConnectId);
    }
    if(nSocketId != 0){
        close(nSocketId);
    }
    bRun = false;
}

View File

@ -0,0 +1,61 @@
//
// Created by matrixai on 3/26/24.
//
#ifndef TRAIN_RFID_LINUX_BASESOCKET_H
#define TRAIN_RFID_LINUX_BASESOCKET_H
#include <string>
#include <stdio.h>
#include <ctype.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include "AppCommon.h"
#define SOCKET_TIME_OUT_SETTING 10
class BaseSocket
{
public:
BaseSocket();
~BaseSocket();
//打开socket通信
void Open();
//初始化
void Init();
//读取socket数据
int Read();
int Monitor();
//关闭socket通信
void Close();
//设定端口
void SetPort(int port, std::string strIp);
char chbuffer[AEI_COMM_BUFFER_SIZE] = {0};
//是否运行
bool bRuning();
//socket返回值
int nSocketId = 0;
//bind返回值
int nConnectId = 0;
private:
//运行状态
bool bRun = false;
//socket绑定的IP信息
struct sockaddr_in serv_addr;
};
#endif //TRAIN_RFID_LINUX_BASESOCKET_H

View File

@ -0,0 +1,208 @@
/*
* Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BLOCKING_QUEUE_H
#define BLOCKING_QUEUE_H
#include "ErrorCode.h"
#include <condition_variable>
#include <list>
#include <mutex>
#include <stdint.h>
static const int DEFAULT_MAX_QUEUE_SIZE = 256;
// Bounded, thread-safe FIFO guarded by one mutex and two condition variables:
// empty_cond_ wakes consumers when an item arrives, full_cond_ wakes
// producers when space frees up. Stop() wakes everyone and makes further
// operations fail fast with APP_ERR_QUEUE_STOPED.
template<typename T> class BlockingQueue {
public:
    BlockingQueue(uint32_t maxSize = DEFAULT_MAX_QUEUE_SIZE) : max_size_(maxSize), is_stoped_(false) {}
    ~BlockingQueue() {}
    // Block until an item is available (or the queue is stopped), then pop it.
    APP_ERROR Pop(T &item)
    {
        std::unique_lock<std::mutex> lock(mutex_);
        while (queue_.empty() && !is_stoped_) {
            empty_cond_.wait(lock);
        }
        if (is_stoped_) {
            return APP_ERR_QUEUE_STOPED;
        }
        if (queue_.empty()) {
            return APP_ERR_QUEUE_EMPTY;
        }
        item = queue_.front();
        queue_.pop_front();
        full_cond_.notify_one();
        return APP_ERR_OK;
    }
    // Pop with a timeout in milliseconds.
    // Fix: the original re-armed wait_for() with the FULL timeout after every
    // wakeup, so on a queue that stays empty the call never timed out and
    // blocked forever. Waiting against an absolute deadline bounds the total
    // blocking time; on timeout APP_ERR_QUEUE_EMPTY is returned.
    APP_ERROR Pop(T &item, unsigned int timeOutMs)
    {
        std::unique_lock<std::mutex> lock(mutex_);
        auto deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(timeOutMs);
        while (queue_.empty() && !is_stoped_) {
            if (empty_cond_.wait_until(lock, deadline) == std::cv_status::timeout) {
                break;
            }
        }
        if (is_stoped_) {
            return APP_ERR_QUEUE_STOPED;
        }
        if (queue_.empty()) {
            return APP_ERR_QUEUE_EMPTY;
        }
        item = queue_.front();
        queue_.pop_front();
        full_cond_.notify_one();
        return APP_ERR_OK;
    }
    // Append an item; when isWait is true, block while the queue is full.
    APP_ERROR Push(const T &item, bool isWait = false)
    {
        std::unique_lock<std::mutex> lock(mutex_);
        while (queue_.size() >= max_size_ && isWait && !is_stoped_) {
            full_cond_.wait(lock);
        }
        if (is_stoped_) {
            return APP_ERR_QUEUE_STOPED;
        }
        if (queue_.size() >= max_size_) {
            return APP_ERROR_QUEUE_FULL;
        }
        queue_.push_back(item);
        empty_cond_.notify_one();
        return APP_ERR_OK;
    }
    // Prepend an item (priority insert); when isWait is true, block while full.
    APP_ERROR Push_Front(const T &item, bool isWait = false)
    {
        std::unique_lock<std::mutex> lock(mutex_);
        while (queue_.size() >= max_size_ && isWait && !is_stoped_) {
            full_cond_.wait(lock);
        }
        if (is_stoped_) {
            return APP_ERR_QUEUE_STOPED;
        }
        if (queue_.size() >= max_size_) {
            return APP_ERROR_QUEUE_FULL;
        }
        queue_.push_front(item);
        empty_cond_.notify_one();
        return APP_ERR_OK;
    }
    // Wake every waiter and make all subsequent operations fail fast.
    void Stop()
    {
        {
            std::unique_lock<std::mutex> lock(mutex_);
            is_stoped_ = true;
        }
        full_cond_.notify_all();
        empty_cond_.notify_all();
    }
    // Allow the queue to be used again after Stop().
    void Restart()
    {
        std::unique_lock<std::mutex> lock(mutex_);
        is_stoped_ = false;
    }
    // if the queue is stoped, need call this function to release the unprocessed items
    std::list<T> GetRemainItems()
    {
        std::unique_lock<std::mutex> lock(mutex_);
        if (!is_stoped_) {
            return std::list<T>();
        }
        return queue_;
    }
    // Copy the last item without removing it.
    // Fix: the original read queue_ WITHOUT holding mutex_, racing with
    // concurrent Push/Pop; the lock is now taken like everywhere else.
    APP_ERROR GetBackItem(T &item)
    {
        std::unique_lock<std::mutex> lock(mutex_);
        if (is_stoped_) {
            return APP_ERR_QUEUE_STOPED;
        }
        if (queue_.empty()) {
            return APP_ERR_QUEUE_EMPTY;
        }
        item = queue_.back();
        return APP_ERR_OK;
    }
    // Expose the internal lock for callers that need multi-step atomicity.
    std::mutex *GetLock()
    {
        return &mutex_;
    }
    APP_ERROR IsFull()
    {
        std::unique_lock<std::mutex> lock(mutex_);
        return queue_.size() >= max_size_;
    }
    // Fix: lock added; the size was previously read unsynchronized.
    int GetSize()
    {
        std::unique_lock<std::mutex> lock(mutex_);
        return queue_.size();
    }
    // Fix: lock added for the same reason as GetSize().
    APP_ERROR IsEmpty()
    {
        std::unique_lock<std::mutex> lock(mutex_);
        return queue_.empty();
    }
    void Clear()
    {
        std::unique_lock<std::mutex> lock(mutex_);
        queue_.clear();
    }
private:
    std::list<T> queue_;                  // underlying storage
    std::mutex mutex_;                    // guards every member below
    std::condition_variable empty_cond_;  // signaled when an item is pushed
    std::condition_variable full_cond_;   // signaled when an item is popped
    uint32_t max_size_;                   // capacity bound
    bool is_stoped_;                      // set by Stop(), cleared by Restart()
};
#endif // __INC_BLOCKING_QUEUE_H__

152
src/base/CBase64/CBase64.h Normal file
View File

@ -0,0 +1,152 @@
/*
* Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _CBASE64_H_
#define _CBASE64_H_
namespace {
const int SHIFT_NUMBER_2 = 2;
const int SHIFT_NUMBER_4 = 4;
const int SHIFT_NUMBER_6 = 6;
const int SHIFT_NUMBER_8 = 8;
const int SHIFT_NUMBER_12 = 12;
const int SHIFT_NUMBER_16 = 16;
const int SHIFT_NUMBER_18 = 18;
const int EACH_STEP_SIZE = 4;
const int NORMAL_NUMBER_1 = 1;
const int NORMAL_NUMBER_2 = 2;
const int NORMAL_NUMBER_3 = 3;
}
// Stateless base64 encoder/decoder (standard alphabet, '=' padding).
class CBase64 {
public:
    CBase64() = default;
    ~CBase64() = default;
    /*
     * base64 encode
     * @param buffer input bytes
     * @param dataSize number of bytes of buffer to encode
     * @return base64 string (no line breaks are inserted)
     */
    static std::string Encode(const std::string &buffer, int dataSize)
    {
        // coding table
        static const char kAlphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
        const char *cursor = buffer.c_str();
        std::string encoded;
        const int groupSize = 3;
        // Every complete 3-byte group becomes four 6-bit symbols.
        const int fullGroups = dataSize / groupSize;
        for (int g = 0; g < fullGroups; ++g) {
            unsigned char b0 = *cursor++;
            unsigned char b1 = *cursor++;
            unsigned char b2 = *cursor++;
            encoded += kAlphabet[b0 >> SHIFT_NUMBER_2];
            encoded += kAlphabet[((b0 << SHIFT_NUMBER_4) | (b1 >> SHIFT_NUMBER_4)) & 0x3F];
            encoded += kAlphabet[((b1 << SHIFT_NUMBER_2) | (b2 >> SHIFT_NUMBER_6)) & 0x3F];
            encoded += kAlphabet[b2 & 0x3F];
        }
        // A 1- or 2-byte tail is padded with '=' to a full 4-symbol group.
        const int remainder = dataSize % groupSize;
        if (remainder == NORMAL_NUMBER_1) {
            unsigned char b0 = *cursor++;
            encoded += kAlphabet[(b0 & 0xFC) >> SHIFT_NUMBER_2];
            encoded += kAlphabet[(b0 & 0x03) << SHIFT_NUMBER_4];
            encoded += "==";
        } else if (remainder == NORMAL_NUMBER_2) {
            unsigned char b0 = *cursor++;
            unsigned char b1 = *cursor++;
            encoded += kAlphabet[(b0 & 0xFC) >> SHIFT_NUMBER_2];
            encoded += kAlphabet[((b0 & 0x03) << SHIFT_NUMBER_4) | ((b1 & 0xF0) >> SHIFT_NUMBER_4)];
            encoded += kAlphabet[(b1 & 0x0F) << SHIFT_NUMBER_2];
            encoded += "=";
        }
        return encoded;
    }
    /*
     * base64 decode
     * @param buffer base64 encoded text ('\r'/'\n' between groups are skipped)
     * @param dataSize number of characters of buffer to decode
     * @param outSize incremented by the number of bytes produced
     *                (NOTE(review): never reset here -- callers must zero it)
     * @return decoded bytes
     */
    static std::string Decode(const std::string &buffer, int dataSize, int &outSize)
    {
        const char kReverse[] = {
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            62, // '+'
            0, 0, 0,
            63, // '/'
            52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // '0'-'9'
            0, 0, 0, 0, 0, 0, 0,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
            13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // 'A'-'Z'
            0, 0, 0, 0, 0, 0,
            26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
            39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // 'a'-'z'
        };
        const char *cursor = buffer.c_str();
        std::string decoded;
        int acc;
        int consumed = 0;
        while (consumed < dataSize) {
            if (*cursor == '\r' || *cursor == '\n') {
                // Line break between symbol groups: skip one character.
                cursor++;
                consumed++;
                continue;
            }
            // Fold up to four symbols into a 24-bit accumulator, emitting one
            // byte per symbol after the first, stopping at '=' padding.
            acc = kReverse[(unsigned char) (*cursor++)] << SHIFT_NUMBER_18;
            acc += kReverse[(unsigned char) (*cursor++)] << SHIFT_NUMBER_12;
            decoded += (acc & 0x00FF0000) >> SHIFT_NUMBER_16;
            outSize++;
            if (*cursor == '=') {
                consumed += EACH_STEP_SIZE;
                continue;
            }
            acc += kReverse[(unsigned char) (*cursor++)] << SHIFT_NUMBER_6;
            decoded += (acc & 0x0000FF00) >> SHIFT_NUMBER_8;
            outSize++;
            if (*cursor != '=') {
                acc += kReverse[(unsigned char) (*cursor++)];
                decoded += acc & 0x000000FF;
                outSize++;
            }
            consumed += EACH_STEP_SIZE;
        }
        return decoded;
    }
};
#endif

View File

@ -0,0 +1,239 @@
/*
* Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <sstream>
#include "CommandParser.h"
namespace {
const int DEFAULT_LENGTH = 30; // The length of delimiter for help information
const int MOD2 = 2; // Constant to make sure the parameters after ./main is pairs
}
// Default-construct the parser and register the built-in help switches so
// "-h" and "-help" are always recognized.
CommandParser::CommandParser()
{
    commands_.emplace("-h", std::make_pair("help", "show helps"));
    commands_.emplace("-help", std::make_pair("help", "show helps"));
}
// Register an option: key is the switch ("-x"), value is (default, help text).
// Re-registering an option overwrites its previous entry.
void CommandParser::AddOption(const std::string &option, const std::string &defaults, const std::string &message)
{
    commands_[option] = {defaults, message};
}
// Construct a new Command Parser object according to the argument
// Attention: This function may cause the program to exit directly
// (ParseArgs calls ShowUsage(), which exits, on any malformed input).
CommandParser::CommandParser(int argc, const char **argv)
{
    ParseArgs(argc, argv);
}
// Attention: This function will cause the program to exit directly when calling ShowUsage()
// Validates argv in several passes; any violation prints usage and exits.
void CommandParser::ParseArgs(int argc, const char **argv)
{
    // Arguments must come in "-option value" pairs after the program name,
    // so the total argument count must be odd.
    if (argc % MOD2 == 0) {
        ShowUsage();
    }
    // Pass 1: an explicit help request wins over everything else.
    for (int i = 1; i < argc; ++i) {
        std::string input(argv[i]);
        if (input == "-h" || input == "-help") {
            ShowUsage();
        }
    }
    // Pass 2: every option must start with '-' and be followed by a value
    // that does not start with '-'.
    // NOTE(review): this also rejects negative numbers as values and exits
    // when the last option has no value -- confirm that is intended.
    for (int i = 1; i < argc; ++i) {
        if (i + 1 < argc && argv[i][0] == '-' && argv[i + 1][0] != '-') {
            ++i;
            continue;
        }
        ShowUsage();
    }
    // Pass 3: every option must have been registered via AddOption().
    // (i advances by 2 per pair: once here, once in the loop header.)
    for (int i = 1; i < argc; ++i) {
        if (commands_.find(argv[i]) == commands_.end()) {
            ShowUsage();
        }
        ++i;
    }
    // Pass 4: store each value as its option's current setting.
    for (int i = 1; i < argc; ++i) {
        if (argv[i][0] == '-') {
            if (i + 1 < argc && argv[i + 1][0] != '-') {
                commands_[argv[i]].first = argv[i + 1];
                ++i;
            }
        }
    }
}
// Look up the current string value of an option.
// Attention: exits the process via ShowUsage() when the option was never registered.
const std::string &CommandParser::GetStringOption(const std::string &option)
{
    auto entry = commands_.find(option);
    if (entry == commands_.end()) {
        std::cout << "GetStringOption fail, can not find the option " << option << ", make sure the option is correct!"
                  << std::endl;
        ShowUsage();
    }
    return entry->second.first;
}
// Look up an option and convert its value to int.
// Attention: exits the process via ShowUsage() on non-integer input.
const int CommandParser::GetIntOption(const std::string &option)
{
    std::string raw = GetStringOption(option);
    if (!IsInteger(raw)) {
        std::cout << "input value " << raw << " after" << option << " is invalid" << std::endl;
        ShowUsage();
    }
    int parsed = 0;
    std::stringstream(raw) >> parsed;
    return parsed;
}
// Look up an option and convert its value to uint32_t.
// Attention: exits the process via ShowUsage() on invalid input.
// Fix: a leading '-' used to pass IsInteger() and the negative number then
// silently wrapped to a huge unsigned value; negative input is now rejected
// like any other invalid value.
const uint32_t CommandParser::GetUint32Option(const std::string &option)
{
    std::string str = GetStringOption(option);
    if (!IsInteger(str) || (!str.empty() && str[0] == '-')) {
        std::cout << "input value " << str << " after" << option << " is invalid" << std::endl;
        ShowUsage();
    }
    std::stringstream ss(str);
    uint32_t value = 0;
    ss >> value;
    return value;
}
// Look up an option and convert its value to float.
// Attention: exits the process via ShowUsage() on non-decimal input.
const float CommandParser::GetFloatOption(const std::string &option)
{
    std::string raw = GetStringOption(option);
    if (!IsDecimal(raw)) {
        std::cout << "input value " << raw << " after" << option << " is invalid" << std::endl;
        ShowUsage();
    }
    float parsed = 0.0;
    std::stringstream(raw) >> parsed;
    return parsed;
}
// Look up an option and convert its value to double.
// Attention: exits the process via ShowUsage() on non-decimal input.
const double CommandParser::GetDoubleOption(const std::string &option)
{
    std::string raw = GetStringOption(option);
    if (!IsDecimal(raw)) {
        std::cout << "input value " << raw << " after" << option << " is invalid" << std::endl;
        ShowUsage();
    }
    double parsed = 0.0;
    std::stringstream(raw) >> parsed;
    return parsed;
}
// Look up an option and interpret its value as a boolean.
// Accepts true/True/TRUE and false/False/FALSE only.
// Attention: exits the process via ShowUsage() on any other value.
const bool CommandParser::GetBoolOption(const std::string &option)
{
    std::string raw = GetStringOption(option);
    if (raw == "true" || raw == "True" || raw == "TRUE") {
        return true;
    }
    if (raw == "false" || raw == "False" || raw == "FALSE") {
        return false;
    }
    std::cout << "GetBoolOption fail, make sure you set the correct value true or false, but not " << raw;
    ShowUsage();
    return false;
}
// Show the usage of app, then exit
// Attention: This function will cause the program to exit directly after printing usage
// Layout: three left-justified columns (option | default | description), each
// DEFAULT_LENGTH wide; a field that overflows its column pushes the remaining
// fields onto following lines.
void CommandParser::ShowUsage() const
{
    std::string space(DEFAULT_LENGTH, ' ');
    std::string split(DEFAULT_LENGTH, '-');
    std::cout << std::endl << split << "help information" << split << std::endl;
    std::cout.setf(std::ios::left);
    for (auto &it : commands_) {
        // Option name overflows its column: print it on its own line.
        if (it.first.size() >= DEFAULT_LENGTH) {
            std::cout << it.first << std::endl;
            if (it.second.first.size() >= DEFAULT_LENGTH) {
                std::cout << space << it.second.first << std::endl;
                std::cout << space << space << it.second.second << std::endl;
                continue;
            }
            std::cout << std::setw(DEFAULT_LENGTH) << it.second.first << std::setw(DEFAULT_LENGTH) << it.second.second
                << std::endl;
            continue;
        }
        // Default value overflows: the description moves to the next line.
        if (it.second.first.size() >= DEFAULT_LENGTH) {
            std::cout << std::setw(DEFAULT_LENGTH) << it.first << std::setw(DEFAULT_LENGTH) << it.second.first
                << std::endl;
            std::cout << space << space << std::setw(DEFAULT_LENGTH) << it.second.second << std::endl;
            continue;
        }
        // Normal case: all three fields fit on one row.
        std::cout << std::setw(DEFAULT_LENGTH) << it.first << std::setw(DEFAULT_LENGTH) << it.second.first
            << std::setw(DEFAULT_LENGTH) << it.second.second << std::endl;
    }
    // NOTE(review): setf(std::ios::right) sets the right flag without clearing
    // left; harmless here only because the process exits immediately after.
    std::cout.setf(std::ios::right);
    std::cout << std::endl;
    exit(0);
}
// Return true when str is a base-10 integer literal with an optional
// leading '-'.
// Fix: the original accepted the empty string and a lone "-" because it only
// rejected non-digit characters; at least one digit is now required.
bool CommandParser::IsInteger(std::string &str) const
{
    size_t digitCount = 0;
    for (size_t i = 0; i < str.size(); ++i) {
        if (i == 0 && str[i] == '-') {
            continue;
        }
        if (str[i] < '0' || str[i] > '9') {
            return false;
        }
        ++digitCount;
    }
    return digitCount > 0;
}
// Return true when str is a decimal number: optional leading '-', at most
// one '.', digits everywhere else.
// Fix: the original accepted "", "-", and "." (inputs with no digits at
// all); at least one digit is now required.
bool CommandParser::IsDecimal(std::string &str) const
{
    size_t dotNum = 0;
    size_t digitCount = 0;
    for (size_t i = 0; i < str.size(); ++i) {
        if (i == 0 && str[i] == '-') {
            continue;
        }
        if (str[i] == '.') {
            ++dotNum;
            continue;
        }
        if (str[i] < '0' || str[i] > '9') {
            return false;
        }
        ++digitCount;
    }
    return dotNum <= 1 && digitCount > 0;
}

View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef COMMAND_PARSER_H
#define COMMAND_PARSER_H
#include <string>
#include <map>
#include <vector>
// Command parser class.
// Parses "./prog -option value ..." style command lines. Options are stored
// as option -> (current value, help text); malformed input makes the parser
// print usage and exit the process.
class CommandParser {
public:
    CommandParser();
    // Construct a new Command Parser object and immediately parse argv.
    CommandParser(int argc, const char **argv);
    ~CommandParser() {};
    // Register an option with its default value and help message.
    void AddOption(const std::string &option, const std::string &defaults = "", const std::string &message = "");
    // Parse the input arguments; exits the process on malformed input.
    void ParseArgs(int argc, const char **argv);
    // Get the option string value from parser.
    const std::string &GetStringOption(const std::string &option);
    // Typed accessors; each exits the process when the stored value does not
    // parse as the requested type.
    const int GetIntOption(const std::string &option);
    const uint32_t GetUint32Option(const std::string &option);
    const float GetFloatOption(const std::string &option);
    const double GetDoubleOption(const std::string &option);
    const bool GetBoolOption(const std::string &option);
private:
    // option -> (current value, help message)
    std::map<std::string, std::pair<std::string, std::string>> commands_;
    // Show the usage of app, then exit.
    void ShowUsage() const;
    bool IsInteger(std::string &str) const;
    bool IsDecimal(std::string &str) const;
};
#endif /* COMMANDPARSER_H */

View File

@ -0,0 +1,189 @@
/*
* Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef COMMONDATATYPE_H
#define COMMONDATATYPE_H
#include <stdio.h>
#include <iostream>
#include <memory>
#include <vector>
#include "nvidia_acl_datatype.h"
// Round x up to the next multiple of align (align must be non-zero).
#define ALIGN_UP(x, align) ((((x) + ((align)-1)) / (align)) * (align))
// Video stream codec selectors.
const uint32_t VIDEO_H264 = 0;
const uint32_t VIDEO_H265 = 1;
// Seconds-to-milliseconds conversion factor.
const float SEC2MS = 1000.0;
const int YUV_BGR_SIZE_CONVERT_3 = 3;
const int YUV_BGR_SIZE_CONVERT_2 = 2;
const int DVPP_JPEG_OFFSET = 8;
// Buffer alignment requirements for VPC / JPEG processing.
const int VPC_WIDTH_ALIGN = 16;
const int VPC_HEIGHT_ALIGN = 2;
const int JPEG_WIDTH_ALIGN = 128;
const int JPEG_HEIGHT_ALIGN = 16;
const int VPC_OFFSET_ALIGN = 2;
#ifdef _WIN32
// access() mode flags, supplied by <unistd.h> on POSIX systems.
const int W_OK = 2;
const int R_OK = 4;
#endif
// Tensor Descriptor
struct Tensor {
    nvidia_aclDataType dataType; // Tensor data type
    nvidia_aclFormat format; // Format of tensor, e.g. ND, NCHW, NC1HWC0
    int numDim; // Number of dimensions of Tensor
    std::vector<int64_t> dims; // Dimension vector
    std::string name; // Name of tensor
};
// Data type of an operator attribute value.
// NOTE(review): value 5 is unused in this enum -- confirm intentional.
enum OpAttrType {
    BOOL_TYPE = 0,
    INT_TYPE = 1,
    FLOAT_TYPE = 2,
    STRING_TYPE = 3,
    LIST_BOOL_TYPE = 4,
    LIST_INT_TYPE = 6,
    LIST_FLOAT_TYPE = 7,
    LIST_STRING_TYPE = 8,
    LIST_LIST_INT_TYPE = 9,
};
// operator attribution describe
// type decides which of the other fields needs to be set
struct OpAttr {
    std::string name;
    OpAttrType type;
    int num; // LIST_BOOL/INT/FLOAT/STRING/LIST_LIST_INT need
    uint8_t numBool; // BOOL need
    int64_t numInt; // INT need
    float numFloat; // FLOAT need
    std::string numString; // STRING need
    std::vector<uint8_t> valuesBool; // LIST_BOOL need
    std::vector<int64_t> valuesInt; // LIST_INT need
    std::vector<float> valuesFloat; // LIST_FLOAT need
    std::vector<std::string> valuesString; // LIST_STRING need
    std::vector<int> numLists; // LIST_LIST_INT need
    std::vector<std::vector<int64_t>> valuesListList; // LIST_LIST_INT need
};
// Description of image data
struct ImageInfo {
    uint32_t width; // Image width
    uint32_t height; // Image height
    uint32_t lenOfByte; // Size of image data, bytes
    std::shared_ptr<uint8_t> data; // Smart pointer of image data
};
// Description of data in device
struct RawData {
    size_t lenOfByte; // Size of memory, bytes
    std::shared_ptr<void> data; // Smart pointer of data
};
// Description of data in device
struct StreamData {
    size_t size; // Size of memory, bytes
    std::shared_ptr<void> data; // Smart pointer of data
};
// Description of stream data
struct StreamInfo {
    std::string format;
    uint32_t height;
    uint32_t width;
    uint32_t channelId;
    std::string streamPath;
};
// define the structure of an rectangle
struct Rectangle {
    uint32_t leftTopX;
    uint32_t leftTopY;
    uint32_t rightBottomX;
    uint32_t rightBottomY;
};
// One detection result: class, score and bounding box.
struct ObjectDetectInfo {
    int32_t classId;
    float confidence;
    struct Rectangle location;
};
// Aspect-ratio handling policy for VPC resize operations.
enum VpcProcessType {
    VPC_PT_DEFAULT = 0,
    VPC_PT_PADDING, // Resize with locked ratio and paste on upper left corner
    VPC_PT_FIT, // Resize with locked ratio and paste on middle location
    VPC_PT_FILL, // Resize with locked ratio and paste on whole locatin, the input image may be cropped
};
// Crop region boundaries.
struct CropRoiConfig {
    uint32_t left;
    uint32_t right;
    uint32_t down;
    uint32_t up;
};
// Raw DVPP image buffer plus its geometry.
struct DvppDataInfo {
    uint32_t width = 0; // Width of image
    uint32_t height = 0; // Height of image
    uint32_t widthStride = 0; // Width after align up
    uint32_t heightStride = 0; // Height after align up
    nvidia_acldvppPixelFormat format = PIXEL_FORMAT_YUV_SEMIPLANAR_420; // Format of image
    uint32_t frameId = 0; // Needed by video
    uint32_t dataSize = 0; // Size of data in byte
    uint8_t *data = nullptr; // Image data (raw pointer -- ownership not expressed here)
    uint64_t timestamp; // NOTE(review): no default initializer unlike the other fields -- confirm intentional
};
// A crop request: source buffer plus region of interest.
struct DvppCropInputInfo {
    DvppDataInfo dataInfo;
    CropRoiConfig roi;
};
// Description of matrix info
struct MatrixInfo {
    uint32_t row = 0; // row of matrix
    uint32_t col = 0; // col of matrix
    uint32_t dataSize = 0; // size of memory, bytes
    std::shared_ptr<void> data = nullptr; // data of matrix
    nvidia_aclDataType dataType = ACL_FLOAT16; // data Type of matrix
};
// Description of coefficient info
struct CoefficientInfo {
    std::shared_ptr<void> data = nullptr; // data of coefficient
    nvidia_aclDataType dataType = ACL_FLOAT16; // dataType
};
// define the input of BLAS operator such as producing:
// C = alpha * A * B + beta * C
struct BlasInput {
    MatrixInfo A;
    MatrixInfo B;
    MatrixInfo C;
    CoefficientInfo alpha;
    CoefficientInfo beta;
};
#endif

View File

@ -0,0 +1,235 @@
/*
* Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sstream>
#include <functional>
#include "ConfigParser.h"
namespace {
const char COMMENT_CHARATER = '#';
// Break inString at each occurrence of delimiter and append the pieces to
// outVector. A trailing delimiter does not produce a trailing empty piece.
void Split(const std::string &inString, std::vector<std::string> &outVector, const char delimiter)
{
    std::stringstream tokenStream(inString);
    std::string piece;
    while (std::getline(tokenStream, piece, delimiter)) {
        outVector.push_back(piece);
    }
}
}
// Remove all whitespace characters from str in place.
// Fix: the character is cast to unsigned char before calling isspace();
// passing a plain (possibly negative) char to isspace() is undefined behavior.
inline void ConfigParser::RemoveAllSpaces(std::string &str) const
{
    str.erase(std::remove_if(str.begin(), str.end(),
        [](unsigned char ch) { return ::isspace(ch) != 0; }),
        str.end());
}
// Strip leading and trailing whitespace from str in place.
// Fix: std::not1/std::ptr_fun (deprecated in C++11, removed in C++17) are
// replaced with a lambda, which also casts to unsigned char before calling
// isspace() to avoid undefined behavior on negative char values.
inline void ConfigParser::Trim(std::string &str) const
{
    auto notSpace = [](unsigned char ch) { return ::isspace(ch) == 0; };
    str.erase(str.begin(), std::find_if(str.begin(), str.end(), notSpace));
    str.erase(std::find_if(str.rbegin(), str.rend(), notSpace).base(), str.end());
}
// Read fileName and store every "key = value" line in configData_.
// '#' starts a comment; blank lines and lines without '=' are skipped.
// Returns APP_ERR_COMM_EXIST when the file cannot be opened.
// Fix: positions now use std::string::size_type / npos instead of comparing
// an int against -1, and a trailing '\r' is stripped so CRLF (Windows-edited)
// config files parse cleanly.
APP_ERROR ConfigParser::ParseConfig(const std::string &fileName)
{
    std::ifstream inFile(fileName);
    if (!inFile.is_open()) {
        std::cout << "cannot read setup.config file!" << std::endl;
        return APP_ERR_COMM_EXIST;
    }
    std::string line;
    while (getline(inFile, line)) {
        if (!line.empty() && line.back() == '\r') {
            line.pop_back(); // tolerate CRLF line endings
        }
        if (line.empty()) {
            continue;
        }
        // Drop everything from the comment character onward.
        std::string::size_type pos = line.find(COMMENT_CHARATER);
        if (pos == 0) {
            continue; // whole line is a comment
        }
        std::string payload = (pos == std::string::npos) ? line : line.substr(0, pos);
        std::string::size_type eq = payload.find('=');
        if (eq == std::string::npos) {
            continue; // not a key-value line
        }
        std::string key = payload.substr(0, eq);
        Trim(key); // delete the surrounding spaces of the key name
        std::string value = payload.substr(eq + 1);
        Trim(value); // delete the surrounding spaces of the value
        configData_.insert(std::make_pair(key, value)); // first occurrence wins
    }
    return APP_ERR_OK;
}
// Get the string value by key name; APP_ERR_COMM_NO_EXIST when absent.
APP_ERROR ConfigParser::GetStringValue(const std::string &name, std::string &value) const
{
    auto entry = configData_.find(name);
    if (entry == configData_.end()) {
        return APP_ERR_COMM_NO_EXIST;
    }
    value = entry->second;
    return APP_ERR_OK;
}
// Get the int value by key name; APP_ERR_COMM_INVALID_PARAM when the stored
// text does not parse as an int.
APP_ERROR ConfigParser::GetIntValue(const std::string &name, int &value) const
{
    auto entry = configData_.find(name);
    if (entry == configData_.end()) {
        return APP_ERR_COMM_NO_EXIST;
    }
    std::stringstream parser(entry->second);
    if (!(parser >> value)) {
        return APP_ERR_COMM_INVALID_PARAM;
    }
    return APP_ERR_OK;
}
// Get the unsigned integer value by key name; APP_ERR_COMM_INVALID_PARAM
// when the stored text does not parse.
APP_ERROR ConfigParser::GetUnsignedIntValue(const std::string &name, unsigned int &value) const
{
    auto entry = configData_.find(name);
    if (entry == configData_.end()) {
        return APP_ERR_COMM_NO_EXIST;
    }
    std::stringstream parser(entry->second);
    if (!(parser >> value)) {
        return APP_ERR_COMM_INVALID_PARAM;
    }
    return APP_ERR_OK;
}
// Get the bool value; only the exact literals "true" and "false" are accepted.
APP_ERROR ConfigParser::GetBoolValue(const std::string &name, bool &value) const
{
    auto entry = configData_.find(name);
    if (entry == configData_.end()) {
        return APP_ERR_COMM_NO_EXIST;
    }
    if (entry->second == "true") {
        value = true;
        return APP_ERR_OK;
    }
    if (entry->second == "false") {
        value = false;
        return APP_ERR_OK;
    }
    return APP_ERR_COMM_INVALID_PARAM;
}
// Get the float value; APP_ERR_COMM_INVALID_PARAM when the text does not parse.
APP_ERROR ConfigParser::GetFloatValue(const std::string &name, float &value) const
{
    auto entry = configData_.find(name);
    if (entry == configData_.end()) {
        return APP_ERR_COMM_NO_EXIST;
    }
    std::stringstream parser(entry->second);
    if (!(parser >> value)) {
        return APP_ERR_COMM_INVALID_PARAM;
    }
    return APP_ERR_OK;
}
// Get the double value; APP_ERR_COMM_INVALID_PARAM when the text does not parse.
APP_ERROR ConfigParser::GetDoubleValue(const std::string &name, double &value) const
{
    auto entry = configData_.find(name);
    if (entry == configData_.end()) {
        return APP_ERR_COMM_NO_EXIST;
    }
    std::stringstream parser(entry->second);
    if (!(parser >> value)) {
        return APP_ERR_COMM_INVALID_PARAM;
    }
    return APP_ERR_OK;
}
// Parse a comma-separated array like "1,2,4,8" into vector.
// Fix: the original built each value with `std::stringstream ss(it); ss << it;`,
// writing the token into the stream a second time (the write merely overwrote
// the constructor's copy, so it worked by accident), and declared an extra
// unused stream in the outer scope; each token now gets one clean parse.
APP_ERROR ConfigParser::GetVectorUint32Value(const std::string &name, std::vector<uint32_t> &vector) const
{
    auto entry = configData_.find(name);
    if (entry == configData_.end()) {
        return APP_ERR_COMM_NO_EXIST;
    }
    std::vector<std::string> splits;
    Split(entry->second, splits, ',');
    for (auto &token : splits) {
        if (token.empty()) {
            continue; // skip empty segments such as "1,,2"
        }
        uint32_t value = 0;
        std::stringstream parser(token);
        parser >> value;
        vector.push_back(value);
    }
    return APP_ERR_OK;
}
// new config
// Open fileName as the destination for the Write* methods; std::ios::app
// appends to an existing file rather than truncating it.
void ConfigParser::NewConfig(const std::string &fileName)
{
    outfile_.open(fileName, std::ios::app);
    return;
}
// Append one "key = value" line for a string setting.
void ConfigParser::WriteString(const std::string &key, const std::string &value)
{
    outfile_ << key << " = ";
    outfile_ << value << std::endl;
}
// Append one "key = value" line for an int setting.
void ConfigParser::WriteInt(const std::string &key, const int &value)
{
    outfile_ << key << " = ";
    outfile_ << value << std::endl;
}
// Append one "key = true|false" line for a bool setting.
// Fix: streaming a bool directly wrote "1"/"0", which GetBoolValue() cannot
// read back (it accepts only the literals "true" and "false"); the value is
// now written in the form the reader expects.
void ConfigParser::WriteBool(const std::string &key, const bool &value)
{
    outfile_ << key << " = " << (value ? "true" : "false") << std::endl;
    return;
}
// Append one "key = value" line for a float setting.
void ConfigParser::WriteFloat(const std::string &key, const float &value)
{
    outfile_ << key << " = ";
    outfile_ << value << std::endl;
}
// Append one "key = value" line for a double setting.
void ConfigParser::WriteDouble(const std::string &key, const double &value)
{
    outfile_ << key << " = ";
    outfile_ << value << std::endl;
}
// Append one "key = value" line for a uint32_t setting.
void ConfigParser::WriteUint32(const std::string &key, const uint32_t &value)
{
    outfile_ << key << " = ";
    outfile_ << value << std::endl;
}
// Flush and close the config file opened by NewConfig().
void ConfigParser::SaveConfig()
{
    outfile_.close();
    return;
}

View File

@ -0,0 +1,68 @@
/*
* Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CONFIG_PARSER_H
#define CONFIG_PARSER_H
#include <algorithm>
#include <fstream>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>
#include "ErrorCode.h"
// Reads "key = value" style config files ('#' starts a comment) into a string
// map and exposes typed getters; also supports writing a new config file.
class ConfigParser {
public:
    // Read the config file and save the useful infomation with the key-value pairs format in configData_
    APP_ERROR ParseConfig(const std::string &fileName);
    // Get the string value by key name
    APP_ERROR GetStringValue(const std::string &name, std::string &value) const;
    // Get the int value by key name
    APP_ERROR GetIntValue(const std::string &name, int &value) const;
    // Get the unsigned int value by key name
    APP_ERROR GetUnsignedIntValue(const std::string &name, unsigned int &value) const;
    // Get the bool value by key name (the stored text must be "true" or "false")
    APP_ERROR GetBoolValue(const std::string &name, bool &value) const;
    // Get the float value by key name
    APP_ERROR GetFloatValue(const std::string &name, float &value) const;
    // Get the double value by key name
    APP_ERROR GetDoubleValue(const std::string &name, double &value) const;
    // Get the vector by key name, split by ","
    APP_ERROR GetVectorUint32Value(const std::string &name, std::vector<uint32_t> &vector) const;
    // Open (append mode) the file the Write* methods will fill
    void NewConfig(const std::string &fileName);
    // Write the values into new config file
    void WriteString(const std::string &key, const std::string &value);
    void WriteInt(const std::string &key, const int &value);
    void WriteBool(const std::string &key, const bool &value);
    void WriteFloat(const std::string &key, const float &value);
    void WriteDouble(const std::string &key, const double &value);
    void WriteUint32(const std::string &key, const uint32_t &value);
    // Close the file opened by NewConfig()
    void SaveConfig();
private:
    std::map<std::string, std::string> configData_ = {}; // Variable to store key-value pairs
    std::ofstream outfile_ = {}; // destination stream for the Write* methods
    // Remove all spaces from the string
    inline void RemoveAllSpaces(std::string &str) const;
    // Remove spaces from both left and right based on the string
    inline void Trim(std::string &str) const;
};
#endif

View File

@ -0,0 +1,56 @@
/*
* Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Log.h"
#include "ErrorCode.h"
// Map an APP_ERROR code to its human-readable message.
// Each error family (ACL, common, DVPP, queue) has its own bounds and string
// table declared in ErrorCode.h; out-of-table codes fall back to a generic text.
std::string GetAppErrCodeInfo(const APP_ERROR err)
{
    if ((err < APP_ERR_ACL_END) && (err >= APP_ERR_ACL_FAILURE)) {
        // ACL codes may be negative (APP_ERR_ACL_FAILURE is -1); negative
        // values are folded onto the tail of the table.
        return APP_ERR_ACL_LOG_STRING[((err < 0) ? (err + APP_ERR_ACL_END + 1) : err)];
    } else if ((err < APP_ERR_COMM_END) && (err > APP_ERR_COMM_BASE)) {
        // Bounds-check the offset against the table length before indexing.
        return (err - APP_ERR_COMM_BASE) <
            (int)sizeof(APP_ERR_COMMON_LOG_STRING) / (int)sizeof(APP_ERR_COMMON_LOG_STRING[0]) ?
            APP_ERR_COMMON_LOG_STRING[err - APP_ERR_COMM_BASE] :
            "Undefine the error code information";
    } else if ((err < APP_ERR_DVPP_END) && (err > APP_ERR_DVPP_BASE)) {
        return (err - APP_ERR_DVPP_BASE) <
            (int)sizeof(APP_ERR_DVPP_LOG_STRING) / (int)sizeof(APP_ERR_DVPP_LOG_STRING[0]) ?
            APP_ERR_DVPP_LOG_STRING[err - APP_ERR_DVPP_BASE] :
            "Undefine the error code information";
    } else if ((err < APP_ERR_QUEUE_END) && (err > APP_ERR_QUEUE_BASE)) {
        return (err - APP_ERR_QUEUE_BASE) <
            (int)sizeof(APP_ERR_QUEUE_LOG_STRING) / (int)sizeof(APP_ERR_QUEUE_LOG_STRING[0]) ?
            APP_ERR_QUEUE_LOG_STRING[err - APP_ERR_QUEUE_BASE] :
            "Undefine the error code information";
    } else {
        return "Error code unknown";
    }
}
// Log the failure location and terminate the process (with the error code as
// the exit status) when code is not APP_ERR_OK.
void AssertErrorCode(int code, std::string file, std::string function, int line)
{
    if (code == APP_ERR_OK) {
        return;
    }
    LogError << "Failed at " << file << "->" << function << "->" << line << ": error code=" << code;
    exit(code);
}
/**
 * Log the failing location when code is not APP_ERR_OK; execution continues.
 *
 * @param code error code to check
 * @param file/function/line call-site identification for the log record
 */
void CheckErrorCode(int code, std::string file, std::string function, int line)
{
    if (code == APP_ERR_OK) {
        return;
    }
    LogError << "Failed at " << file << "->" << function << "->" << line << ": error code=" << code;
}

View File

@ -0,0 +1,255 @@
/*
* Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ERROR_CODE_H
#define ERROR_CODE_H
#include <string>
using APP_ERROR = int;
// define the data type of error code
enum {
    APP_ERR_OK = 0,
    // define the error code of ACL model, this is same with the aclError which is error code of ACL API
    // Error codes 1~999 are reserved for the ACL. Do not add other error codes. Add it after APP_ERR_COMMON_ERR_BASE.
    APP_ERR_ACL_FAILURE = -1, // ACL: general error
    APP_ERR_ACL_ERR_BASE = 0,
    APP_ERR_ACL_INVALID_PARAM = 1, // ACL: invalid parameter
    APP_ERR_ACL_BAD_ALLOC = 2, // ACL: memory allocation fail
    APP_ERR_ACL_RT_FAILURE = 3, // ACL: runtime failure
    APP_ERR_ACL_GE_FAILURE = 4, // ACL: Graph Engine failure
    APP_ERR_ACL_OP_NOT_FOUND = 5, // ACL: operator not found
    APP_ERR_ACL_OP_LOAD_FAILED = 6, // ACL: fail to load operator
    APP_ERR_ACL_READ_MODEL_FAILURE = 7, // ACL: fail to read model
    APP_ERR_ACL_PARSE_MODEL = 8, // ACL: parse model failure
    APP_ERR_ACL_MODEL_MISSING_ATTR = 9, // ACL: model missing attribute
    APP_ERR_ACL_DESERIALIZE_MODEL = 10, // ACL: deserialize model failure
    // value 11 is intentionally unused (matches the "Placeholder" slot in APP_ERR_ACL_LOG_STRING)
    APP_ERR_ACL_EVENT_NOT_READY = 12, // ACL: event not ready
    APP_ERR_ACL_EVENT_COMPLETE = 13, // ACL: event complete
    APP_ERR_ACL_UNSUPPORTED_DATA_TYPE = 14, // ACL: unsupported data type
    APP_ERR_ACL_REPEAT_INITIALIZE = 15, // ACL: repeat initialize
    APP_ERR_ACL_COMPILER_NOT_REGISTERED = 16, // ACL: compiler not registered
    APP_ERR_ACL_IO = 17, // ACL: IO failed
    APP_ERR_ACL_INVALID_FILE = 18, // ACL: invalid file
    APP_ERR_ACL_INVALID_DUMP_CONFIG = 19, // ACL: invalid dump config
    APP_ERR_ACL_INVALID_PROFILING_CONFIG = 20, // ACL: invalid profiling config
    APP_ERR_ACL_OP_TYPE_NOT_MATCH = 21, // ACL: operator type not match
    APP_ERR_ACL_OP_INPUT_NOT_MATCH = 22, // ACL: operator input not match
    APP_ERR_ACL_OP_OUTPUT_NOT_MATCH = 23, // ACL: operator output not match
    APP_ERR_ACL_OP_ATTR_NOT_MATCH = 24, // ACL: operator attribute not match
    APP_ERR_ACL_API_NOT_SUPPORT = 25, // ACL: API not support
    APP_ERR_ACL_CREATE_DATA_BUF_FAILED = 26, // ACL: create data buffer fail
    APP_ERR_ACL_END, // Not an error code, define the range of ACL error code
    // define the common error code, range: 1001~1999
    APP_ERR_COMM_BASE = 1000,
    APP_ERR_COMM_FAILURE = APP_ERR_COMM_BASE + 1, // General Failed
    APP_ERR_COMM_INNER = APP_ERR_COMM_BASE + 2, // Internal error
    APP_ERR_COMM_INVALID_POINTER = APP_ERR_COMM_BASE + 3, // Invalid Pointer
    APP_ERR_COMM_INVALID_PARAM = APP_ERR_COMM_BASE + 4, // Invalid parameter
    APP_ERR_COMM_UNREALIZED = APP_ERR_COMM_BASE + 5, // Not implemented
    APP_ERR_COMM_OUT_OF_MEM = APP_ERR_COMM_BASE + 6, // Out of memory
    APP_ERR_COMM_ALLOC_MEM = APP_ERR_COMM_BASE + 7, // memory allocation error
    APP_ERR_COMM_FREE_MEM = APP_ERR_COMM_BASE + 8, // free memory error
    APP_ERR_COMM_OUT_OF_RANGE = APP_ERR_COMM_BASE + 9, // out of range
    APP_ERR_COMM_NO_PERMISSION = APP_ERR_COMM_BASE + 10, // No Permission
    APP_ERR_COMM_TIMEOUT = APP_ERR_COMM_BASE + 11, // Timed out
    APP_ERR_COMM_NOT_INIT = APP_ERR_COMM_BASE + 12, // Not initialized
    APP_ERR_COMM_INIT_FAIL = APP_ERR_COMM_BASE + 13, // initialize failed
    APP_ERR_COMM_INPROGRESS = APP_ERR_COMM_BASE + 14, // Operation now in progress
    APP_ERR_COMM_EXIST = APP_ERR_COMM_BASE + 15, // Object, file or other resource already exist
    APP_ERR_COMM_NO_EXIST = APP_ERR_COMM_BASE + 16, // Object, file or other resource doesn't exist
    APP_ERR_COMM_BUSY = APP_ERR_COMM_BASE + 17, // Object, file or other resource is in use
    APP_ERR_COMM_FULL = APP_ERR_COMM_BASE + 18, // No available Device or resource
    APP_ERR_COMM_OPEN_FAIL = APP_ERR_COMM_BASE + 19, // Device, file or resource open failed
    APP_ERR_COMM_READ_FAIL = APP_ERR_COMM_BASE + 20, // Device, file or resource read failed
    APP_ERR_COMM_WRITE_FAIL = APP_ERR_COMM_BASE + 21, // Device, file or resource write failed
    APP_ERR_COMM_DESTORY_FAIL = APP_ERR_COMM_BASE + 22, // Device, file or resource destroy failed
    APP_ERR_COMM_EXIT = APP_ERR_COMM_BASE + 23, // End of data stream, stop the application
    APP_ERR_COMM_CONNECTION_CLOSE = APP_ERR_COMM_BASE + 24, // Out of connection, Communication shutdown
    APP_ERR_COMM_CONNECTION_FAILURE = APP_ERR_COMM_BASE + 25, // connection fail
    APP_ERR_COMM_STREAM_INVALID = APP_ERR_COMM_BASE + 26, // ACL stream is null pointer
    APP_ERR_COMM_END, // Not an error code, define the range of common error code
    // define the error code of DVPP
    APP_ERR_DVPP_BASE = 2000,
    APP_ERR_DVPP_CROP_FAIL = APP_ERR_DVPP_BASE + 1, // DVPP: crop fail
    APP_ERR_DVPP_RESIZE_FAIL = APP_ERR_DVPP_BASE + 2, // DVPP: resize fail
    APP_ERR_DVPP_CROP_RESIZE_FAIL = APP_ERR_DVPP_BASE + 3, // DVPP: crop and resize fail
    APP_ERR_DVPP_CONVERT_FROMAT_FAIL = APP_ERR_DVPP_BASE + 4, // DVPP: convert image format fail
    APP_ERR_DVPP_VPC_FAIL = APP_ERR_DVPP_BASE + 5, // DVPP: VPC(crop, resize, convert format) fail
    APP_ERR_DVPP_JPEG_DECODE_FAIL = APP_ERR_DVPP_BASE + 6, // DVPP: decode jpeg or jpg fail
    APP_ERR_DVPP_JPEG_ENCODE_FAIL = APP_ERR_DVPP_BASE + 7, // DVPP: encode jpeg or jpg fail
    APP_ERR_DVPP_PNG_DECODE_FAIL = APP_ERR_DVPP_BASE + 8, // DVPP: decode png fail
    APP_ERR_DVPP_H26X_DECODE_FAIL = APP_ERR_DVPP_BASE + 9, // DVPP: decode H264 or H265 fail
    APP_ERR_DVPP_H26X_ENCODE_FAIL = APP_ERR_DVPP_BASE + 10, // DVPP: encode H264 or H265 fail
    APP_ERR_DVPP_HANDLE_NULL = APP_ERR_DVPP_BASE + 11, // DVPP: acldvppChannelDesc is nullptr
    // DVPP: fail to create acldvppCreatePicDesc or fail to set acldvppCreatePicDesc
    APP_ERR_DVPP_PICDESC_FAIL = APP_ERR_DVPP_BASE + 12,
    // DVPP: fail to set dvpp configuration, such as resize configuration, crop configuration
    APP_ERR_DVPP_CONFIG_FAIL = APP_ERR_DVPP_BASE + 13,
    APP_ERR_DVPP_OBJ_FUNC_MISMATCH = APP_ERR_DVPP_BASE + 14, // DVPP: DvppCommon object mismatch the function
    APP_ERR_DVPP_END, // Not an error code, define the range of DVPP error code
    // define the error code of inference
    APP_ERR_INFER_BASE = 3000,
    APP_ERR_INFER_SET_INPUT_FAIL = APP_ERR_INFER_BASE + 1, // Infer: set input fail
    APP_ERR_INFER_SET_OUTPUT_FAIL = APP_ERR_INFER_BASE + 2, // Infer: set output fail
    APP_ERR_INFER_CREATE_OUTPUT_FAIL = APP_ERR_INFER_BASE + 3, // Infer: create output fail
    APP_ERR_INFER_OP_SET_ATTR_FAIL = APP_ERR_INFER_BASE + 4, // Infer: set op attribute fail
    APP_ERR_INFER_GET_OUTPUT_FAIL = APP_ERR_INFER_BASE + 5, // Infer: get model output fail
    APP_ERR_INFER_FIND_MODEL_ID_FAIL = APP_ERR_INFER_BASE + 6, // Infer: find model id fail
    APP_ERR_INFER_FIND_MODEL_DESC_FAIL = APP_ERR_INFER_BASE + 7, // Infer: find model description fail
    APP_ERR_INFER_FIND_MODEL_MEM_FAIL = APP_ERR_INFER_BASE + 8, // Infer: find model memory fail
    APP_ERR_INFER_FIND_MODEL_WEIGHT_FAIL = APP_ERR_INFER_BASE + 9, // Infer: find model weight fail
    APP_ERR_INFER_END, // Not an error code, define the range of inference error code
    // define the error code of transmission
    APP_ERR_TRANS_BASE = 4000,
    APP_ERR_TRANS_END, // Not an error code, define the range of transmission error code
    // define the error code of blocking queue
    APP_ERR_QUEUE_BASE = 5000,
    APP_ERR_QUEUE_EMPTY = APP_ERR_QUEUE_BASE + 1, // Queue: empty queue
    APP_ERR_QUEUE_STOPED = APP_ERR_QUEUE_BASE + 2, // Queue: queue stopped
    APP_ERROR_QUEUE_FULL = APP_ERR_QUEUE_BASE + 3, // Queue: full queue
    // define the idrecognition web error code
    APP_ERROR_FACE_WEB_USE_BASE = 10000,
    APP_ERROR_FACE_WEB_USE_SYSTEM_ERROR = APP_ERROR_FACE_WEB_USE_BASE + 1, // Web: system error
    APP_ERROR_FACE_WEB_USE_MUL_FACE = APP_ERROR_FACE_WEB_USE_BASE + 2, // Web: multiple faces
    APP_ERROR_FACE_WEB_USE_REPEAT_REG = APP_ERROR_FACE_WEB_USE_BASE + 3, // Web: repeat registration
    APP_ERROR_FACE_WEB_USE_PART_SUCCESS = APP_ERROR_FACE_WEB_USE_BASE + 4, // Web: partial search succeeded
    APP_ERROR_FACE_WEB_USE_NO_FACE = APP_ERROR_FACE_WEB_USE_BASE + 5, // Web: no face detected
    APP_ERR_QUEUE_END, // Not an error code, define the range of blocking queue error code
};
// Readable descriptions for the ACL error range; indexed by the error code
// itself (APP_ERR_ACL_FAILURE = -1 maps onto the final slot, index 27).
const std::string APP_ERR_ACL_LOG_STRING[] = {
    "Success",                            // APP_ERR_OK
    "ACL: invalid parameter",             // APP_ERR_ACL_INVALID_PARAM
    "ACL: memory allocation fail",        // APP_ERR_ACL_BAD_ALLOC
    "ACL: runtime failure",               // APP_ERR_ACL_RT_FAILURE
    "ACL: Graph Engine failure",          // APP_ERR_ACL_GE_FAILURE
    "ACL: operator not found",            // APP_ERR_ACL_OP_NOT_FOUND
    "ACL: fail to load operator",         // APP_ERR_ACL_OP_LOAD_FAILED
    "ACL: fail to read model",            // APP_ERR_ACL_READ_MODEL_FAILURE
    "ACL: parse model failure",           // APP_ERR_ACL_PARSE_MODEL
    "ACL: model missing attribute",       // APP_ERR_ACL_MODEL_MISSING_ATTR
    "ACL: deserialize model failure",     // APP_ERR_ACL_DESERIALIZE_MODEL
    "Placeholder",                        // 11 (unused code)
    "ACL: event not ready",               // APP_ERR_ACL_EVENT_NOT_READY
    "ACL: event complete",                // APP_ERR_ACL_EVENT_COMPLETE
    "ACL: unsupported data type",         // APP_ERR_ACL_UNSUPPORTED_DATA_TYPE
    "ACL: repeat initialize",             // APP_ERR_ACL_REPEAT_INITIALIZE
    "ACL: compiler not registered",       // APP_ERR_ACL_COMPILER_NOT_REGISTERED
    "ACL: IO failed",                     // APP_ERR_ACL_IO
    "ACL: invalid file",                  // APP_ERR_ACL_INVALID_FILE
    "ACL: invalid dump config",           // APP_ERR_ACL_INVALID_DUMP_CONFIG (typo "comfig" fixed)
    "ACL: invalid profiling config",      // APP_ERR_ACL_INVALID_PROFILING_CONFIG
    "ACL: operator type not match",       // APP_ERR_ACL_OP_TYPE_NOT_MATCH
    "ACL: operator input not match",      // APP_ERR_ACL_OP_INPUT_NOT_MATCH
    "ACL: operator output not match",     // APP_ERR_ACL_OP_OUTPUT_NOT_MATCH
    "ACL: operator attribute not match",  // APP_ERR_ACL_OP_ATTR_NOT_MATCH
    "ACL: API not supported",             // APP_ERR_ACL_API_NOT_SUPPORT
    "ACL: create data buffer fail",       // APP_ERR_ACL_CREATE_DATA_BUF_FAILED
    "ACL: general failure",               // APP_ERR_ACL_FAILURE + APP_ERR_ACL_END + 1
};
// Readable descriptions for the common error range; indexed by
// (code - APP_ERR_COMM_BASE). Fixed: empty message for APP_ERR_COMM_EXIT,
// "already doesn't exist" grammar, "destory" typo, and trailing spaces.
const std::string APP_ERR_COMMON_LOG_STRING[] = {
    "Placeholder",                                           // 0
    "General Failed",                                        // APP_ERR_COMM_FAILURE - APP_ERR_COMM_BASE
    "Internal error",                                        // APP_ERR_COMM_INNER - APP_ERR_COMM_BASE
    "Invalid Pointer",                                       // APP_ERR_COMM_INVALID_POINTER - APP_ERR_COMM_BASE
    "Invalid parameter",                                     // APP_ERR_COMM_INVALID_PARAM - APP_ERR_COMM_BASE
    "Not implemented",                                       // APP_ERR_COMM_UNREALIZED - APP_ERR_COMM_BASE
    "Out of memory",                                         // APP_ERR_COMM_OUT_OF_MEM - APP_ERR_COMM_BASE
    "memory allocation error",                               // APP_ERR_COMM_ALLOC_MEM - APP_ERR_COMM_BASE
    "free memory error",                                     // APP_ERR_COMM_FREE_MEM - APP_ERR_COMM_BASE
    "out of range",                                          // APP_ERR_COMM_OUT_OF_RANGE - APP_ERR_COMM_BASE
    "NO Permission",                                         // APP_ERR_COMM_NO_PERMISSION - APP_ERR_COMM_BASE
    "Timed out",                                             // APP_ERR_COMM_TIMEOUT - APP_ERR_COMM_BASE
    "Not initialized",                                       // APP_ERR_COMM_NOT_INIT - APP_ERR_COMM_BASE
    "initialize failed",                                     // APP_ERR_COMM_INIT_FAIL - APP_ERR_COMM_BASE
    "Operation now in progress",                             // APP_ERR_COMM_INPROGRESS - APP_ERR_COMM_BASE
    "Object, file or other resource already exist",          // APP_ERR_COMM_EXIST - APP_ERR_COMM_BASE
    "Object, file or other resource doesn't exist",          // APP_ERR_COMM_NO_EXIST - APP_ERR_COMM_BASE
    "Object, file or other resource is in use",              // APP_ERR_COMM_BUSY - APP_ERR_COMM_BASE
    "No available Device or resource",                       // APP_ERR_COMM_FULL - APP_ERR_COMM_BASE
    "Device, file or resource open failed",                  // APP_ERR_COMM_OPEN_FAIL - APP_ERR_COMM_BASE
    "Device, file or resource read failed",                  // APP_ERR_COMM_READ_FAIL - APP_ERR_COMM_BASE
    "Device, file or resource write failed",                 // APP_ERR_COMM_WRITE_FAIL - APP_ERR_COMM_BASE
    "Device, file or resource destroy failed",               // APP_ERR_COMM_DESTORY_FAIL - APP_ERR_COMM_BASE
    "End of data stream, stop the application",              // APP_ERR_COMM_EXIT - APP_ERR_COMM_BASE
    "Out of connection, Communication shutdown",             // APP_ERR_COMM_CONNECTION_CLOSE - APP_ERR_COMM_BASE
    "connection fail",                                       // APP_ERR_COMM_CONNECTION_FAILURE - APP_ERR_COMM_BASE
    "ACL stream is null pointer",                            // APP_ERR_COMM_STREAM_INVALID - APP_ERR_COMM_BASE
};
// Readable descriptions for the DVPP error range; indexed by
// (code - APP_ERR_DVPP_BASE). Fixed: "corp" typo in the crop+resize message.
const std::string APP_ERR_DVPP_LOG_STRING[] = {
    "Placeholder",                                      // 0
    "DVPP: crop fail",                                  // APP_ERR_DVPP_CROP_FAIL - APP_ERR_DVPP_BASE
    "DVPP: resize fail",                                // APP_ERR_DVPP_RESIZE_FAIL - APP_ERR_DVPP_BASE
    "DVPP: crop and resize fail",                       // APP_ERR_DVPP_CROP_RESIZE_FAIL - APP_ERR_DVPP_BASE
    "DVPP: convert image format fail",                  // APP_ERR_DVPP_CONVERT_FROMAT_FAIL - APP_ERR_DVPP_BASE
    "DVPP: VPC(crop, resize, convert format) fail",     // APP_ERR_DVPP_VPC_FAIL - APP_ERR_DVPP_BASE
    "DVPP: decode jpeg or jpg fail",                    // APP_ERR_DVPP_JPEG_DECODE_FAIL - APP_ERR_DVPP_BASE
    "DVPP: encode jpeg or jpg fail",                    // APP_ERR_DVPP_JPEG_ENCODE_FAIL - APP_ERR_DVPP_BASE
    "DVPP: encode png fail",                            // APP_ERR_DVPP_PNG_DECODE_FAIL - APP_ERR_DVPP_BASE
    "DVPP: decode H264 or H265 fail",                   // APP_ERR_DVPP_H26X_DECODE_FAIL - APP_ERR_DVPP_BASE
    "DVPP: encode H264 or H265 fail",                   // APP_ERR_DVPP_H26X_ENCODE_FAIL - APP_ERR_DVPP_BASE
    "DVPP: acldvppChannelDesc is nullptr",              // APP_ERR_DVPP_HANDLE_NULL - APP_ERR_DVPP_BASE
    "DVPP: fail to create or set acldvppCreatePicDesc", // APP_ERR_DVPP_PICDESC_FAIL - APP_ERR_DVPP_BASE
    "DVPP: fail to set dvpp configuration",             // APP_ERR_DVPP_CONFIG_FAIL - APP_ERR_DVPP_BASE
    "DVPP: DvppCommon object mismatch the function",    // APP_ERR_DVPP_OBJ_FUNC_MISMATCH - APP_ERR_DVPP_BASE
};
// Readable descriptions for the inference error range; indexed by
// (code - APP_ERR_INFER_BASE), index 0 unused.
const std::string APP_ERR_INFER_LOG_STRING[] = {
    "Placeholder",  // 0
    "Infer: set input fail",  // APP_ERR_INFER_SET_INPUT_FAIL - APP_ERR_INFER_BASE
    "Infer: set output fail",  // APP_ERR_INFER_SET_OUTPUT_FAIL - APP_ERR_INFER_BASE
    "Infer: create output fail",  // APP_ERR_INFER_CREATE_OUTPUT_FAIL - APP_ERR_INFER_BASE
    "Infer: set op attribute fail",  // APP_ERR_INFER_OP_SET_ATTR_FAIL - APP_ERR_INFER_BASE
    "Infer: get model output fail",  // APP_ERR_INFER_GET_OUTPUT_FAIL - APP_ERR_INFER_BASE
    "Infer: find model id fail",  // APP_ERR_INFER_FIND_MODEL_ID_FAIL - APP_ERR_INFER_BASE
    "Infer: find model description fail",  // APP_ERR_INFER_FIND_MODEL_DESC_FAIL - APP_ERR_INFER_BASE
    "Infer: find model memory fail",  // APP_ERR_INFER_FIND_MODEL_MEM_FAIL - APP_ERR_INFER_BASE
    "Infer: find model weight fail",  // APP_ERR_INFER_FIND_MODEL_WEIGHT_FAIL - APP_ERR_INFER_BASE
};
// Readable descriptions for the blocking-queue error range; indexed by
// (code - APP_ERR_QUEUE_BASE). Fixed: "stoped" typo in the stopped message.
const std::string APP_ERR_QUEUE_LOG_STRING[] = {
    "Placeholder",    // 0
    "empty queue",    // APP_ERR_QUEUE_EMPTY - APP_ERR_QUEUE_BASE
    "queue stopped",  // APP_ERR_QUEUE_STOPED - APP_ERR_QUEUE_BASE
    "full queue",     // APP_ERROR_QUEUE_FULL - APP_ERR_QUEUE_BASE
};
// Readable descriptions for the idrecognition web error range; indexed by
// (code - APP_ERROR_FACE_WEB_USE_BASE), index 0 unused.
const std::string APP_ERR_FACE_LOG_STRING[] = {
    "Placeholder",  // 0
    "system error",  // APP_ERROR_FACE_WEB_USE_SYSTEM_ERROR - APP_ERROR_FACE_WEB_USE_BASE
    "multiple faces",  // APP_ERROR_FACE_WEB_USE_MUL_FACE - APP_ERROR_FACE_WEB_USE_BASE
    "repeat registration",  // APP_ERROR_FACE_WEB_USE_REPEAT_REG - APP_ERROR_FACE_WEB_USE_BASE
    "partial search succeeded",  // APP_ERROR_FACE_WEB_USE_PART_SUCCESS - APP_ERROR_FACE_WEB_USE_BASE
    "no face detected",  // APP_ERROR_FACE_WEB_USE_NO_FACE - APP_ERROR_FACE_WEB_USE_BASE
};
// Convert an APP_ERROR value to its readable description.
std::string GetAppErrCodeInfo(APP_ERROR err);
// Log the call site and terminate the process when code != APP_ERR_OK.
void AssertErrorCode(int code, std::string file, std::string function, int line);
// Log the call site when code != APP_ERR_OK; execution continues.
void CheckErrorCode(int code, std::string file, std::string function, int line);
#endif // ERROR_CODE_H

View File

@ -0,0 +1,530 @@
/*
* Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "FileManager.h"
#include <ctime>
#include <string>
namespace {
const int BUFFER_SIZE = 2048; // chunk size (bytes) used by CopyFile
#ifndef _WIN32
// umask value: strips group/other permission bits from newly created files
const mode_t DEFAULT_FILE_PERMISSION = 0077;
#endif
}
#ifndef _WIN32
// Apply the default umask (0077) so new files/dirs get owner-only permission
// bits; returns the previously active umask.
mode_t SetFileDefaultUmask()
{
    return umask(DEFAULT_FILE_PERMISSION);
}
// Replace the process umask with newUmask and return the previous one.
mode_t SetFileUmask(mode_t newUmask)
{
    return umask(newUmask);
}
#endif
/**
 * Check whether the file exists.
 *
 * The path is canonicalized first; an unresolvable path is reported with the
 * error code from GetRealPath.
 *
 * @param filePath the file path we want to check
 * @return APP_ERR_OK if file exists, error code otherwise
 */
APP_ERROR ExistFile(const std::string &filePath)
{
    std::string resolvedPath;
    APP_ERROR ret = GetRealPath(filePath, resolvedPath);
    if (ret != APP_ERR_OK) {
        return ret;
    }
#ifndef _WIN32
    // POSIX branch: only regular files count as "existing".
    struct stat fileStat = {0};
    if (stat(resolvedPath.c_str(), &fileStat) == 0 && S_ISREG(fileStat.st_mode)) {
#else
    // NOTE(review): file attributes are bit flags; these equality tests only
    // match when exactly one flag is set, and FILE_ATTRIBUTE_DIRECTORY lets a
    // directory pass this "file exists" check — confirm this is intended.
    DWORD fileStat = GetFileAttributes((LPCSTR)resolvedPath.c_str());
    if ((fileStat == FILE_ATTRIBUTE_ARCHIVE) || (fileStat == FILE_ATTRIBUTE_NORMAL) ||
        (fileStat == FILE_ATTRIBUTE_DIRECTORY)) {
#endif
        return APP_ERR_OK;
    }
    return APP_ERR_COMM_FAILURE;
}
/**
 * Split the input path string with delimiter.
 *
 * Consecutive delimiters, or delimiters at either end, yield empty elements,
 * so the result always contains (number of delimiters + 1) entries.
 *
 * @param str path string
 * @param delimiters a set of delimiter characters
 * @return the vector of split path components
 */
std::vector<std::string> SplitPath(const std::string &str, const std::set<char> delimiters)
{
    std::vector<std::string> parts;
    std::string token;
    for (char ch : str) {
        if (delimiters.count(ch) != 0) {
            // Flush the accumulated component (possibly empty) at each delimiter.
            parts.push_back(token);
            token.clear();
        } else {
            token.push_back(ch);
        }
    }
    // The trailing component after the last delimiter (or the whole string).
    parts.push_back(token);
    return parts;
}
/**
 * Create a single directory level.
 *
 * If the directory already exists with write permission, nothing is done.
 *
 * @param dirPath the directory we want to create
 * @return APP_ERR_OK on success, error code otherwise
 */
APP_ERROR CreateDir(const std::string &dirPath)
{
#ifndef _WIN32
    SetFileDefaultUmask();
    if (dirPath.length() > PATH_MAX) {
        LogError << dirPath << "is larger than " << std::to_string(PATH_MAX) << ".";
        return APP_ERR_COMM_NO_EXIST;
    }
#else
    if (dirPath.length() > MAX_PATH) {
        LogError << dirPath << "is larger than " << std::to_string(MAX_PATH) << ".";
        // Bug fix: this return previously sat OUTSIDE the if-block, so the
        // Windows build always failed here regardless of the path length.
        return APP_ERR_COMM_NO_EXIST;
    }
#endif
    // Check the write authority of directory; if not accessible, create it
    int dirExist = access(dirPath.c_str(), W_OK);
    if (dirExist == -1) {
#ifdef _WIN32
        if (_mkdir(dirPath.c_str()) == -1) {
#else
        if (mkdir(dirPath.c_str(), S_IRUSR | S_IWUSR | S_IXUSR) == -1) {
#endif
            return APP_ERR_COMM_NO_EXIST;
        }
    }
    return APP_ERR_OK;
}
/**
 * Create a directory recursively.
 *
 * First ensures every parent component exists (via CreateDirRecursivelyByFile),
 * then creates the final directory itself when it is still missing.
 *
 * @param file target directory to create
 */
void CreateDirRecursively(const std::string &file)
{
    CreateDirRecursivelyByFile(file);
    if (access(file.c_str(), 0) == 0) {
        return; // already present, nothing to do
    }
#ifndef _WIN32
    int result = mkdir(file.c_str(), S_IRUSR | S_IWUSR | S_IXUSR); // for linux
#else
    int result = _mkdir(file.c_str());
#endif
    if (result < 0) {
        LogError << "mkdir logs file " << file << " fail.";
    }
}
/**
 * Create every parent directory of the given file path recursively.
 *
 * @param file target file whose parent directories are created
 */
void CreateDirRecursivelyByFile(const std::string &file)
{
    size_t pos = file.rfind('/'); // for linux
    if (pos == std::string::npos) {
        // Bug fix: with no '/' the parent (substr up to npos) equals the input
        // itself, so the recursive call below never terminated when the path
        // did not exist. No separator means there is no parent to create.
        return;
    }
    std::string filePath = file.substr(0, pos);
    if (file.substr(0, 1) == "/")
    {
        // NOTE(review): this rebases an absolute path onto the current working
        // directory ("/logs/x" -> "./logs") — confirm that is intended.
        filePath = "." + filePath;
    }
    if (access(filePath.c_str(), 0) != 0) {
        CreateDirRecursivelyByFile(filePath);
#ifndef _WIN32
        int result = mkdir(filePath.c_str(), S_IRUSR | S_IWUSR | S_IXUSR); // for linux
#else
        int result = _mkdir(filePath.c_str());
#endif
        if (result < 0) {
            LogError << "mkdir logs file " << filePath << " fail.";
            return;
        }
    }
}
/**
 * Read a whole file into the RawData structure.
 *
 * @param filePath file to read from
 * @param fileData RawData structure to store in
 * @return APP_ERR_OK on success, error code otherwise
 */
APP_ERROR ReadFile(const std::string &filePath, RawData &fileData)
{
    std::string resolvedPath;
    APP_ERROR ret = GetRealPath(filePath, resolvedPath);
    if (ret != APP_ERR_OK) {
        return ret;
    }
    // Open file with reading mode
    FILE *fp = fopen(resolvedPath.c_str(), "rb");
    if (fp == nullptr) {
        LogError << "Failed to open file";
        return APP_ERR_COMM_OPEN_FAIL;
    }
    // Get the length of input file
    fseek(fp, 0, SEEK_END);
    long fileSize = ftell(fp);
    fseek(fp, 0, SEEK_SET);
    // If file not empty, read it into fileData and return it
    if (fileSize > 0) {
        fileData.lenOfByte = fileSize;
        // Allocate the array buffer directly; the old code first created a
        // throw-away single-byte object via make_shared and then reset it.
        fileData.data.reset(new uint8_t[fileSize], std::default_delete<uint8_t[]>());
        size_t readRet = fread(fileData.data.get(), 1, fileSize, fp);
        fclose(fp);
        // A short read (the old check only rejected 0) means the file changed
        // underneath us or an I/O error occurred.
        if (readRet != static_cast<size_t>(fileSize)) {
            return APP_ERR_COMM_READ_FAIL;
        }
        return APP_ERR_OK;
    }
    fclose(fp);
    return APP_ERR_COMM_FAILURE;
}
/**
 * Read a binary file, store the data into a uint8_t array.
 *
 * @param fileName the file for reading
 * @param buffShared out: shared pointer to the file content
 * @param buffLength out: number of bytes in the array
 * @return APP_ERR_OK on success, error code otherwise
 */
APP_ERROR ReadBinaryFile(const std::string &fileName, std::shared_ptr<uint8_t> &buffShared, int &buffLength)
{
    // read binary file
    std::ifstream inFile(fileName, std::ios::in | std::ios::binary);
    if (!inFile) {
        LogError << "FaceFeatureLib: read file " << fileName << " fail.";
        return APP_ERR_COMM_READ_FAIL;
    }
    // get length of file:
    inFile.seekg(0, inFile.end);
    buffLength = inFile.tellg();
    inFile.seekg(0, inFile.beg);
    // Allocate the array directly instead of make_shared<uint8_t>() + reset,
    // which performed a pointless single-byte allocation first.
    std::shared_ptr<uint8_t> tempShared(new uint8_t[buffLength], std::default_delete<uint8_t[]>());
    inFile.read((char *)tempShared.get(), buffLength);
    inFile.close();
    buffShared = tempShared;
    LogDebug << "read file: fileName=" << fileName << ", size=" << buffLength << ".";
    return APP_ERR_OK;
}
/**
 * Read a file with specified offset.
 * Only used in Jpegd.
 *
 * The buffer is allocated with `offset` extra bytes beyond the file size
 * (lenOfByte still reports the file size only).
 *
 * @param fileName the file for reading
 * @param fileData RawData structure to store in
 * @param offset extra bytes reserved in the buffer
 * @return APP_ERR_OK on success, error code otherwise
 */
APP_ERROR ReadFileWithOffset(const std::string &fileName, RawData &fileData, const uint32_t offset)
{
    std::string resolvedPath;
    APP_ERROR ret = GetRealPath(fileName, resolvedPath);
    if (ret != APP_ERR_OK) {
        return ret;
    }
    // Open file with reading mode
    FILE *fp = fopen(resolvedPath.c_str(), "rb");
    if (fp == nullptr) {
        LogError << "Failed to open file";
        return APP_ERR_COMM_OPEN_FAIL;
    }
    // Get the length of input file
    fseek(fp, 0, SEEK_END);
    long fileSize = ftell(fp);
    fseek(fp, 0, SEEK_SET);
    // If file not empty, read it into fileData and return it
    if (fileSize > 0) {
        fileData.lenOfByte = fileSize;
        // Direct array allocation; the previous make_shared<uint8_t>() call
        // allocated a single byte only to be discarded by reset().
        fileData.data.reset(new uint8_t[fileSize + offset], std::default_delete<uint8_t[]>());
        size_t readRet = fread(fileData.data.get(), 1, fileSize, fp);
        fclose(fp);
        // Require the full file to be read (the old check only rejected 0).
        if (readRet != static_cast<size_t>(fileSize)) {
            return APP_ERR_COMM_READ_FAIL;
        }
        return APP_ERR_OK;
    }
    fclose(fp);
    return APP_ERR_COMM_FAILURE;
}
/**
 * Canonicalize srcPath into an absolute path ("."/".." and symlinks resolved).
 *
 * NOTE(review): on the POSIX branch realpath() fails with ENOENT when the path
 * does not exist, so this cannot be used on files that are about to be created.
 *
 * @param srcPath input path
 * @param resolvedPath out: canonical absolute path
 * @return APP_ERR_OK on success, APP_ERR_COMM_NO_EXIST otherwise
 */
APP_ERROR GetRealPath(const std::string &srcPath, std::string &resolvedPath)
{
    // Get the absolute path of input file
#ifndef _WIN32
    char path[PATH_MAX + 1] = {0};
    if ((strlen(srcPath.c_str()) > PATH_MAX) || (realpath(srcPath.c_str(), path) == nullptr)) {
#else
#pragma comment(lib, "Shlwapi.lib")
    char path[MAX_PATH + 1] = {0};
    if ((strlen(srcPath.c_str()) > MAX_PATH) || (_fullpath(path, srcPath.c_str(), MAX_PATH) == nullptr)) {
#endif
        LogError << "Failed to get canonicalize path----" << srcPath;
        return APP_ERR_COMM_NO_EXIST;
    }
    resolvedPath = path;
    return APP_ERR_OK;
}
/**
 * Get the extension name of input file.
 *
 * @param filePath the file for reading extension name
 * @return text after the last '.'; the whole input when there is no '.'
 */
std::string GetExtension(const std::string &filePath)
{
    // rfind yields npos when no '.' exists; npos + 1 wraps to 0, so the whole
    // string is returned in that case — same result as the old SplitPath-based
    // lookup of the final '.'-separated component.
    return filePath.substr(filePath.rfind('.') + 1);
}
/**
 * Get file canonical name.
 *
 * @param filePath absolute path of the target file
 * @return filename of the file (text after the last '/')
 */
std::string GetName(const std::string &filePath)
{
    // rfind yields npos when no '/' exists; npos + 1 wraps to 0, returning the
    // input intact — same result as the old SplitPath-based last component.
    return filePath.substr(filePath.rfind('/') + 1);
}
/**
 * Get the parent of input file.
 *
 * @param filePath file for looking for parent
 * @return the second-to-last '/'-separated component, or "" when the path has
 *         fewer than two components
 */
std::string GetParent(const std::string &filePath)
{
    size_t last = filePath.rfind('/');
    if (last == std::string::npos) {
        return ""; // no separator -> no parent component
    }
    // Locate the separator before the last one; npos + 1 wraps to 0 so a
    // missing earlier separator starts the component at the beginning.
    size_t prev = (last == 0) ? std::string::npos : filePath.rfind('/', last - 1);
    size_t begin = prev + 1;
    return filePath.substr(begin, last - begin);
}
/**
 * Change the current working directory.
 *
 * The input is canonicalized, then its containing directory (dirname /
 * PathRemoveFileSpecA strips the last path component) becomes the chdir
 * target — so passing a file path switches to that file's directory.
 *
 * @param dir path whose containing directory to change to
 * @return APP_ERR_OK on success, APP_ERR_COMM_NO_EXIST otherwise
 */
APP_ERROR ChangeDir(const std::string &dir)
{
    std::string resolvedPath;
    APP_ERROR ret = GetRealPath(dir, resolvedPath);
    if (ret != APP_ERR_OK) {
        return ret;
    }
#ifndef _WIN32
    char path[PATH_MAX + 1] = {0};
    resolvedPath.copy(path, resolvedPath.length());
    // dirname may modify its argument, hence the mutable copy above.
    char *dName = dirname(path);
    if (dName == nullptr) {
        return APP_ERR_COMM_NO_EXIST;
    }
#else
    char path[MAX_PATH + 1] = {0};
    resolvedPath.copy(path, resolvedPath.length());
    if (!PathRemoveFileSpecA(path)) {
        return APP_ERR_COMM_NO_EXIST;
    }
    char *dName = path;
#endif
    if (chdir(dName) != 0) {
        return APP_ERR_COMM_NO_EXIST;
    }
    return APP_ERR_OK;
}
/**
* Append stream to file
*
* @param fileName to append to
* @param stream content of string
* @param streamLength length of string
*/
void SaveFileAppend(const std::string &fileName, const std::string &stream, const int streamLength)
{
LogDebug << "saving binary file by app: fileName=" << fileName << ", streamLength=" << streamLength;
std::ofstream outfile(fileName, std::ios::app | std::ofstream::binary);
outfile.write(stream.c_str(), streamLength);
outfile.close();
}
/**
* Overwrite a file with stream
*
* @param fileName to overwrite to
* @param stream content of string
* @param streamLength length of string
*/
void SaveFileOverwrite(const std::string &fileName, const std::string &stream, const int streamLength)
{
LogDebug << "Saving binary file by over write: fileName=" << fileName << ", streamLength=" << streamLength;
std::ofstream outfile(fileName, std::ios::out | std::ofstream::binary);
outfile.write(stream.c_str(), streamLength);
outfile.close();
}
/**
* Copy file
*
* @param srcFile from source
* @param destFile to destination
*/
void CopyFile(const std::string &srcFile, const std::string &destFile)
{
std::ifstream in(srcFile, std::ios::binary);
if (!in) {
LogError << "Failed to get source file, it may be not exists. srcFile=" << srcFile;
return;
}
std::ofstream out(destFile, std::ios::binary);
if (!out) {
LogError << "Failed to save destination file. destFile=" << destFile;
in.close();
return;
}
char flush[BUFFER_SIZE];
while (!in.eof()) {
in.read(flush, BUFFER_SIZE);
out.write(flush, in.gcount());
}
out.close();
in.close();
}
/**
 * Save buffer to a timestamped file under the specified folder.
 *
 * The final name is "<folder>/<fileName>_<timestamp><fileSuffix>"; the folder
 * is created when missing.
 *
 * @param dataBuffer buffer of file
 * @param bufferSize buffer size
 * @param folderName specified folder, created if it does not exist
 * @param fileName file name without suffix; a time stamp is appended
 * @param fileSuffix suffix name of file
 * @return APP_ERR_OK on success, error code otherwise
 */
APP_ERROR SaveFileWithTimeStamp(std::shared_ptr<void> dataBuffer, uint32_t bufferSize, std::string folderName,
    std::string fileName, std::string fileSuffix)
{
#ifndef _WIN32
    SetFileDefaultUmask();
#endif
    APP_ERROR ret;
    if (folderName.length() != 0) {
        ret = CreateDir(folderName);
        if (ret != APP_ERR_OK) {
            return ret;
        }
    }
    // Result file name use the time stamp as a suffix
    std::string timeString;
    GetCurTimeString(timeString);
    // Create file under folderName directory
    std::stringstream resultPathName;
    if (folderName.length() == 0) {
        resultPathName << "./" << fileName << "_" << timeString << fileSuffix;
    } else {
        resultPathName << folderName << "/" << fileName << "_" << timeString << fileSuffix;
    }
    // Bug fix: the path was previously passed through GetRealPath, but
    // realpath() fails for a file that does not exist yet, so saving a brand
    // new file always returned APP_ERR_COMM_NO_EXIST. Open the path directly.
    FILE *fp = fopen(resultPathName.str().c_str(), "wb");
    if (fp == nullptr) {
        LogError << "Failed to open file";
        return APP_ERR_COMM_OPEN_FAIL;
    }
    size_t result = fwrite(dataBuffer.get(), 1, bufferSize, fp);
    if (result != bufferSize) {
        LogError << "Failed to write file";
        fclose(fp);
        return APP_ERR_COMM_WRITE_FAIL;
    }
    LogInfo << "Write result to file successfully";
    // fflush/fclose return int (0 on success); storing them in uint32_t as
    // before obscured the EOF (-1) failure value.
    if (fflush(fp) != 0) {
        LogError << "Failed to fflush file";
        fclose(fp);
        return APP_ERR_COMM_DESTORY_FAIL;
    }
    if (fclose(fp) != 0) {
        LogError << "Failed to fclose file";
        return APP_ERR_COMM_DESTORY_FAIL;
    }
    return APP_ERR_OK;
}
/**
 * Produce the current time formatted as "%Y%m%d%H%M%S".
 *
 * A fixed +8h offset is added to UTC before formatting with gmtime, i.e. the
 * result is always UTC+8 regardless of the local time zone.
 *
 * @param timeString out: 14-character time string
 */
void GetCurTimeString(std::string &timeString)
{
    const int kEightHoursInSec = 28800; // fixed UTC+8 offset
    const int kBufSize = 32;
    char formatted[kBufSize] = {0};
    time_t shifted = time(nullptr) + kEightHoursInSec;
    struct tm parts = {0};
#ifdef _WIN32
    if (0 == gmtime_s(&parts, &shifted)) {
#else
    if (nullptr != gmtime_r(&shifted, &parts)) {
#endif
        strftime(formatted, sizeof(formatted), "%Y%m%d%H%M%S", &parts);
    }
    // On conversion failure the buffer stays zeroed and an empty string results.
    timeString = formatted;
}

View File

@ -0,0 +1,75 @@
/*
* Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FILEMANAGER_H
#define FILEMANAGER_H
#ifdef _WIN32
#include <io.h>
#include <direct.h>
#include <windows.h>
#include <shlwapi.h>
#else
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <libgen.h>
#endif
#include <cstring>
#include <fstream>
#include <algorithm>
#include <vector>
#include <memory>
#include <cstdio>
#include <iostream>
#include <set>
#include "CommonDataType.h"
#include "Log.h"
#include "ErrorCode.h"
#ifdef _WIN32
#define access _access
#endif
#define BUF_SIZE 32U // Max buffer size
const int TWO = 2; // index offset for the second-to-last path component (GetParent)
static const std::string SLASH = "/"; // delimiter used to split path
#ifndef _WIN32
mode_t SetFileDefaultUmask();
mode_t SetFileUmask(mode_t newUmask);
#endif
APP_ERROR GetRealPath(const std::string &srcPath, std::string &resolvedPath);
APP_ERROR ExistFile(const std::string &filePath);
std::vector<std::string> SplitPath(const std::string &str, const std::set<char> delimiters);
APP_ERROR CreateDir(const std::string &dirPath);
void CreateDirRecursively(const std::string &file);
void CreateDirRecursivelyByFile(const std::string &file);
APP_ERROR ReadFile(const std::string &filePath, RawData &fileData);
APP_ERROR ReadFileWithOffset(const std::string &fileName, RawData &fileData, const uint32_t offset);
APP_ERROR ReadBinaryFile(const std::string &fileName, std::shared_ptr<uint8_t> &buffShared, int &buffLength);
std::string GetExtension(const std::string &filePath);
std::string GetName(const std::string &filePath);
std::string GetParent(const std::string &filePath);
APP_ERROR ChangeDir(const std::string &dir);
void SaveFileAppend(const std::string &fileName, const std::string &stream, const int streamLength);
void SaveFileOverwrite(const std::string &fileName, const std::string &stream, const int streamLength);
void CopyFile(const std::string &srcFile, const std::string &destFile);
APP_ERROR SaveFileWithTimeStamp(std::shared_ptr<void> imageBuffer, uint32_t bufferSize, std::string folderName,
std::string fileName, std::string fileSuffix);
void GetCurTimeString(std::string &timeString);
#endif

203
src/base/Log/Log.cpp Normal file
View File

@ -0,0 +1,203 @@
/*
* Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <fstream>
#include <iomanip>
#include <iostream>
#ifdef _WIN32
#include <windows.h>
#include <stdio.h>
#else
#include <sys/time.h>
#endif
#include <FileManager.h>
#include "Log.h"
namespace MatrixAiLog
{
const int TIME_SIZE = 32;
const int TIME_DIFF = 28800; // 8 hour
const int BYTES6 = 6;
const int FILE_SIZE = 52428800; // 50M
uint32_t Log::logLevel = LOG_LEVEL_INFO;
std::vector<std::string> Log::levelString{"[Debug]", "[Info ]", "[Warn ]", "[Error]", "[Fatal]"};
std::mutex Log::mutex;
std::string Log::logPath = "./logs";
std::string Log::logFile = "log.log"; // default log file
std::string Log::logFileBak = "log.log.bak";
// Capture the call site (file/function/line) and this record's severity.
// The message body is streamed in via Stream(); ~Log() emits the record.
Log::Log(std::string file, std::string function, int line, uint32_t level)
    : myLevel_(level), file_(file), function_(function), line_(line)
{
}
// Emit the accumulated record: echo to stdout and append to the log file,
// then rotate the file to logFileBak once it reaches FILE_SIZE bytes.
// Serialized with a static mutex so concurrent Log temporaries don't interleave.
Log::~Log()
{
    if (myLevel_ >= logLevel)
    {
        std::lock_guard<std::mutex> locker(mutex);
        // cout to screen
        std::cout << ss_.str() << std::endl;
        std::string outPath = logPath + std::string("/") + logFile;
        // log to the file (create the directory chain on demand)
        CreateDirRecursivelyByFile(outPath);
        std::ofstream fs(outPath, std::ios::app);
        if (!fs)
        {
            std::cout << "open file " << outPath << " fail" << std::endl;
            return;
        }
        // size before this write decides whether we rotate afterwards
        // NOTE(review): tellp() returns -1 on failure, which wraps to a huge
        // size_t and would force a rotation -- confirm acceptable.
        fs.seekp(0, fs.end);
        size_t dstFileSize = fs.tellp();
        fs << ss_.str() << std::endl;
        fs.close();
        if (dstFileSize < FILE_SIZE)
        {
            return;
        }
        std::string bakPath = logPath + std::string("/") + logFileBak;
        // dstFileSize >= FILE_SIZE: drop the old backup, then rename log -> bak
        // (access() returns 0 on success; APP_ERR_OK is presumably 0 -- confirm)
        if (access(bakPath.c_str(), 0) == APP_ERR_OK)
        {
            APP_ERROR ret = remove(bakPath.c_str());
            if (ret != APP_ERR_OK)
            {
                std::cout << "remove " << bakPath << " failed." << std::endl;
                return;
            }
        }
        APP_ERROR ret = rename(outPath.c_str(), bakPath.c_str());
        if (ret != APP_ERR_OK)
        {
            std::cout << "rename " << outPath << " failed." << std::endl;
            return;
        }
    }
}
// Build the record prefix "[level][date time:microseconds][file func:line] "
// into ss_ and hand the stream back for the caller to append the message.
// Only formats when this record's severity clears the global threshold.
std::ostringstream &Log::Stream()
{
    if (myLevel_ >= logLevel)
    {
        long int usValue = 0;
#ifndef _WIN32
        struct timeval time = {0, 0};
        gettimeofday(&time, nullptr);
        // TIME_DIFF shifts the epoch by 8 hours before gmtime(), i.e. a
        // hard-coded UTC+8 timestamp. NOTE(review): gmtime() uses a shared
        // static buffer and is not thread-safe -- consider gmtime_r().
        time_t timep = time.tv_sec + TIME_DIFF;
        struct tm *ptm = gmtime(&timep);
        char timeString[TIME_SIZE] = {0};
        strftime(timeString, TIME_SIZE, "[%F %X:", ptm);
        usValue = time.tv_usec;
#else
        // Windows path: assemble the same "[Y-M-D H:M:S:" prefix by hand.
        SYSTEMTIME sysTimes;
        GetLocalTime(&sysTimes);
        std::string timeString;
        timeString = '[' + std::to_string(sysTimes.wYear) + '-' + std::to_string(sysTimes.wMonth) + '-' +
        std::to_string(sysTimes.wDay) + " " + std::to_string(sysTimes.wHour) + ':' +
        std::to_string(sysTimes.wMinute) + ':' + std::to_string(sysTimes.wSecond) + ':';
        uint32_t msToUs = 1000;
        usValue = sysTimes.wMilliseconds * msToUs;
#endif
        date_ = timeString;
        // zero-pad the microsecond field to BYTES6 digits
        ss_.fill('0');
        ss_ << levelString[myLevel_] << timeString << std::setw(BYTES6) << usValue << "]";
        // strip the directory part of __FILE__ (POSIX '/' separators only)
        std::string fileName = file_.substr(file_.rfind('/') + 1);
        ss_ << "[" << fileName << " " << function_ << ":" << line_ << "] ";
    }
    return ss_;
}
// --- Threshold switches: each pins the global logLevel to one value. ---
void Log::LogDebugOn() { logLevel = LOG_LEVEL_DEBUG; }
void Log::LogInfoOn() { logLevel = LOG_LEVEL_INFO; }
void Log::LogWarnOn() { logLevel = LOG_LEVEL_WARN; }
void Log::LogErrorOn() { logLevel = LOG_LEVEL_ERROR; }
void Log::LogFatalOn() { logLevel = LOG_LEVEL_FATAL; }
// "All on" is the lowest threshold (DEBUG and above pass).
void Log::LogAllOn() { logLevel = LOG_LEVEL_DEBUG; }
// "All off" is a threshold above the highest real level.
void Log::LogAllOff() { logLevel = LOG_LEVEL_NONE; }
// Translate a level name ("DEBUG".."NONE") into the global threshold.
// An unrecognized name is ignored and the current level is kept.
void Log::SetLogLevel(const std::string &log_level)
{
    if (log_level == "DEBUG")
    {
        logLevel = LOG_LEVEL_DEBUG;
        return;
    }
    if (log_level == "INFO")
    {
        logLevel = LOG_LEVEL_INFO;
        return;
    }
    if (log_level == "WARN")
    {
        logLevel = LOG_LEVEL_WARN;
        return;
    }
    if (log_level == "ERROR")
    {
        logLevel = LOG_LEVEL_ERROR;
        return;
    }
    if (log_level == "FATAL")
    {
        logLevel = LOG_LEVEL_FATAL;
        return;
    }
    if (log_level == "NONE")
    {
        logLevel = LOG_LEVEL_NONE;
    }
}
// Redirect log output to a new directory; an empty argument keeps the
// current path. (Despite the parameter name, this sets logPath, the
// directory, not the file name.)
void Log::SetLogFile(const std::string &log_file)
{
    if (log_file.empty())
    {
        return;
    }
    logPath = log_file;
}
// Convenience constructors for a Log temporary at each severity.
// NOTE(review): these are defined at the end of the .cpp, so they are only
// visible here; Log.h's LOG(security) macro expands to these names -- verify
// that translation units using LOG() actually see definitions for them.
#define LOG_DEBUG Log(__FILE__, __FUNCTION__, __LINE__, MatrixAiLog::LOG_LEVEL_DEBUG)
#define LOG_INFO Log(__FILE__, __FUNCTION__, __LINE__, MatrixAiLog::LOG_LEVEL_INFO)
#define LOG_WARN Log(__FILE__, __FUNCTION__, __LINE__, MatrixAiLog::LOG_LEVEL_WARN)
#define LOG_ERROR Log(__FILE__, __FUNCTION__, __LINE__, MatrixAiLog::LOG_LEVEL_ERROR)
#define LOG_FATAL Log(__FILE__, __FUNCTION__, __LINE__, MatrixAiLog::LOG_LEVEL_FATAL)
} // namespace MatrixAiLog

75
src/base/Log/Log.h Normal file
View File

@ -0,0 +1,75 @@
/*
* Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LOG_H
#define LOG_H
#include <mutex>
#include <sstream>
#include <string>
#include <vector>
namespace MatrixAiLog {
// Log severity levels, ordered low -> high. Log::logLevel holds the emission
// threshold: records below it are dropped. LOG_LEVEL_NONE sits above FATAL
// and therefore silences all output.
enum LogLevels {
LOG_LEVEL_DEBUG = 0,
LOG_LEVEL_INFO = 1,
LOG_LEVEL_WARN = 2,
LOG_LEVEL_ERROR = 3,
LOG_LEVEL_FATAL = 4,
LOG_LEVEL_NONE
};
// One Log instance represents a single log record: construction captures the
// call site, Stream() formats the prefix and accepts the message, and the
// destructor emits the record (stdout + file, with size-based rotation).
class Log {
public:
    Log(std::string file, std::string function, int line, uint32_t level);
    ~Log();
    // Returns the internal stream after writing the record prefix.
    std::ostringstream &Stream();
    // log switch, turn on and off both screen and file log of special level.
    static void LogDebugOn();
    static void LogInfoOn();
    static void LogWarnOn();
    static void LogErrorOn();
    static void LogFatalOn();
    static void LogAllOn();
    static void LogAllOff();
    // Threshold by name ("DEBUG".."NONE"); unknown names are ignored.
    static void SetLogLevel(const std::string &log_level);
    // Sets the log output directory (logPath); empty string keeps the default.
    static void SetLogFile(const std::string &log_file);
private:
    std::ostringstream ss_;     // accumulated record text
    uint32_t myLevel_;          // severity of this record
    std::string date_;          // timestamp prefix built by Stream()
    std::string file_;          // __FILE__ of the call site
    std::string function_;      // __FUNCTION__ of the call site
    int line_;                  // __LINE__ of the call site
    static uint32_t logLevel;                    // global emission threshold
    static std::vector<std::string> levelString; // "[Debug]".."[Fatal]" prefixes
    static std::mutex mutex;                     // serializes emission in ~Log()
    static std::string logPath;                  // output directory
    static std::string logFile;                  // active log file name
    static std::string logFileBak;               // rotation target name
};
} // namespace MatrixAiLog
// Stream-style entry points, e.g. `LogInfo << "message";`. The record is
// emitted when the temporary Log object is destroyed at the end of the full
// expression.
#define LogDebug MatrixAiLog::Log(__FILE__, __FUNCTION__, __LINE__, MatrixAiLog::LOG_LEVEL_DEBUG).Stream()
#define LogInfo MatrixAiLog::Log(__FILE__, __FUNCTION__, __LINE__, MatrixAiLog::LOG_LEVEL_INFO).Stream()
#define LogWarn MatrixAiLog::Log(__FILE__, __FUNCTION__, __LINE__, MatrixAiLog::LOG_LEVEL_WARN).Stream()
#define LogError MatrixAiLog::Log(__FILE__, __FUNCTION__, __LINE__, MatrixAiLog::LOG_LEVEL_ERROR).Stream()
#define LogFatal MatrixAiLog::Log(__FILE__, __FUNCTION__, __LINE__, MatrixAiLog::LOG_LEVEL_FATAL).Stream()
// NOTE(review): LOG(security) expands to MatrixAiLog::LOG_##security, but the
// LOG_DEBUG/LOG_INFO/... macros are defined only at the bottom of Log.cpp,
// not in this header -- verify callers of LOG() can actually resolve them.
#define LOG(security) MatrixAiLog::LOG_##security.Stream()
#endif

View File

@ -0,0 +1,20 @@
#include "cuda_utils.h"
// Byte width of a single element of the given TensorRT data type.
// NOTE(review): declared `inline` but defined in this .cpp -- only this
// translation unit can call it; consider moving the definition into the
// header next to its declaration.
inline unsigned int getElementSize(nvinfer1::DataType t)
{
    switch (t){
    case nvinfer1::DataType::kINT32: return 4;
    case nvinfer1::DataType::kFLOAT: return 4;
    case nvinfer1::DataType::kHALF: return 2;
    case nvinfer1::DataType::kBOOL:  // one byte, same as kINT8
    case nvinfer1::DataType::kINT8: return 1;
    }
    // Unknown enumerator (e.g. a newer TensorRT type): fail loudly.
    // (The old `return 0;` after this throw was unreachable and removed.)
    throw std::runtime_error("Invalid DataType.");
}
// Total number of elements in a Dims shape (product of all dimensions).
inline int64_t volume(const nvinfer1::Dims& d)
{
    // Accumulate in int64_t: with the plain literal `1`, std::accumulate
    // deduces an `int` accumulator and truncates large volumes even though
    // std::multiplies<int64_t> is supplied.
    return std::accumulate(d.d, d.d + d.nbDims, static_cast<int64_t>(1), std::multiplies<int64_t>());
}

View File

@ -0,0 +1,41 @@
#ifndef _CUDA_UTILS_H_
#define _CUDA_UTILS_H_
#include <algorithm>
#include <atomic>
#include <cassert>
#include <functional>
#include <iostream>
#include <numeric>
#include <stdexcept>
#include <stdint.h>
#include <NvInfer.h>
#include <NvInferPlugin.h>
#include <NvOnnxParser.h>
#include <NvCaffeParser.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
// NOTE(review): `using namespace` directives in a header leak into every
// including translation unit; prefer qualified names -- confirm before changing.
using namespace nvinfer1;
using namespace nvcaffeparser1;
using namespace std;
// Abort-on-error wrapper for CUDA runtime calls: prints the error code and
// location, then assert(0). Relies on assert() being declared (see <cassert>).
#ifndef CUDA_CHECK
#define CUDA_CHECK(callstr)\
{\
cudaError_t error_code = callstr;\
if (error_code != cudaSuccess) {\
std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\
assert(0);\
}\
}
#endif // CUDA_CHECK
// NOTE(review): these functions are declared `inline` here but defined in a
// .cpp file; any other translation unit that calls them will fail to link.
// Move the definitions into this header (or drop `inline`) -- confirm.
inline unsigned int getElementSize(nvinfer1::DataType t);
inline int64_t volume(const nvinfer1::Dims& d);
#endif //END OF _CUDA_UTILS_H_

View File

@ -0,0 +1,504 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TENSORRT_LOGGING_H
#define TENSORRT_LOGGING_H
#include "NvInferRuntimeCommon.h"
#include <cassert>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <sstream>
#include <string>
#include "macros.h"
using Severity = nvinfer1::ILogger::Severity;
// String buffer that, on sync/destruction, forwards its contents to a target
// ostream with a timestamp (to stdout) and a severity prefix, gated by
// mShouldLog.
class LogStreamConsumerBuffer : public std::stringbuf
{
public:
    LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
        : mOutput(stream)
        , mPrefix(prefix)
        , mShouldLog(shouldLog)
    {
    }
    // Fix: the previous move constructor initialized only mOutput, leaving
    // mPrefix empty and mShouldLog *uninitialized* (indeterminate read when
    // the moved-to buffer logs). Carry all three members over.
    LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
        : mOutput(other.mOutput)
        , mPrefix(other.mPrefix)
        , mShouldLog(other.mShouldLog)
    {
    }
    ~LogStreamConsumerBuffer()
    {
        // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
        // std::streambuf::pptr() gives a pointer to the current position of the output sequence
        // if the pointer to the beginning is not equal to the pointer to the current position,
        // call putOutput() to log the output to the stream
        if (pbase() != pptr())
        {
            putOutput();
        }
    }
    // synchronizes the stream buffer and returns 0 on success
    // synchronizing the stream buffer consists of inserting the buffer contents into the stream,
    // resetting the buffer and flushing the stream
    virtual int sync()
    {
        putOutput();
        return 0;
    }
    void putOutput()
    {
        if (mShouldLog)
        {
            // prepend timestamp (written to stdout, not to mOutput)
            std::time_t timestamp = std::time(nullptr);
            tm* tm_local = std::localtime(&timestamp);
            std::cout << "[";
            std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
            std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
            // insert the buffer contents pre-appended by the prefix into the stream
            mOutput << mPrefix << str();
            // set the buffer to empty and flush the stream
            str("");
            mOutput.flush();
        }
    }
    void setShouldLog(bool shouldLog)
    {
        mShouldLog = shouldLog;
    }
private:
    std::ostream& mOutput;  // destination stream (cout or cerr)
    std::string mPrefix;    // severity tag, e.g. "[W] "
    bool mShouldLog;        // false == drop everything silently
};
//!
//! \class LogStreamConsumerBase
//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
//!
class LogStreamConsumerBase
{
public:
    // Holds the buffer so it is fully constructed before the std::ostream
    // base of LogStreamConsumer receives its address (see the class comment
    // above on base-class ordering).
    LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog)
        : mBuffer(stream, prefix, shouldLog)
    {
    }
protected:
    LogStreamConsumerBuffer mBuffer;
};
//!
//! \class LogStreamConsumer
//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages.
//! Order of base classes is LogStreamConsumerBase and then std::ostream.
//! This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field
//! in LogStreamConsumer and then the address of the buffer is passed to std::ostream.
//! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream.
//! Please do not change the order of the parent classes.
//!
class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream
{
public:
    //! \brief Creates a LogStreamConsumer which logs messages with level severity.
    //! Reportable severity determines if the messages are severe enough to be logged.
    LogStreamConsumer(Severity reportableSeverity, Severity severity)
        : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity)
        , std::ostream(&mBuffer) // links the stream buffer with the stream
        , mShouldLog(severity <= reportableSeverity)
        , mSeverity(severity)
    {
    }
    // Move constructor: rebuilds the buffer from the other consumer's settings
    // (the buffer itself is re-created, not moved).
    LogStreamConsumer(LogStreamConsumer&& other)
        : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog)
        , std::ostream(&mBuffer) // links the stream buffer with the stream
        , mShouldLog(other.mShouldLog)
        , mSeverity(other.mSeverity)
    {
    }
    // Re-evaluates the gating flag against a new reportable severity.
    void setReportableSeverity(Severity reportableSeverity)
    {
        mShouldLog = mSeverity <= reportableSeverity;
        mBuffer.setShouldLog(mShouldLog);
    }
private:
    // kINFO and below go to stdout; warnings/errors go to stderr.
    static std::ostream& severityOstream(Severity severity)
    {
        return severity >= Severity::kINFO ? std::cout : std::cerr;
    }
    static std::string severityPrefix(Severity severity)
    {
        switch (severity)
        {
        case Severity::kINTERNAL_ERROR: return "[F] ";
        case Severity::kERROR: return "[E] ";
        case Severity::kWARNING: return "[W] ";
        case Severity::kINFO: return "[I] ";
        case Severity::kVERBOSE: return "[V] ";
        default: assert(0); return "";
        }
    }
    bool mShouldLog;
    Severity mSeverity;
};
//! \class Logger
//!
//! \brief Class which manages logging of TensorRT tools and samples
//!
//! \details This class provides a common interface for TensorRT tools and samples to log information to the console,
//! and supports logging two types of messages:
//!
//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal)
//! - Test pass/fail messages
//!
//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is
//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location.
//!
//! In the future, this class could be extended to support dumping test results to a file in some standard format
//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run).
//!
//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger
//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT
//! library and messages coming from the sample.
//!
//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the
//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger
//! object.
class Logger : public nvinfer1::ILogger
{
public:
    Logger(Severity severity = Severity::kWARNING)
        : mReportableSeverity(severity)
    {
    }
    //!
    //! \enum TestResult
    //! \brief Represents the state of a given test
    //!
    enum class TestResult
    {
        kRUNNING, //!< The test is running
        kPASSED, //!< The test passed
        kFAILED, //!< The test failed
        kWAIVED //!< The test was waived
    };
    //!
    //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger
    //! \return The nvinfer1::ILogger associated with this Logger
    //!
    //! TODO Once all samples are updated to use this method to register the logger with TensorRT,
    //! we can eliminate the inheritance of Logger from ILogger
    //!
    nvinfer1::ILogger& getTRTLogger()
    {
        return *this;
    }
    //!
    //! \brief Implementation of the nvinfer1::ILogger::log() virtual method
    //!
    //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the
    //! inheritance from nvinfer1::ILogger
    //!
    void log(Severity severity, const char* msg) TRT_NOEXCEPT override
    {
        LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl;
    }
    //!
    //! \brief Method for controlling the verbosity of logging output
    //!
    //! \param severity The logger will only emit messages that have severity of this level or higher.
    //!
    void setReportableSeverity(Severity severity)
    {
        mReportableSeverity = severity;
    }
    //!
    //! \brief Opaque handle that holds logging information for a particular test
    //!
    //! This object is an opaque handle to information used by the Logger to print test results.
    //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used
    //! with Logger::reportTest{Start,End}().
    //!
    class TestAtom
    {
    public:
        TestAtom(TestAtom&&) = default;
    private:
        friend class Logger;
        TestAtom(bool started, const std::string& name, const std::string& cmdline)
            : mStarted(started)
            , mName(name)
            , mCmdline(cmdline)
        {
        }
        bool mStarted;
        std::string mName;
        std::string mCmdline;
    };
    //!
    //! \brief Define a test for logging
    //!
    //! \param[in] name The name of the test. This should be a string starting with
    //! "TensorRT" and containing dot-separated strings containing
    //! the characters [A-Za-z0-9_].
    //! For example, "TensorRT.sample_googlenet"
    //! \param[in] cmdline The command line used to reproduce the test
    //
    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
    //!
    static TestAtom defineTest(const std::string& name, const std::string& cmdline)
    {
        return TestAtom(false, name, cmdline);
    }
    //!
    //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments
    //! as input
    //!
    //! \param[in] name The name of the test
    //! \param[in] argc The number of command-line arguments
    //! \param[in] argv The array of command-line arguments (given as C strings)
    //!
    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
    static TestAtom defineTest(const std::string& name, int argc, char const* const* argv)
    {
        auto cmdline = genCmdlineString(argc, argv);
        return defineTest(name, cmdline);
    }
    //!
    //! \brief Report that a test has started.
    //!
    //! \pre reportTestStart() has not been called yet for the given testAtom
    //!
    //! \param[in] testAtom The handle to the test that has started
    //!
    static void reportTestStart(TestAtom& testAtom)
    {
        // NOTE(review): the RUNNING banner is printed before the
        // double-start assert fires -- intentional per upstream sample code.
        reportTestResult(testAtom, TestResult::kRUNNING);
        assert(!testAtom.mStarted);
        testAtom.mStarted = true;
    }
    //!
    //! \brief Report that a test has ended.
    //!
    //! \pre reportTestStart() has been called for the given testAtom
    //!
    //! \param[in] testAtom The handle to the test that has ended
    //! \param[in] result The result of the test. Should be one of TestResult::kPASSED,
    //! TestResult::kFAILED, TestResult::kWAIVED
    //!
    static void reportTestEnd(const TestAtom& testAtom, TestResult result)
    {
        assert(result != TestResult::kRUNNING);
        assert(testAtom.mStarted);
        reportTestResult(testAtom, result);
    }
    // Convenience wrappers mapping a test result to a process exit status.
    static int reportPass(const TestAtom& testAtom)
    {
        reportTestEnd(testAtom, TestResult::kPASSED);
        return EXIT_SUCCESS;
    }
    static int reportFail(const TestAtom& testAtom)
    {
        reportTestEnd(testAtom, TestResult::kFAILED);
        return EXIT_FAILURE;
    }
    static int reportWaive(const TestAtom& testAtom)
    {
        reportTestEnd(testAtom, TestResult::kWAIVED);
        return EXIT_SUCCESS;
    }
    static int reportTest(const TestAtom& testAtom, bool pass)
    {
        return pass ? reportPass(testAtom) : reportFail(testAtom);
    }
    Severity getReportableSeverity() const
    {
        return mReportableSeverity;
    }
private:
    //!
    //! \brief returns an appropriate string for prefixing a log message with the given severity
    //!
    static const char* severityPrefix(Severity severity)
    {
        switch (severity)
        {
        case Severity::kINTERNAL_ERROR: return "[F] ";
        case Severity::kERROR: return "[E] ";
        case Severity::kWARNING: return "[W] ";
        case Severity::kINFO: return "[I] ";
        case Severity::kVERBOSE: return "[V] ";
        default: assert(0); return "";
        }
    }
    //!
    //! \brief returns an appropriate string for prefixing a test result message with the given result
    //!
    static const char* testResultString(TestResult result)
    {
        switch (result)
        {
        case TestResult::kRUNNING: return "RUNNING";
        case TestResult::kPASSED: return "PASSED";
        case TestResult::kFAILED: return "FAILED";
        case TestResult::kWAIVED: return "WAIVED";
        default: assert(0); return "";
        }
    }
    //!
    //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity
    //!
    static std::ostream& severityOstream(Severity severity)
    {
        return severity >= Severity::kINFO ? std::cout : std::cerr;
    }
    //!
    //! \brief method that implements logging test results
    //!
    static void reportTestResult(const TestAtom& testAtom, TestResult result)
    {
        severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # "
                                         << testAtom.mCmdline << std::endl;
    }
    //!
    //! \brief generate a command line string from the given (argc, argv) values
    //!
    static std::string genCmdlineString(int argc, char const* const* argv)
    {
        std::stringstream ss;
        for (int i = 0; i < argc; i++)
        {
            if (i > 0)
                ss << " ";
            ss << argv[i];
        }
        return ss.str();
    }
    Severity mReportableSeverity;
};
namespace
{
// NOTE(review): because these helpers sit in an unnamed namespace inside a
// header, every including translation unit gets its own internal-linkage
// copy -- presumably intentional (upstream TensorRT sample style); confirm.
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE
//!
//! Example usage:
//!
//! LOG_VERBOSE(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
{
    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO
//!
//! Example usage:
//!
//! LOG_INFO(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_INFO(const Logger& logger)
{
    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING
//!
//! Example usage:
//!
//! LOG_WARN(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_WARN(const Logger& logger)
{
    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR
//!
//! Example usage:
//!
//! LOG_ERROR(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_ERROR(const Logger& logger)
{
    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR
// ("fatal" severity)
//!
//! Example usage:
//!
//! LOG_FATAL(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_FATAL(const Logger& logger)
{
    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR);
}
} // anonymous namespace
#endif // TENSORRT_LOGGING_H

View File

@ -0,0 +1,27 @@
#ifndef __MACROS_H
#define __MACROS_H
// NOTE(review): identifiers with a leading double underscore (__MACROS_H) are
// reserved for the implementation in C++; consider renaming the guard.
// API: symbol visibility annotation. When building the library
// (API_EXPORTS defined, see the project's add_definitions) symbols are
// exported; consumers get dllimport on MSVC and a no-op elsewhere.
#ifdef API_EXPORTS
#if defined(_MSC_VER)
#define API __declspec(dllexport)
#else
#define API __attribute__((visibility("default")))
#endif
#else
#if defined(_MSC_VER)
#define API __declspec(dllimport)
#else
#define API
#endif
#endif // API_EXPORTS
// Compatibility shims keyed on the TensorRT major version: for TensorRT >= 8
// these expand to `noexcept` / `const` as required by the v8 ILogger::log()
// and enqueue() signatures; on older releases they expand to nothing.
#if NV_TENSORRT_MAJOR >= 8
#define TRT_NOEXCEPT noexcept
#define TRT_CONST_ENQUEUE const
#else
#define TRT_NOEXCEPT
#define TRT_CONST_ENQUEUE
#endif
#endif // __MACROS_H

View File

@ -0,0 +1,270 @@
#include "inference.h"
// Render a dimension list as "d0 x d1 x ... x dn" (empty string for an
// empty list). Elements are formatted with "%d", so T should be int-like.
template<typename T>
static std::string join_dims(const std::vector<T>& dims)
{
    std::stringstream output;
    char buf[64];
    const char* fmts[] = {"%d", " x %d"};  // no separator before the first element
    // size_t index avoids the signed/unsigned comparison of the old int loop;
    // template parameter renamed from `_T` (underscore+capital is reserved).
    for (size_t i = 0; i < dims.size(); ++i) {
        snprintf(buf, sizeof(buf), fmts[i != 0], dims[i]);
        output << buf;
    }
    return output.str();
}
// Trivial constructor/destructor: nothing is set up or torn down here; all
// TensorRT objects are created and destroyed inside the methods below.
Inference::Inference() {}
Inference::~Inference() {}
// Byte width of one element of the given TensorRT data type.
// NOTE(review): an `inline` member defined in the .cpp is only usable from
// this translation unit; consider defining it in the class body instead.
inline unsigned int Inference::getElementSize(nvinfer1::DataType t)
{
    switch (t)
    {
    case nvinfer1::DataType::kINT32: return 4;
    case nvinfer1::DataType::kFLOAT: return 4;
    case nvinfer1::DataType::kHALF: return 2;
    case nvinfer1::DataType::kBOOL:  // one byte, same as kINT8
    case nvinfer1::DataType::kINT8: return 1;
    }
    // Unknown enumerator: fail loudly.
    // (The old `return 0;` after this throw was unreachable and removed.)
    throw std::runtime_error("Invalid DataType.");
}
// Total number of elements in a Dims shape (product of all dimensions).
inline int64_t Inference::volume(const nvinfer1::Dims& d)
{
    // Accumulate in int64_t: with the plain literal `1`, std::accumulate
    // deduces an `int` accumulator and truncates large volumes even though
    // std::multiplies<int64_t> is supplied.
    return std::accumulate(d.d, d.d + d.nbDims, static_cast<int64_t>(1), std::multiplies<int64_t>());
}
// Build an ICudaEngine from an ONNX model file using the ONNX parser.
// Returns the built engine; the caller owns it (and must destroy it).
ICudaEngine* Inference::build_engine_onnx(Logger gLogger, unsigned int maxBatchSize, unsigned int maxWorkSpaceSize, IBuilder* builder, IBuilderConfig* config, std::string& source_onnx)
{
    const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    INetworkDefinition* network = builder->createNetworkV2(explicitBatch);
    // create the ONNX parser
    nvonnxparser::IParser* onnxParser = nvonnxparser::createParser(*network, gLogger);
    // parse the ONNX model file (second argument is the verbosity level)
    // NOTE(review): the boolean return of parseFromFile is not checked; a
    // parse failure silently proceeds to an empty/partial network -- confirm.
    onnxParser->parseFromFile(source_onnx.c_str(), 1);
    // Build engine
    builder->setMaxBatchSize(maxBatchSize);
    config->setMaxWorkspaceSize(maxWorkSpaceSize); // 16MB
    float max_workspace_size = (float)maxWorkSpaceSize/1024.0f/1024.0f;
#if defined(USE_FP16)
    config->setFlag(BuilderFlag::kFP16);
#endif
    std::cout<<"Set max batch size = "<<maxBatchSize<<std::endl; // report max batch size
    std::cout<<"Set max workspace size = "<<max_workspace_size<<" MB"<<std::endl; // report max workspace size
    int net_num_input = network->getNbInputs(); // number of network inputs
    printf("Network has %d inputs:\n", net_num_input);
    std::vector<std::string> input_names(net_num_input);
    for(int i = 0; i < net_num_input; ++i){ // log each input tensor and its dimensions
        auto tensor = network->getInput(i);
        auto dims = tensor->getDimensions();
        auto dims_str = join_dims(std::vector<int>(dims.d, dims.d+dims.nbDims));
        printf(" %d.[%s] shape is %s\n", i, tensor->getName(), dims_str.c_str());
        input_names[i] = tensor->getName();
    }
    int net_num_output = network->getNbOutputs(); // number of network outputs
    printf("Network has %d outputs:\n", net_num_output);
    for(int i = 0; i < net_num_output; ++i){ // log each output tensor and its dimensions
        auto tensor = network->getOutput(i);
        auto dims = tensor->getDimensions();
        auto dims_str = join_dims(std::vector<int>(dims.d, dims.d+dims.nbDims));
        printf(" %d.[%s] shape is %s\n", i, tensor->getName(), dims_str.c_str());
    }
    int net_num_layers = network->getNbLayers(); // number of layers
    printf("Network has %d layers\n", net_num_layers);
    // configure an optimization profile: batch dimension ranges from
    // 1 (kMIN/kOPT) up to maxBatchSize (kMAX) for every input
    auto profile = builder->createOptimizationProfile();
    for(int i = 0; i < net_num_input; ++i){
        auto input = network->getInput(i);
        auto input_dims = input->getDimensions();
        input_dims.d[0] = 1;
        profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);
        profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);
        input_dims.d[0] = maxBatchSize;
        profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);
    }
    config->addOptimizationProfile(profile); // register the profile with the builder config
    std::cout << "Building engine with onnx parser, please wait for a while..." << std::endl;
    // time the engine build
    auto time_start = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count();
    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    auto time_end = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count();
    std::cout << "Build engine with onnx parser successfully!" << std::endl;
    printf("Build done %lld ms !\n", time_end - time_start);
    // Don't need the network any more
    onnxParser->destroy();
    network->destroy();
    return engine;
}
// Build an ICudaEngine from a Caffe model (deploy prototxt + caffemodel).
// vecOutputs names the blobs to expose as network outputs. The caller owns
// the returned engine.
ICudaEngine* Inference::build_engine_caffe(Logger gLogger, unsigned int maxBatchSize, unsigned int maxWorkSpaceSize, IBuilder* builder, IBuilderConfig* config,
const std::string& strCaffeModelFile, const std::string& strCaffeDeployFile, const std::vector<std::string>& vecOutputs)
{
    // create the network (flags = 0: implicit batch dimension)
    INetworkDefinition* network = builder->createNetworkV2(0);
    // create the Caffe parser
    ICaffeParser* caffeParser = createCaffeParser();
    const IBlobNameToTensor *blobNameToTensor = caffeParser->parse(strCaffeDeployFile.c_str(),
    strCaffeModelFile.c_str(),
    *network,
    nvinfer1::DataType::kFLOAT);
    // mark the requested blobs as network outputs
    // NOTE(review): find() returns nullptr for an unknown blob name, which
    // would be dereferenced here -- confirm inputs are always valid.
    for (auto& s : vecOutputs){
        network->markOutput(*blobNameToTensor->find(s.c_str()));
    }
    // set batch size and workspace size
    builder->setMaxBatchSize(maxBatchSize);
    config->setMaxWorkspaceSize(maxWorkSpaceSize);
    config->setFlag(BuilderFlag::kGPU_FALLBACK);
    config->setFlag(BuilderFlag::kSTRICT_TYPES);
    // FP16 precision
#if defined(USE_FP16)
    config->setFlag(BuilderFlag::kFP16);
#endif
    float max_workspace_size = (float)maxWorkSpaceSize/1024.0f/1024.0f;
    std::cout<<"Set max batch size = "<<maxBatchSize<<std::endl; // report max batch size
    std::cout<<"Set max workspace size = "<<max_workspace_size<<" MB"<<std::endl; // report max workspace size
    int net_num_input = network->getNbInputs(); // number of network inputs
    printf("Network has %d inputs:\n", net_num_input);
    std::vector<std::string> input_names(net_num_input);
    for(int i = 0; i < net_num_input; ++i){ // log each input tensor and its dimensions
        auto tensor = network->getInput(i);
        auto dims = tensor->getDimensions();
        auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims));
        printf(" %d.[%s] shape is %s\n", i, tensor->getName(), dims_str.c_str());
        input_names[i] = tensor->getName();
    }
    int net_num_output = network->getNbOutputs(); // number of network outputs
    printf("Network has %d outputs:\n", net_num_output);
    for(int i = 0; i < net_num_output; ++i){ // log each output tensor and its dimensions
        auto tensor = network->getOutput(i);
        auto dims = tensor->getDimensions();
        auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims));
        printf(" %d.[%s] shape is %s\n", i, tensor->getName(), dims_str.c_str());
    }
    int net_num_layers = network->getNbLayers(); // number of layers
    printf("Network has %d layers\n", net_num_layers);
    // build the engine
    // time the engine build
    std::cout << "Building engine with caffe parser, please wait for a while..." << std::endl;
    auto time_start = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count();
    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    assert(engine);
    auto time_end = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count();
    std::cout << "Build engine with caffe parser successfully!" << std::endl;
    printf("Build done %lld ms !\n", time_end - time_start);
    // release parser and network
    caffeParser->destroy();
    network->destroy();
    return engine;
}
// Convert an ONNX model into a serialized TensorRT engine stream
// (written to *modelStream; the caller owns and must destroy it).
void Inference::ONNXToModel(Logger gLogger, unsigned int maxBatchSize, unsigned int maxWorkSpaceSize, IHostMemory** modelStream, std::string& onnx_model_name)
{
    IBuilder* builder = createInferBuilder(gLogger); // create the builder (needs the logger)
    IBuilderConfig* config = builder->createBuilderConfig(); // create the builder config
    // populate the network, set the outputs, and build an engine
    ICudaEngine *engine = nullptr;
    engine = build_engine_onnx(gLogger, maxBatchSize, maxWorkSpaceSize, builder, config, onnx_model_name);
    assert(engine != nullptr);
    // serialize the engine into a model stream
    (*modelStream) = engine->serialize();
    // release resources
    // NOTE(review): builder is destroyed before config here; TensorRT samples
    // normally destroy the config before the builder that created it -- confirm.
    engine->destroy();
    builder->destroy();
    config->destroy();
}
// Convert a Caffe model into a serialized TensorRT engine stream
// (written to *modelStream; the caller owns and must destroy it).
void Inference::CaffeToModel(Logger gLogger, unsigned int maxBatchSize, unsigned int maxWorkSpaceSize, IHostMemory** modelStream, std::string& caffe_model_name, std::string& caffe_deploy_name, std::vector<std::string>& outputs)
{
    IBuilder* builder = createInferBuilder(gLogger); // create the builder (needs the logger)
    IBuilderConfig* config = builder->createBuilderConfig(); // create the builder config
    // populate the network, set the outputs, and build an engine
    ICudaEngine *engine = nullptr;
    engine = build_engine_caffe(gLogger, maxBatchSize, maxWorkSpaceSize, builder, config, caffe_model_name, caffe_deploy_name, outputs);
    assert(engine != nullptr);
    // serialize the engine into a model stream
    (*modelStream) = engine->serialize();
    // release resources
    // NOTE(review): builder destroyed before config -- see ONNXToModel.
    engine->destroy();
    builder->destroy();
    config->destroy();
}
// Inference variant 1 (implicit-batch enqueue): copy host input to the device
// buffer, enqueue execution, copy the output back, then block on the stream.
// (Parameter `ouputIndex` renamed to `outputIndex` for consistency with
// doInferenceV2/V4; positional C++ calls are unaffected.)
void Inference::doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, unsigned int inputIndex, float* input, int inputSize,
unsigned int outputIndex, float* output, int outputSize, int batchSize)
{
    CUDA_CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * inputSize * sizeof(float), cudaMemcpyHostToDevice, stream));
    context.enqueue(batchSize, buffers, stream, nullptr);
    // context.enqueueV2(buffers, stream, nullptr);
    CUDA_CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * outputSize * sizeof(float), cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);
}
//Inference entry 2: implicit-batch path, input assumed already resident on the device
//(e.g. written by a CUDA preprocessing kernel). Only the output is copied back;
//blocks on the stream before returning so `output` is valid.
void Inference::doInferenceV2(IExecutionContext& context, cudaStream_t& stream, void **buffers, unsigned int outputIndex, float* output, int outputSize, int batchSize)
{
    context.enqueue(batchSize, buffers, stream, nullptr); //implicit-batch execution (legacy enqueue API)
    // context.enqueueV2(buffers, stream, nullptr);
    CUDA_CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * outputSize * sizeof(float), cudaMemcpyDeviceToHost, stream)); //device -> host
    cudaStreamSynchronize(stream); //wait for the kernel and copy to finish
}
//Inference entry 3: explicit-batch path (enqueueV2) with explicit input upload.
//Same flow as doInference but for engines built with an explicit batch dimension.
void Inference::doInferenceV3(IExecutionContext& context, cudaStream_t& stream, void **buffers, unsigned int inputIndex, float* input, int inputSize,
                unsigned int ouputIndex, float* output, int outputSize, int batchSize)
{
    CUDA_CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * inputSize * sizeof(float), cudaMemcpyHostToDevice, stream)); //host -> device
    context.enqueueV2(buffers, stream, nullptr); //explicit-batch execution
    CUDA_CHECK(cudaMemcpyAsync(output, buffers[ouputIndex], batchSize * outputSize * sizeof(float), cudaMemcpyDeviceToHost, stream)); //device -> host
    cudaStreamSynchronize(stream); //wait for the copies and kernel to finish
}
//Inference entry 4: explicit-batch path (enqueueV2), input already on the device.
//Only the output is copied back; blocks on the stream before returning.
void Inference::doInferenceV4(IExecutionContext& context, cudaStream_t& stream, void **buffers, unsigned int outputIndex, float* output, int outputSize, int batchSize)
{
    context.enqueueV2(buffers, stream, nullptr); //explicit-batch execution
    CUDA_CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * outputSize * sizeof(float), cudaMemcpyDeviceToHost, stream)); //device -> host
    cudaStreamSynchronize(stream); //wait for the kernel and copy to finish
}

View File

@ -0,0 +1,88 @@
#ifndef _INFERENCE_H_
#define _INFERENCE_H_
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <fstream>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <NvInfer.h>
#include <NvInferPlugin.h>
#include <NvOnnxParser.h>
#include <NvCaffeParser.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cuda_utils.h"
#include "logging.h"
using namespace nvinfer1;
using namespace nvcaffeparser1;
using namespace std;
#define ENABLE_CUDA_PREPROCESS
//Base class wrapping a TensorRT engine lifecycle: building/serializing engines
//from ONNX or Caffe models, and running inference through implicit-batch
//(enqueue) or explicit-batch (enqueueV2) paths. Subclasses own init/deinit of
//the public state members below.
class Inference
{
public:
    Inference();
    ~Inference();
    //size in bytes of one element of the given TensorRT data type
    inline unsigned int getElementSize(nvinfer1::DataType t);
    //product of all dimensions (element count) of a Dims descriptor
    inline int64_t volume(const nvinfer1::Dims& d);
    //build an engine from an ONNX model file
    ICudaEngine* build_engine_onnx(Logger gLogger, unsigned int maxBatchSize, unsigned int maxWorkSpaceSize, IBuilder* builder, IBuilderConfig* config, std::string& source_onnx);
    //build an engine from Caffe weights + deploy prototxt, marking vecOutputs as outputs
    ICudaEngine* build_engine_caffe(Logger gLogger, unsigned int maxBatchSize, unsigned int maxWorkSpaceSize, IBuilder* builder, IBuilderConfig* config,
                const std::string& strCaffeModelFile, const std::string& strCaffeDeployFile, const std::vector<std::string>& vecOutputs);
    //build + serialize an ONNX model into *modelStream
    void ONNXToModel(Logger gLogger, unsigned int maxBatchSize, unsigned int maxWorkSpaceSize, IHostMemory** modelStream, std::string& onnx_model_name);
    //build + serialize a Caffe model into *modelStream
    void CaffeToModel(Logger gLogger, unsigned int maxBatchSize, unsigned int maxWorkSpaceSize, IHostMemory** modelStream, std::string& caffe_model_name, std::string& caffe_deploy_name, std::vector<std::string>& outputs);
    //implicit-batch inference with host input upload
    void doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, unsigned int inputIndex, float* input, int inputSize,
                unsigned int ouputIndex, float* output, int outputSize, int batchSize);
    //implicit-batch inference, input already on device
    void doInferenceV2(IExecutionContext& context, cudaStream_t& stream, void **buffers, unsigned int ouputIndex, float* output, int outputSize, int batchSize);
    //explicit-batch inference with host input upload
    void doInferenceV3(IExecutionContext& context, cudaStream_t& stream, void **buffers, unsigned int inputIndex, float* input, int inputSize,
                unsigned int ouputIndex, float* output, int outputSize, int batchSize);
    //explicit-batch inference, input already on device
    void doInferenceV4(IExecutionContext& context, cudaStream_t& stream, void **buffers, unsigned int ouputIndex, float* output, int outputSize, int batchSize);
    float* pfBuffers_[2];              //device buffers: [0]=network input, [1]=network output
    float* pfInputData_ = nullptr;     //host-side input staging (CPU preprocess path)
    float* pfOutputData_ = nullptr;    //host-side output buffer
    uint8_t* pu8ImgHost_ = nullptr;    //pinned host staging buffer for raw image bytes
    uint8_t* pu8ImgDevice_ = nullptr;  //device staging buffer for raw image bytes
    unsigned int uiInputIndex_ = 0, uiOutputIndex_ = 0; //engine binding indices
    cudaStream_t* pImagePreprocessStream_ = nullptr; //CUDA stream for image preprocessing
    cudaStream_t* pInferenceModelStream_ = nullptr;  //CUDA stream for model inference
    Logger* pGLogger_ = nullptr;       //TensorRT logger
    IRuntime* pRuntime_ = nullptr;     //TensorRT runtime
    ICudaEngine* pEngine_ = nullptr;   //deserialized engine
    IExecutionContext* pContext_ = nullptr; //execution context used by doInference*
private:
};
#endif //END OF _INFERENCE_H_

View File

@ -0,0 +1,199 @@
#include "retinanet_classify_inference.h"
RetinanetClassifyInference::RetinanetClassifyInference() {} //trivial: all setup is done in RetinanetClassifyInferenceInit
RetinanetClassifyInference::~RetinanetClassifyInference() {} //trivial: resources are released by RetinanetClassifyInferenceDeInit
//Initialize the Retinanet classify model:
//  - create the preprocess/inference CUDA streams and host/device buffers
//  - deserialize the TensorRT engine from strEngineName
//  - create the execution context and resolve the input/output binding indices.
//strModelName/strDeployName are consumed only by the disabled engine-build path.
//Returns 0 on success, -1 if the engine file cannot be read.
int RetinanetClassifyInference::RetinanetClassifyInferenceInit(ModelInfo* pRetinanetClassifyModelInfo, const std::string& strModelName, const std::string& strDeployName, const std::string& strEngineName)
{
    pRetinanetClassifyModelInfo_ = pRetinanetClassifyModelInfo;
    //resource allocation (streams, host- and device-side memory)
    cudaSetDevice(DEVICE); //select the GPU
    //CUDA stream for image preprocessing
    pImagePreprocessStream_ = new cudaStream_t;
    CUDA_CHECK(cudaStreamCreate(pImagePreprocessStream_));
    //CUDA stream for model inference
    pInferenceModelStream_ = new cudaStream_t;
    CUDA_CHECK(cudaStreamCreate(pInferenceModelStream_));
    pGLogger_ = new Logger;
    //device buffers for the network input/output
    pfBuffers_[0] = nullptr; pfBuffers_[1] = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&pfBuffers_[0], pRetinanetClassifyModelInfo_->uiInputSize * sizeof(float))); //input buffer
    CUDA_CHECK(cudaMalloc((void**)&pfBuffers_[1], pRetinanetClassifyModelInfo_->uiOutputSize * sizeof(float))); //output buffer
    //FIX: the original assigned `new uint8_t` to these pointers and then immediately
    //overwrote them with cudaMallocHost/cudaMalloc, leaking one byte per pointer.
    pu8ImgHost_ = nullptr;
    pu8ImgDevice_ = nullptr;
    CUDA_CHECK(cudaMallocHost((void**)&pu8ImgHost_, MAX_IMAGE_INPUT_SIZE_THRESH * pRetinanetClassifyModelInfo_->uiChannel)); //pinned host staging buffer
    CUDA_CHECK(cudaMalloc((void**)&pu8ImgDevice_, MAX_IMAGE_INPUT_SIZE_THRESH * pRetinanetClassifyModelInfo_->uiChannel)); //device staging buffer
    pfInputData_ = new float[pRetinanetClassifyModelInfo_->uiBatchSize * pRetinanetClassifyModelInfo_->uiInputSize];
    pfOutputData_ = new float[pRetinanetClassifyModelInfo_->uiBatchSize * pRetinanetClassifyModelInfo_->uiOutputSize];
    //one-off engine build path (disabled): compile the caffe model into a TensorRT
    //engine file; once the engine file exists this does not need to run again
#if 0
    std::vector<std::string> vecOutputs = {pRetinanetClassifyModelInfo_->strOutputBlobName};
    if (!strModelName.empty() && !strDeployName.empty()) {
        IHostMemory* modelStream{ nullptr };
        CaffeToModel(*pGLogger_, pRetinanetClassifyModelInfo_->uiBatchSize, MAX_WORKSPAXE_SIZE, &modelStream, strModelName, strDeployName, vecOutputs);
        assert(modelStream != nullptr);
        std::ofstream p(strEngineName, std::ios::binary);
        if (!p) {
            std::cerr << "could not open plan output file" << std::endl;
            return -1;
        }
        p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
        modelStream->destroy();
    }
#endif
    //deserialize the engine and prepare for inference
    std::ifstream file(strEngineName, std::ios::binary);
    if (!file.good()) {
        std::cerr << "read " << strEngineName << " error!" << std::endl;
        return -1;
    }
    //read the whole serialized plan file into memory
    char *trtModelStream = nullptr;
    size_t size = 0;
    file.seekg(0, file.end);    //seek to the end to learn the file size
    size = file.tellg();
    file.seekg(0, file.beg);    //rewind to the beginning
    trtModelStream = new char[size];
    assert(trtModelStream);
    file.read(trtModelStream, size); //read the serialized engine into trtModelStream
    file.close();
    //1. create the runtime (needs the logger for diagnostics)
    pRuntime_ = createInferRuntime(*pGLogger_);
    assert(pRuntime_ != nullptr);
    //2. deserialize the engine from the plan bytes
    pEngine_ = pRuntime_->deserializeCudaEngine(trtModelStream, size);
    assert(pEngine_ != nullptr);
    //3. create the execution context used to launch inference
    pContext_ = pEngine_->createExecutionContext();
    assert(pContext_ != nullptr);
    delete[] trtModelStream; //plan buffer is no longer needed
    std::cout<<"Engine get NB Bindings is: "<<pEngine_->getNbBindings()<<std::endl;
    assert(pEngine_->getNbBindings() == 2); //exactly one input and one output binding expected
    //resolve the input/output binding indices by blob name
    uiInputIndex_ = pEngine_->getBindingIndex((pRetinanetClassifyModelInfo_->strInputBlobName).c_str());
    uiOutputIndex_ = pEngine_->getBindingIndex((pRetinanetClassifyModelInfo_->strOutputBlobName).c_str());
    std::cout<<"inputIndex: "<<uiInputIndex_<<"\toutputIndex: "<<uiOutputIndex_<<std::endl;
    assert(uiInputIndex_ == 0);
    assert(uiOutputIndex_ == 1);
    return 0;
}
//Release every resource acquired by RetinanetClassifyInferenceInit:
//CUDA streams, pinned/device buffers, TensorRT context/engine/runtime,
//the logger and the host staging arrays. Returns 0.
int RetinanetClassifyInference::RetinanetClassifyInferenceDeInit()
{
    //FIX: the original dereferenced the stream pointers (*p) BEFORE the null
    //check, crashing when Init had never run; destroy only inside the guard.
    if(pImagePreprocessStream_){
        CUDA_CHECK(cudaStreamDestroy(*pImagePreprocessStream_)); //release the preprocess stream
        delete pImagePreprocessStream_;
        pImagePreprocessStream_ = nullptr;
    }
    if(pInferenceModelStream_){
        CUDA_CHECK(cudaStreamDestroy(*pInferenceModelStream_)); //release the inference stream
        delete pInferenceModelStream_;
        pInferenceModelStream_ = nullptr;
    }
    CUDA_CHECK(cudaFree(pu8ImgDevice_));     //device staging buffer
    CUDA_CHECK(cudaFreeHost(pu8ImgHost_));   //pinned host staging buffer
    CUDA_CHECK(cudaFree(pfBuffers_[0]));     //device input buffer
    CUDA_CHECK(cudaFree(pfBuffers_[1]));     //device output buffer
    //tear down TensorRT objects (context before engine before runtime),
    //guarding against partial initialization
    if(pContext_){ pContext_->destroy(); pContext_ = nullptr; }
    if(pEngine_){ pEngine_->destroy(); pEngine_ = nullptr; }
    if(pRuntime_){ pRuntime_->destroy(); pRuntime_ = nullptr; }
    if(pGLogger_){ //release the logger
        delete pGLogger_;
        pGLogger_ = nullptr;
    }
    if(pfInputData_){
        delete[] pfInputData_;
        pfInputData_ = nullptr;
    }
    if(pfOutputData_){
        delete[] pfOutputData_;
        pfOutputData_ = nullptr;
    }
    return 0;
}
//Run the Retinanet binary classifier on one frame.
//Preprocesses (CUDA kernel or CPU letterbox + mean subtraction), runs
//inference, and returns true when raw output[1] exceeds output[0].
//Assumes frame holds uiChannel interleaved bytes per pixel — TODO confirm against callers.
bool RetinanetClassifyInference::RetinanetClassifyInferenceModel(cv::Mat& frame)
{
    //FIX: removed unused locals (img_width/img_height, size_image_dst, float a[2])
    //and the dead commented-out timing prints from the original.
    size_t size_image_src = frame.cols * frame.rows * pRetinanetClassifyModelInfo_->uiChannel;
    //1. preprocess
#ifdef ENABLE_CUDA_PREPROCESS
    memcpy(pu8ImgHost_, frame.data, size_image_src); //stage the frame in pinned host memory
    CUDA_CHECK(cudaMemcpyAsync(pu8ImgDevice_, pu8ImgHost_, size_image_src, cudaMemcpyHostToDevice, *pImagePreprocessStream_)); //copy to device
    retinanet_classify_preprocess_kernel_img(pu8ImgDevice_, frame.cols, frame.rows, (float*)pfBuffers_[0], pRetinanetClassifyModelInfo_->uiModelWidth, pRetinanetClassifyModelInfo_->uiModelHeight, *pImagePreprocessStream_);
    cudaStreamSynchronize(*pImagePreprocessStream_); //input tensor must be ready before inference
#else
    cv::Mat pr_img = preprocess_img(frame, pRetinanetClassifyModelInfo_->uiModelWidth, pRetinanetClassifyModelInfo_->uiModelHeight); // letterbox BGR to RGB
    int n = 0;
    for (int row = 0; row < pRetinanetClassifyModelInfo_->uiModelHeight; ++row) {
        uchar* uc_pixel = pr_img.data + row * pr_img.step;
        for (int col = 0; col < pRetinanetClassifyModelInfo_->uiModelWidth; ++col) {
            //HWC BGR -> CHW RGB with per-channel mean subtraction (104/117/123)
            pfInputData_[n] = (float)uc_pixel[2] - 104;
            pfInputData_[n + pRetinanetClassifyModelInfo_->uiModelHeight * pRetinanetClassifyModelInfo_->uiModelWidth] = (float)uc_pixel[1] - 117;
            pfInputData_[n + 2 * pRetinanetClassifyModelInfo_->uiModelHeight * pRetinanetClassifyModelInfo_->uiModelWidth] = (float)uc_pixel[0] - 123;
            uc_pixel += pRetinanetClassifyModelInfo_->uiChannel;
            ++n;
        }
    }
#endif
    //2. inference
#ifdef ENABLE_CUDA_PREPROCESS
    doInferenceV2(*pContext_, *pInferenceModelStream_, (void**)pfBuffers_,
                    uiOutputIndex_, pfOutputData_, pRetinanetClassifyModelInfo_->uiOutputSize,
                    pRetinanetClassifyModelInfo_->uiBatchSize);
#else
    doInference(*pContext_, *pInferenceModelStream_, (void**)pfBuffers_,
                    uiInputIndex_, pfInputData_, pRetinanetClassifyModelInfo_->uiInputSize,
                    uiOutputIndex_, pfOutputData_, pRetinanetClassifyModelInfo_->uiOutputSize,
                    pRetinanetClassifyModelInfo_->uiBatchSize);
#endif
    //3. postprocess: binary decision on the two raw scores
    std::cout.setf(std::ios_base::fixed, std::ios_base::floatfield);
    if(pfOutputData_[0] < pfOutputData_[1]){
        return true;
    }else{
        return false;
    }
}

View File

@ -0,0 +1,26 @@
#ifndef _RETINANET_CLASSIFY_INFERENCE_H_
#define _RETINANET_CLASSIFY_INFERENCE_H_
#include "preprocess.h"
#include "inference.h"
#include "postprocess.h"
using namespace nvinfer1;
using namespace nvcaffeparser1;
using namespace std;
//Binary classifier built on the shared Inference base: wraps engine
//init/deinit and a per-frame true/false classification call.
class RetinanetClassifyInference: public Inference
{
public:
    RetinanetClassifyInference();
    ~RetinanetClassifyInference();
    //allocate resources and deserialize the engine; returns 0 on success, -1 on failure
    int RetinanetClassifyInferenceInit(ModelInfo* pRetinanetClassifyModelInfo, const std::string& strModelName, const std::string& strDeployName, const std::string& strEngineName);
    //release every resource acquired by Init; returns 0
    int RetinanetClassifyInferenceDeInit(void);
    //classify one frame; true when the positive-class score wins
    bool RetinanetClassifyInferenceModel(cv::Mat& frame);
private:
    ModelInfo* pRetinanetClassifyModelInfo_ = nullptr; //model metadata (sizes, blob names); not owned
};
#endif //END OF _RETINANET_CLASSIFY_INFERENCE_H_

View File

@ -0,0 +1,227 @@
#include "yolov5_classify_inference.h"
YoloV5ClassifyInference::YoloV5ClassifyInference() {} //trivial: all setup is done in YoloV5ClassifyInferenceInit
YoloV5ClassifyInference::~YoloV5ClassifyInference() {} //trivial: resources are released by YoloV5ClassifyInferenceDeinit
//Initialize the YoloV5 classify model: create CUDA streams and buffers,
//register TensorRT plugins, deserialize the engine from strEngineName and
//resolve binding indices. strModelName is used only by the disabled
//ONNX-build path. Returns 0 on success, -1 if the engine file cannot be read.
int YoloV5ClassifyInference::YoloV5ClassifyInferenceInit(ModelInfo* pYoloV5ClassifyModelInfo, const std::string& strModelName, const std::string& strEngineName)
{
    pYoloV5ClassifyModelInfo_ = pYoloV5ClassifyModelInfo;
    //resource allocation (streams, host- and device-side memory)
    cudaSetDevice(DEVICE); //select the GPU
    //CUDA stream for image preprocessing
    pImagePreprocessStream_ = new cudaStream_t;
    CUDA_CHECK(cudaStreamCreate(pImagePreprocessStream_));
    //CUDA stream for model inference
    pInferenceModelStream_ = new cudaStream_t;
    CUDA_CHECK(cudaStreamCreate(pInferenceModelStream_));
    pGLogger_ = new Logger;
    //device buffers for the network input/output
    pfBuffers_[0] = nullptr; pfBuffers_[1] = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&pfBuffers_[0], pYoloV5ClassifyModelInfo_->uiInputSize * sizeof(float))); //input buffer
    CUDA_CHECK(cudaMalloc((void**)&pfBuffers_[1], pYoloV5ClassifyModelInfo_->uiOutputSize * sizeof(float))); //output buffer
    //FIX: the original assigned `new uint8_t` to these pointers and then immediately
    //overwrote them with cudaMallocHost/cudaMalloc, leaking one byte per pointer.
    pu8ImgHost_ = nullptr;
    pu8ImgDevice_ = nullptr;
    CUDA_CHECK(cudaMallocHost((void**)&pu8ImgHost_, MAX_IMAGE_INPUT_SIZE_THRESH * pYoloV5ClassifyModelInfo_->uiChannel)); //pinned host staging buffer
    CUDA_CHECK(cudaMalloc((void**)&pu8ImgDevice_, MAX_IMAGE_INPUT_SIZE_THRESH * pYoloV5ClassifyModelInfo_->uiChannel)); //device staging buffer
    pfInputData_ = new float[pYoloV5ClassifyModelInfo_->uiBatchSize * pYoloV5ClassifyModelInfo_->uiInputSize];
    pfOutputData_ = new float[pYoloV5ClassifyModelInfo_->uiBatchSize * pYoloV5ClassifyModelInfo_->uiOutputSize];
    //one-off engine build path (disabled): compile the ONNX model into a TensorRT
    //engine file; once the engine file exists this does not need to run again
#if 0
    if (!strModelName.empty()) {
        IHostMemory* modelStream{ nullptr };
        ONNXToModel(*pGLogger_, pYoloV5ClassifyModelInfo_->uiBatchSize, MAX_WORKSPAXE_SIZE, &modelStream, strModelName);
        assert(modelStream != nullptr);
        std::ofstream p(strEngineName, std::ios::binary);
        if (!p) {
            std::cerr << "could not open plan output file" << std::endl;
            return -1;
        }
        p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
        modelStream->destroy();
    }
#endif
    //deserialize the engine and prepare for inference
    std::ifstream file(strEngineName, std::ios::binary);
    if (!file.good()) {
        std::cerr << "read " << strEngineName << " error!" << std::endl;
        return -1;
    }
    //read the whole serialized plan file into memory
    char *trtModelStream = nullptr;
    size_t size = 0;
    file.seekg(0, file.end);    //seek to the end to learn the file size
    size = file.tellg();
    file.seekg(0, file.beg);    //rewind to the beginning
    trtModelStream = new char[size];
    assert(trtModelStream);
    file.read(trtModelStream, size); //read the serialized engine into trtModelStream
    file.close();
    //1. create the runtime (needs the logger for diagnostics)
    pRuntime_ = createInferRuntime(*pGLogger_);
    assert(pRuntime_ != nullptr);
    //2. register plugins, then deserialize the engine from the plan bytes
    //(FIX: dropped the unused `bool didInitPlugins` local)
    initLibNvInferPlugins(nullptr, "");
    pEngine_ = pRuntime_->deserializeCudaEngine(trtModelStream, size);
    assert(pEngine_ != nullptr);
    //3. create the execution context used to launch inference
    pContext_ = pEngine_->createExecutionContext();
    assert(pContext_ != nullptr);
    delete[] trtModelStream; //plan buffer is no longer needed
    std::cout<<"Engine get NB Bindings is: "<<pEngine_->getNbBindings()<<std::endl;
    assert(pEngine_->getNbBindings() == 2); //exactly one input and one output binding expected
    //resolve the input/output binding indices by blob name
    uiInputIndex_ = pEngine_->getBindingIndex((pYoloV5ClassifyModelInfo_->strInputBlobName).c_str());
    uiOutputIndex_ = pEngine_->getBindingIndex((pYoloV5ClassifyModelInfo_->strOutputBlobName).c_str());
    std::cout<<"inputIndex: "<<uiInputIndex_<<"\toutputIndex: "<<uiOutputIndex_<<std::endl;
    assert(uiInputIndex_ == 0);
    assert(uiOutputIndex_ == 1);
    return 0;
}
//Release every resource acquired by YoloV5ClassifyInferenceInit:
//CUDA streams, pinned/device buffers, TensorRT context/engine/runtime,
//the logger and the host staging arrays. Returns 0.
int YoloV5ClassifyInference::YoloV5ClassifyInferenceDeinit()
{
    //FIX: the original dereferenced the stream pointers (*p) BEFORE the null
    //check, crashing when Init had never run; destroy only inside the guard.
    if(pImagePreprocessStream_){
        CUDA_CHECK(cudaStreamDestroy(*pImagePreprocessStream_)); //release the preprocess stream
        delete pImagePreprocessStream_;
        pImagePreprocessStream_ = nullptr;
    }
    if(pInferenceModelStream_){
        CUDA_CHECK(cudaStreamDestroy(*pInferenceModelStream_)); //release the inference stream
        delete pInferenceModelStream_;
        pInferenceModelStream_ = nullptr;
    }
    CUDA_CHECK(cudaFree(pu8ImgDevice_));     //device staging buffer
    CUDA_CHECK(cudaFreeHost(pu8ImgHost_));   //pinned host staging buffer
    CUDA_CHECK(cudaFree(pfBuffers_[0]));     //device input buffer
    CUDA_CHECK(cudaFree(pfBuffers_[1]));     //device output buffer
    //tear down TensorRT objects (context before engine before runtime),
    //guarding against partial initialization
    if(pContext_){ pContext_->destroy(); pContext_ = nullptr; }
    if(pEngine_){ pEngine_->destroy(); pEngine_ = nullptr; }
    if(pRuntime_){ pRuntime_->destroy(); pRuntime_ = nullptr; }
    if(pGLogger_){ //release the logger
        delete pGLogger_;
        pGLogger_ = nullptr;
    }
    if(pfInputData_){
        delete[] pfInputData_;
        pfInputData_ = nullptr;
    }
    if(pfOutputData_){
        delete[] pfOutputData_;
        pfOutputData_ = nullptr;
    }
    return 0;
}
//Run the YoloV5 classifier on one frame: preprocess (CUDA kernel or CPU
//letterbox + ImageNet mean/std normalization), infer, softmax the raw
//outputs and write the argmax class index into *uiClassLabel.
//Returns 0 on success.
int YoloV5ClassifyInference::YoloV5ClassifyInferenceModel(cv::Mat& frame, unsigned int* uiClassLabel)
{
    size_t size_image_src = frame.cols * frame.rows * pYoloV5ClassifyModelInfo_->uiChannel;
    unsigned int img_width = frame.cols, img_height = frame.rows;
    auto preprocess_start = std::chrono::system_clock::now(); //timing start
#ifdef ENABLE_CUDA_PREPROCESS
    memcpy(pu8ImgHost_, frame.data, size_image_src); //stage the frame in pinned host memory
    CUDA_CHECK(cudaMemcpyAsync(pu8ImgDevice_, pu8ImgHost_, size_image_src, cudaMemcpyHostToDevice, *pImagePreprocessStream_)); //copy to device
    yolov5_classify_preprocess_kernel_img(pu8ImgDevice_, frame.cols, frame.rows, (float*)pfBuffers_[0], pYoloV5ClassifyModelInfo_->uiModelWidth, pYoloV5ClassifyModelInfo_->uiModelHeight, *pImagePreprocessStream_);
    cudaStreamSynchronize(*pImagePreprocessStream_); //input tensor must be ready before inference
#else
    cv::Mat pr_img = preprocess_img(frame, pYoloV5ClassifyModelInfo_->uiModelWidth, pYoloV5ClassifyModelInfo_->uiModelHeight); // letterbox BGR to RGB
    int n = 0;
    for (int row = 0; row < pYoloV5ClassifyModelInfo_->uiModelHeight; ++row) {
        uchar* uc_pixel = pr_img.data + row * pr_img.step;
        for (int col = 0; col < pYoloV5ClassifyModelInfo_->uiModelWidth; ++col) {
            //normalize to [0,1] then apply ImageNet mean/std per channel
            pfInputData_[n] = ((float)uc_pixel[2]/255.0 - 0.406) / 0.225;
            pfInputData_[n + pYoloV5ClassifyModelInfo_->uiModelHeight * pYoloV5ClassifyModelInfo_->uiModelWidth] = ((float)uc_pixel[1]/255.0 - 0.456) / 0.224;
            pfInputData_[n + 2 * pYoloV5ClassifyModelInfo_->uiModelHeight * pYoloV5ClassifyModelInfo_->uiModelWidth] = ((float)uc_pixel[0]/255.0 - 0.485) / 0.229;
            uc_pixel += pYoloV5ClassifyModelInfo_->uiChannel;
            ++n;
        }
    }
#endif
    auto preprocess_end = std::chrono::system_clock::now();
    std::cout << "yolov5 classify preprocess time: " << std::chrono::duration_cast<std::chrono::milliseconds>(preprocess_end - preprocess_start).count() << "ms" << std::endl;
    //2. inference
    float fResizeRatio = GetResizeRatio(img_width, img_height, pYoloV5ClassifyModelInfo_->uiModelWidth, pYoloV5ClassifyModelInfo_->uiModelHeight);
    auto start = std::chrono::system_clock::now(); //timing start
#ifdef ENABLE_CUDA_PREPROCESS
    doInferenceV4(*pContext_, *pInferenceModelStream_, (void**)pfBuffers_,
                    uiOutputIndex_, pfOutputData_, pYoloV5ClassifyModelInfo_->uiOutputSize,
                    pYoloV5ClassifyModelInfo_->uiBatchSize);
#else
    doInferenceV3(*pContext_, *pInferenceModelStream_, (void**)pfBuffers_,
                    uiInputIndex_, pfInputData_, pYoloV5ClassifyModelInfo_->uiInputSize,
                    uiOutputIndex_, pfOutputData_, pYoloV5ClassifyModelInfo_->uiOutputSize,
                    pYoloV5ClassifyModelInfo_->uiBatchSize);
#endif
    auto end = std::chrono::system_clock::now();
    std::cout << "yolov5 classify inference time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
    //3. postprocess: softmax over the raw logits, then argmax
    std::vector<double> vecInput, vecOutput;
    for(int i=0; i<pYoloV5ClassifyModelInfo_->uiOutputSize;i++){
        vecInput.push_back(pfOutputData_[i]);
    }
    vecOutput = softMax(vecInput);
    *uiClassLabel = 0; //FIX: out-parameter is now always defined, even if no probability exceeds fValue
    float fValue = 0.0;
    for(unsigned int i=0; i<pYoloV5ClassifyModelInfo_->uiOutputSize;i++){
        std::cout<<vecOutput[i]<<"\t";
        if(vecOutput[i]>fValue){
            fValue = vecOutput[i];
            *uiClassLabel = i;
        }
        std::cout<<std::endl;
    }
    return 0; //FIX: function is declared int but the original had no return statement (undefined behavior)
}
//Numerically stable softmax: shift every logit by the maximum before
//exponentiating so exp() cannot overflow, then normalize to sum to 1.
std::vector<double> YoloV5ClassifyInference::softMax(std::vector<double> vecInput)
{
    //find the largest logit
    double peak = vecInput[0];
    for (size_t idx = 1; idx < vecInput.size(); ++idx) {
        if (vecInput[idx] > peak) {
            peak = vecInput[idx];
        }
    }
    //normalizing denominator: sum of shifted exponentials
    double denom = 0;
    for (const double v : vecInput) {
        denom += exp(v - peak);
    }
    //emit each probability
    std::vector<double> vecResult;
    vecResult.reserve(vecInput.size());
    for (const double v : vecInput) {
        vecResult.push_back(exp(v - peak) / denom);
    }
    return vecResult;
}

View File

@ -0,0 +1,28 @@
#ifndef _YOLOV5_CLASSIFY_INFERENCE_H_
#define _YOLOV5_CLASSIFY_INFERENCE_H_
#include "preprocess.h"
#include "inference.h"
#include "postprocess.h"
using namespace nvinfer1;
using namespace nvcaffeparser1;
using namespace std;
//Multi-class classifier built on the shared Inference base: engine
//init/deinit plus a per-frame call that reports the argmax class index.
class YoloV5ClassifyInference: public Inference
{
public:
    YoloV5ClassifyInference();
    ~YoloV5ClassifyInference();
    //allocate resources and deserialize the engine; returns 0 on success, -1 on failure
    int YoloV5ClassifyInferenceInit(ModelInfo* pYoloV5ClassifyModelInfo, const std::string& strModelName, const std::string& strEngineName);
    //release every resource acquired by Init; returns 0
    int YoloV5ClassifyInferenceDeinit(void);
    //classify one frame; writes the winning class index into *uiClassLabel
    int YoloV5ClassifyInferenceModel(cv::Mat& frame, unsigned int* uiClassLabel);
private:
    ModelInfo* pYoloV5ClassifyModelInfo_ = nullptr; //model metadata (sizes, blob names); not owned
    //numerically stable softmax used for postprocessing the raw logits
    std::vector<double> softMax(std::vector<double> vecInput);
};
#endif //END OF _YOLOV5_CLASSIFY_INFERENCE_H_

View File

@ -0,0 +1,208 @@
#include "yolov5_clear_detect_inference.h"
YoloV5ClearDetectInference::YoloV5ClearDetectInference() {} //trivial: all setup is done in YoloV5ClearDetectInferenceInit
YoloV5ClearDetectInference::~YoloV5ClearDetectInference() {} //trivial: resources are released by YoloV5ClearDetectInferenceDeinit
//Initialize the YoloV5 "clear" detector: create CUDA streams and buffers,
//register TensorRT plugins, deserialize the engine from strEngineName and
//resolve binding indices. strModelName is used only by the disabled
//ONNX-build path. Returns 0 on success, -1 if the engine file cannot be read.
int YoloV5ClearDetectInference::YoloV5ClearDetectInferenceInit(ClearModelInfo* pYoloV5ClearModelInfo, const std::string& strModelName, const std::string& strEngineName)
{
    pYoloV5ClearModelInfo_ = pYoloV5ClearModelInfo;
    //resource allocation (streams, host- and device-side memory)
    cudaSetDevice(DEVICE); //select the GPU
    //CUDA stream for image preprocessing
    pImagePreprocessStream_ = new cudaStream_t;
    CUDA_CHECK(cudaStreamCreate(pImagePreprocessStream_));
    //CUDA stream for model inference
    pInferenceModelStream_ = new cudaStream_t;
    CUDA_CHECK(cudaStreamCreate(pInferenceModelStream_));
    pGLogger_ = new Logger;
    //device buffers for the network input/output
    pfBuffers_[0] = nullptr; pfBuffers_[1] = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&pfBuffers_[0], pYoloV5ClearModelInfo_->modelInfo.uiInputSize * sizeof(float))); //input buffer
    CUDA_CHECK(cudaMalloc((void**)&pfBuffers_[1], pYoloV5ClearModelInfo_->modelInfo.uiOutputSize * sizeof(float))); //output buffer
    //FIX: the original assigned `new uint8_t` to these pointers and then immediately
    //overwrote them with cudaMallocHost/cudaMalloc, leaking one byte per pointer.
    pu8ImgHost_ = nullptr;
    pu8ImgDevice_ = nullptr;
    CUDA_CHECK(cudaMallocHost((void**)&pu8ImgHost_, MAX_IMAGE_INPUT_SIZE_THRESH * pYoloV5ClearModelInfo_->modelInfo.uiChannel)); //pinned host staging buffer
    CUDA_CHECK(cudaMalloc((void**)&pu8ImgDevice_, MAX_IMAGE_INPUT_SIZE_THRESH * pYoloV5ClearModelInfo_->modelInfo.uiChannel)); //device staging buffer
    pfInputData_ = new float[pYoloV5ClearModelInfo_->modelInfo.uiBatchSize * pYoloV5ClearModelInfo_->modelInfo.uiInputSize];
    pfOutputData_ = new float[pYoloV5ClearModelInfo_->modelInfo.uiBatchSize * pYoloV5ClearModelInfo_->modelInfo.uiOutputSize];
    //one-off engine build path (disabled): compile the ONNX model into a TensorRT
    //engine file; once the engine file exists this does not need to run again
#if 0
    if (!strModelName.empty()) {
        IHostMemory* modelStream{ nullptr };
        ONNXToModel(*pGLogger_, pYoloV5ClearModelInfo_->modelInfo.uiBatchSize, MAX_WORKSPAXE_SIZE, &modelStream, strModelName);
        assert(modelStream != nullptr);
        std::ofstream p(strEngineName, std::ios::binary);
        if (!p) {
            std::cerr << "could not open plan output file" << std::endl;
            return -1;
        }
        p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
        modelStream->destroy();
    }
#endif
    //deserialize the engine and prepare for inference
    std::ifstream file(strEngineName, std::ios::binary);
    if (!file.good()) {
        std::cerr << "read " << strEngineName << " error!" << std::endl;
        return -1;
    }
    //read the whole serialized plan file into memory
    char *trtModelStream = nullptr;
    size_t size = 0;
    file.seekg(0, file.end);    //seek to the end to learn the file size
    size = file.tellg();
    file.seekg(0, file.beg);    //rewind to the beginning
    trtModelStream = new char[size];
    assert(trtModelStream);
    file.read(trtModelStream, size); //read the serialized engine into trtModelStream
    file.close();
    //1. create the runtime (needs the logger for diagnostics)
    pRuntime_ = createInferRuntime(*pGLogger_);
    assert(pRuntime_ != nullptr);
    //2. register plugins, then deserialize the engine from the plan bytes
    //(FIX: dropped the unused `bool didInitPlugins` local)
    initLibNvInferPlugins(nullptr, "");
    pEngine_ = pRuntime_->deserializeCudaEngine(trtModelStream, size);
    assert(pEngine_ != nullptr);
    //3. create the execution context used to launch inference
    pContext_ = pEngine_->createExecutionContext();
    assert(pContext_ != nullptr);
    delete[] trtModelStream; //plan buffer is no longer needed
    assert(pEngine_->getNbBindings() == 2); //exactly one input and one output binding expected
    //resolve the input/output binding indices by blob name
    uiInputIndex_ = pEngine_->getBindingIndex((pYoloV5ClearModelInfo_->modelInfo.strInputBlobName).c_str());
    uiOutputIndex_ = pEngine_->getBindingIndex((pYoloV5ClearModelInfo_->modelInfo.strOutputBlobName).c_str());
    assert(uiInputIndex_ == 0);
    assert(uiOutputIndex_ == 1);
    return 0;
}
//Release every resource acquired by YoloV5ClearDetectInferenceInit:
//CUDA streams, pinned/device buffers, TensorRT context/engine/runtime,
//the logger and the host staging arrays. Returns 0.
int YoloV5ClearDetectInference::YoloV5ClearDetectInferenceDeinit()
{
    //FIX: the original dereferenced the stream pointers (*p) BEFORE the null
    //check, crashing when Init had never run; destroy only inside the guard.
    if(pImagePreprocessStream_){
        CUDA_CHECK(cudaStreamDestroy(*pImagePreprocessStream_)); //release the preprocess stream
        delete pImagePreprocessStream_;
        pImagePreprocessStream_ = nullptr;
    }
    if(pInferenceModelStream_){
        CUDA_CHECK(cudaStreamDestroy(*pInferenceModelStream_)); //release the inference stream
        delete pInferenceModelStream_;
        pInferenceModelStream_ = nullptr;
    }
    CUDA_CHECK(cudaFree(pu8ImgDevice_));     //device staging buffer
    CUDA_CHECK(cudaFreeHost(pu8ImgHost_));   //pinned host staging buffer
    CUDA_CHECK(cudaFree(pfBuffers_[0]));     //device input buffer
    CUDA_CHECK(cudaFree(pfBuffers_[1]));     //device output buffer
    //tear down TensorRT objects (context before engine before runtime),
    //guarding against partial initialization
    if(pContext_){ pContext_->destroy(); pContext_ = nullptr; }
    if(pEngine_){ pEngine_->destroy(); pEngine_ = nullptr; }
    if(pRuntime_){ pRuntime_->destroy(); pRuntime_ = nullptr; }
    if(pGLogger_){ //release the logger
        delete pGLogger_;
        pGLogger_ = nullptr;
    }
    if(pfInputData_){
        delete[] pfInputData_;
        pfInputData_ = nullptr;
    }
    if(pfOutputData_){
        delete[] pfOutputData_;
        pfOutputData_ = nullptr;
    }
    return 0;
}
//Run the YoloV5 "clear" detector on one frame: preprocess (CUDA kernel or
//CPU letterbox + /255 normalization), infer, decode + NMS into vecRes, and
//map each detection's box back to original-image coordinates.
//Returns 0 on success.
int YoloV5ClearDetectInference::YoloV5ClearDetectInferenceModel(cv::Mat& frame, std::vector<ClearDetection>& vecRes)
{
    size_t size_image_src = frame.cols * frame.rows * pYoloV5ClearModelInfo_->modelInfo.uiChannel;
    unsigned int img_width = frame.cols, img_height = frame.rows;
    //1. preprocess
#ifdef ENABLE_CUDA_PREPROCESS
    memcpy(pu8ImgHost_, frame.data, size_image_src); //stage the frame in pinned host memory
    CUDA_CHECK(cudaMemcpyAsync(pu8ImgDevice_, pu8ImgHost_, size_image_src, cudaMemcpyHostToDevice, *pImagePreprocessStream_)); //copy to device
    yolov5_detect_preprocess_kernel_img(pu8ImgDevice_, frame.cols, frame.rows, (float*)pfBuffers_[0], pYoloV5ClearModelInfo_->modelInfo.uiModelWidth, pYoloV5ClearModelInfo_->modelInfo.uiModelHeight, *pImagePreprocessStream_);
    cudaStreamSynchronize(*pImagePreprocessStream_); //input tensor must be ready before inference
#else
    cv::Mat pr_img = preprocess_img(frame, pYoloV5ClearModelInfo_->modelInfo.uiModelWidth, pYoloV5ClearModelInfo_->modelInfo.uiModelHeight); // letterbox BGR to RGB
    int n = 0;
    for (int row = 0; row < pYoloV5ClearModelInfo_->modelInfo.uiModelHeight; ++row) {
        uchar* uc_pixel = pr_img.data + row * pr_img.step;
        for (int col = 0; col < pYoloV5ClearModelInfo_->modelInfo.uiModelWidth; ++col) {
            //HWC BGR -> CHW RGB, scaled to [0,1]
            pfInputData_[n] = (float)uc_pixel[2]/ 255.0;
            pfInputData_[n + pYoloV5ClearModelInfo_->modelInfo.uiModelHeight * pYoloV5ClearModelInfo_->modelInfo.uiModelWidth] = (float)uc_pixel[1]/ 255.0;
            pfInputData_[n + 2 * pYoloV5ClearModelInfo_->modelInfo.uiModelHeight * pYoloV5ClearModelInfo_->modelInfo.uiModelWidth] = (float)uc_pixel[0]/ 255.0;
            uc_pixel += pYoloV5ClearModelInfo_->modelInfo.uiChannel;
            ++n;
        }
    }
#endif
    //2. inference
    float fResizeRatio = GetResizeRatio(img_width, img_height, pYoloV5ClearModelInfo_->modelInfo.uiModelWidth, pYoloV5ClearModelInfo_->modelInfo.uiModelHeight);
#ifdef ENABLE_CUDA_PREPROCESS
    doInferenceV4(*pContext_, *pInferenceModelStream_, (void**)pfBuffers_,
                    uiOutputIndex_, pfOutputData_, pYoloV5ClearModelInfo_->modelInfo.uiOutputSize,
                    pYoloV5ClearModelInfo_->modelInfo.uiBatchSize);
#else
    doInferenceV3(*pContext_, *pInferenceModelStream_, (void**)pfBuffers_,
                    uiInputIndex_, pfInputData_, pYoloV5ClearModelInfo_->modelInfo.uiInputSize,
                    uiOutputIndex_, pfOutputData_, pYoloV5ClearModelInfo_->modelInfo.uiOutputSize,
                    pYoloV5ClearModelInfo_->modelInfo.uiBatchSize);
#endif
    //3. postprocess: decode the raw output and suppress overlaps
    yolov5ClearDecodeOpenCVNms(vecRes, pfOutputData_, pYoloV5ClearModelInfo_->modelInfo.uiOutputSize,
                pYoloV5ClearModelInfo_->clearModelParam.modelParam.uiDetSize,
                pYoloV5ClearModelInfo_->clearModelParam.modelParam.uiClassNum,
                pYoloV5ClearModelInfo_->clearModelParam.uiClearNum,
                pYoloV5ClearModelInfo_->clearModelParam.modelParam.fScoreThreshold,
                pYoloV5ClearModelInfo_->clearModelParam.modelParam.fNmsThreshold);
    //map each box from letterboxed model coordinates back to the original image
    for(size_t j = 0; j < vecRes.size(); j++){
        UpperVertexResetLocation(fResizeRatio, img_width, img_height, vecRes[j].detection); //top-left-padding coordinate restore
        // CenterResetLocation(fResizeRatio, img_width, img_height, pYoloV5ClearModelInfo_->modelInfo.uiModelWidth, pYoloV5ClearModelInfo_->modelInfo.uiModelHeight, vecRes[j].detection); //center-padding coordinate restore
    }
    return 0; //FIX: function is declared int but the original had no return statement (undefined behavior)
}

View File

@ -0,0 +1,28 @@
#ifndef _YOLOV5_CLEAR_DETECT_INFERENCE_H_
#define _YOLOV5_CLEAR_DETECT_INFERENCE_H_
#include "preprocess.h"
#include "inference.h"
#include "postprocess.h"
using namespace nvinfer1;
using namespace nvcaffeparser1;
using namespace std;
//Object detector built on the shared Inference base: engine init/deinit
//plus a per-frame call that fills a vector of decoded, NMS-filtered detections.
class YoloV5ClearDetectInference: public Inference
{
public:
    YoloV5ClearDetectInference();
    ~YoloV5ClearDetectInference();
    //allocate resources and deserialize the engine; returns 0 on success, -1 on failure
    int YoloV5ClearDetectInferenceInit(ClearModelInfo* pYoloV5ClearModelInfo, const std::string& strModelName, const std::string& strEngineName);
    //release every resource acquired by Init; returns 0
    int YoloV5ClearDetectInferenceDeinit(void);
    //detect on one frame; results (original-image coordinates) appended to vecRes
    int YoloV5ClearDetectInferenceModel(cv::Mat& frame, std::vector<ClearDetection>& vecRes);
private:
    ClearModelInfo* pYoloV5ClearModelInfo_ = nullptr; //model metadata + decode/NMS parameters; not owned
};
#endif //END OF _YOLOV5_CLEAR_DETECT_INFERENCE_H_

View File

@ -0,0 +1,211 @@
#include "yolov5_detect_inference.h"
// Construction/destruction are trivial: every resource is acquired in
// YoloV5DetectInferenceInit() and released in YoloV5DetectInferenceDeinit().
YoloV5DetectInference::YoloV5DetectInference() = default;
YoloV5DetectInference::~YoloV5DetectInference() = default;
// Initializes the detector: allocates CUDA streams and host/device buffers,
// (optionally) builds the engine from ONNX, then deserializes the engine from
// `strEngineName` and resolves the input/output binding indices.
// Returns 0 on success, -1 on failure (missing engine file / bad plan output).
int YoloV5DetectInference::YoloV5DetectInferenceInit(CommonModelInfo* pYoloV5ModelInfo, const std::string& strModelName, const std::string& strEngineName)
{
    pYoloV5ModelInfo_ = pYoloV5ModelInfo;
    // ---- Resource allocation (streams, host- and device-side memory) ----
    cudaSetDevice(DEVICE); // select the GPU
    // CUDA stream for image preprocessing
    pImagePreprocessStream_ = new cudaStream_t;
    CUDA_CHECK(cudaStreamCreate(pImagePreprocessStream_));
    // CUDA stream for model inference
    pInferenceModelStream_ = new cudaStream_t;
    CUDA_CHECK(cudaStreamCreate(pInferenceModelStream_));
    pGLogger_ = new Logger;
    // Device buffers bound to the engine's input/output
    pfBuffers_[0] = nullptr; pfBuffers_[1] = nullptr;
    CUDA_CHECK(cudaMalloc((void**)&pfBuffers_[0], pYoloV5ModelInfo_->modelInfo.uiInputSize * sizeof(float)));  // input binding
    CUDA_CHECK(cudaMalloc((void**)&pfBuffers_[1], pYoloV5ModelInfo_->modelInfo.uiOutputSize * sizeof(float))); // output binding
    // BUGFIX: the original assigned `new uint8_t` to both pointers and then
    // immediately overwrote them via cudaMallocHost/cudaMalloc, leaking the
    // new'd bytes. Start from nullptr and let CUDA own the allocations.
    pu8ImgHost_ = nullptr;
    pu8ImgDevice_ = nullptr;
    CUDA_CHECK(cudaMallocHost((void**)&pu8ImgHost_, MAX_IMAGE_INPUT_SIZE_THRESH * pYoloV5ModelInfo_->modelInfo.uiChannel)); // pinned host staging buffer
    CUDA_CHECK(cudaMalloc((void**)&pu8ImgDevice_, MAX_IMAGE_INPUT_SIZE_THRESH * pYoloV5ModelInfo_->modelInfo.uiChannel));  // device staging buffer
    pfInputData_ = new float[pYoloV5ModelInfo_->modelInfo.uiBatchSize * pYoloV5ModelInfo_->modelInfo.uiInputSize];
    pfOutputData_ = new float[pYoloV5ModelInfo_->modelInfo.uiBatchSize * pYoloV5ModelInfo_->modelInfo.uiOutputSize];
    // ---- (Optional, one-shot) build the engine from ONNX and serialize it ----
    // Once the engine file exists this step is unnecessary, hence disabled.
#if 0
    if (!strModelName.empty()) {
        IHostMemory* modelStream{ nullptr };
        ONNXToModel(*pGLogger_, pYoloV5ModelInfo_->modelInfo.uiBatchSize, MAX_WORKSPAXE_SIZE, &modelStream, strModelName);
        assert(modelStream != nullptr);
        std::ofstream p(strEngineName, std::ios::binary);
        if (!p) {
            std::cerr << "could not open plan output file" << std::endl;
            return -1;
        }
        p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
        modelStream->destroy();
    }
#endif
    // ---- Deserialize the engine and run inference from it ----
    std::ifstream file(strEngineName, std::ios::binary);
    if (!file.good()) {
        std::cerr << "read " << strEngineName << " error!" << std::endl;
        return -1;
    }
    // Read the whole serialized plan into memory.
    char *trtModelStream = nullptr;
    size_t size = 0;
    file.seekg(0, file.end);  // seek to end to measure the file
    size = file.tellg();
    file.seekg(0, file.beg);  // rewind to the beginning
    trtModelStream = new char[size];
    assert(trtModelStream);
    file.read(trtModelStream, size); // read the serialized engine bytes
    file.close();
    // 1. Runtime environment; gLogger receives TensorRT diagnostics.
    pRuntime_ = createInferRuntime(*pGLogger_);
    assert(pRuntime_ != nullptr);
    // 2. Deserialize the engine (plugins must be registered beforehand).
    bool didInitPlugins = initLibNvInferPlugins(nullptr, "");
    (void)didInitPlugins; // BUGFIX: was an unused variable; silence the warning
    pEngine_ = pRuntime_->deserializeCudaEngine(trtModelStream, size);
    assert(pEngine_ != nullptr);
    // 3. Execution context, used to launch the CUDA kernels during inference.
    pContext_ = pEngine_->createExecutionContext();
    assert(pContext_ != nullptr);
    delete[] trtModelStream; // plan bytes are no longer needed once deserialized
    std::cout<<"Engine get NB Bindings is: "<<pEngine_->getNbBindings()<<std::endl;
    assert(pEngine_->getNbBindings() == 2); // exactly one input + one output binding
    // Resolve the binding indices from the configured blob names.
    uiInputIndex_ = pEngine_->getBindingIndex((pYoloV5ModelInfo_->modelInfo.strInputBlobName).c_str());
    uiOutputIndex_ = pEngine_->getBindingIndex((pYoloV5ModelInfo_->modelInfo.strOutputBlobName).c_str());
    std::cout<<"inputIndex: "<<uiInputIndex_<<"\toutputIndex: "<<uiOutputIndex_<<std::endl;
    assert(uiInputIndex_ == 0);
    assert(uiOutputIndex_ == 1);
    return 0;
}
// Releases every resource acquired in Init. BUGFIX: the original dereferenced
// the stream pointers (`*pImagePreprocessStream_`) and called ->destroy() on
// the TensorRT objects without null checks, crashing if Deinit runs twice or
// after a partially failed Init. Every release is now guarded and the pointer
// nulled afterwards, making Deinit idempotent. Returns 0.
int YoloV5DetectInference::YoloV5DetectInferenceDeinit()
{
    // CUDA streams: destroy the stream, then free the holder object.
    if(pImagePreprocessStream_){
        CUDA_CHECK(cudaStreamDestroy(*pImagePreprocessStream_));
        delete pImagePreprocessStream_;
        pImagePreprocessStream_ = nullptr;
    }
    if(pInferenceModelStream_){
        CUDA_CHECK(cudaStreamDestroy(*pInferenceModelStream_));
        delete pInferenceModelStream_;
        pInferenceModelStream_ = nullptr;
    }
    // Image staging buffers (device + pinned host).
    if(pu8ImgDevice_){
        CUDA_CHECK(cudaFree(pu8ImgDevice_));
        pu8ImgDevice_ = nullptr;
    }
    if(pu8ImgHost_){
        CUDA_CHECK(cudaFreeHost(pu8ImgHost_));
        pu8ImgHost_ = nullptr;
    }
    // Engine input/output device buffers.
    if(pfBuffers_[0]){
        CUDA_CHECK(cudaFree(pfBuffers_[0]));
        pfBuffers_[0] = nullptr;
    }
    if(pfBuffers_[1]){
        CUDA_CHECK(cudaFree(pfBuffers_[1]));
        pfBuffers_[1] = nullptr;
    }
    // TensorRT teardown in reverse order of creation: context -> engine -> runtime.
    if(pContext_){
        pContext_->destroy();
        pContext_ = nullptr;
    }
    if(pEngine_){
        pEngine_->destroy();
        pEngine_ = nullptr;
    }
    if(pRuntime_){
        pRuntime_->destroy();
        pRuntime_ = nullptr;
    }
    if(pGLogger_){
        delete pGLogger_;
        pGLogger_ = nullptr;
    }
    // Host-side staging arrays.
    if(pfInputData_){
        delete[] pfInputData_;
        pfInputData_ = nullptr;
    }
    if(pfOutputData_){
        delete[] pfOutputData_;
        pfOutputData_ = nullptr;
    }
    return 0;
}
// Runs one frame through the detector: preprocess (letterbox + BGR->RGB +
// [0,1] scaling + HWC->CHW), TensorRT inference, then decode + NMS and a
// coordinate restore back onto the original frame. Detections are appended to
// vecRes. Returns 0 on success.
int YoloV5DetectInference::YoloV5DetectInferenceModel(cv::Mat& frame, std::vector<Detection>& vecRes)
{
    //1. Preprocess
    size_t size_image_src = frame.cols * frame.rows * pYoloV5ModelInfo_->modelInfo.uiChannel;
    unsigned int img_width = frame.cols, img_height = frame.rows;
    size_t size_image_dst = pYoloV5ModelInfo_->modelInfo.uiModelWidth * pYoloV5ModelInfo_->modelInfo.uiModelHeight * pYoloV5ModelInfo_->modelInfo.uiChannel;
    (void)size_image_dst; // BUGFIX: was computed but never used; kept for interface parity, warning silenced
    auto preprocess_start = std::chrono::system_clock::now();
#ifdef ENABLE_CUDA_PREPROCESS
    // GPU path: stage the frame in pinned host memory, copy to device, then
    // run the warp-affine preprocessing kernel on the preprocess stream.
    memcpy(pu8ImgHost_, frame.data, size_image_src);
    CUDA_CHECK(cudaMemcpyAsync(pu8ImgDevice_, pu8ImgHost_, size_image_src, cudaMemcpyHostToDevice, *pImagePreprocessStream_));
    yolov5_detect_preprocess_kernel_img(pu8ImgDevice_, frame.cols, frame.rows, (float*)pfBuffers_[0], pYoloV5ModelInfo_->modelInfo.uiModelWidth, pYoloV5ModelInfo_->modelInfo.uiModelHeight, *pImagePreprocessStream_);
    cudaStreamSynchronize(*pImagePreprocessStream_);
#else
    // CPU path: letterbox with OpenCV, then repack interleaved BGR uint8 into
    // planar RGB float (OpenCV frames are BGR by default, hence uc_pixel[2] first).
    cv::Mat pr_img = preprocess_img(frame, pYoloV5ModelInfo_->modelInfo.uiModelWidth, pYoloV5ModelInfo_->modelInfo.uiModelHeight); // letterbox BGR to RGB
    int n = 0;
    for (int row = 0; row < pYoloV5ModelInfo_->modelInfo.uiModelHeight; ++row) {
        uchar* uc_pixel = pr_img.data + row * pr_img.step;
        for (int col = 0; col < pYoloV5ModelInfo_->modelInfo.uiModelWidth; ++col) {
            pfInputData_[n] = (float)uc_pixel[2] / 255.0;
            pfInputData_[n + pYoloV5ModelInfo_->modelInfo.uiModelHeight * pYoloV5ModelInfo_->modelInfo.uiModelWidth] = (float)uc_pixel[1] / 255.0;
            pfInputData_[n + 2 * pYoloV5ModelInfo_->modelInfo.uiModelHeight * pYoloV5ModelInfo_->modelInfo.uiModelWidth] = (float)uc_pixel[0] / 255.0;
            uc_pixel += pYoloV5ModelInfo_->modelInfo.uiChannel;
            ++n;
        }
    }
#endif
    auto preprocess_end = std::chrono::system_clock::now();
    std::cout << "yolov5 preprocess time: " << std::chrono::duration_cast<std::chrono::milliseconds>(preprocess_end - preprocess_start).count() << "ms" << std::endl;
    //2. Inference
    float fResizeRatio = GetResizeRatio(img_width, img_height, pYoloV5ModelInfo_->modelInfo.uiModelWidth, pYoloV5ModelInfo_->modelInfo.uiModelHeight);
    auto start = std::chrono::system_clock::now();
#ifdef ENABLE_CUDA_PREPROCESS
    // Input already sits in pfBuffers_[0]; only the output needs copying back.
    doInferenceV4(*pContext_, *pInferenceModelStream_, (void**)pfBuffers_,
                  uiOutputIndex_, pfOutputData_, pYoloV5ModelInfo_->modelInfo.uiOutputSize,
                  pYoloV5ModelInfo_->modelInfo.uiBatchSize);
#else
    // Host input must be uploaded as well.
    doInferenceV3(*pContext_, *pInferenceModelStream_, (void**)pfBuffers_,
                  uiInputIndex_, pfInputData_, pYoloV5ModelInfo_->modelInfo.uiInputSize,
                  uiOutputIndex_, pfOutputData_, pYoloV5ModelInfo_->modelInfo.uiOutputSize,
                  pYoloV5ModelInfo_->modelInfo.uiBatchSize);
#endif
    auto end = std::chrono::system_clock::now();
    std::cout << "yolov5 inference time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
    //3. Postprocess: decode the raw output and apply NMS.
    auto decode_nms_start = std::chrono::system_clock::now();
    yolov5DecodeOpenCVNms(vecRes, pfOutputData_, pYoloV5ModelInfo_->modelInfo.uiOutputSize,
                          pYoloV5ModelInfo_->modelParam.uiDetSize,
                          pYoloV5ModelInfo_->modelParam.uiClassNum,
                          pYoloV5ModelInfo_->modelParam.fScoreThreshold,
                          pYoloV5ModelInfo_->modelParam.fNmsThreshold);
    auto decode_nms_end = std::chrono::system_clock::now();
    std::cout << "yolov5 post time: " << std::chrono::duration_cast<std::chrono::milliseconds>(decode_nms_end - decode_nms_start).count() << "ms" << std::endl;
    // Map every surviving box back to original-image coordinates
    // (top-left-padding letterbox restore).
    for(size_t j = 0; j < vecRes.size(); j++){
        UpperVertexResetLocation(fResizeRatio, img_width, img_height, vecRes[j]);
    }
    return 0; // BUGFIX: the function is declared int but had no return statement (UB)
}

View File

@ -0,0 +1,27 @@
// yolov5_detect_inference.h
// Declares YoloV5DetectInference: a TensorRT-backed YOLOv5 object detector
// producing plain Detection results (box + class confidence + class id).
#ifndef _YOLOV5_DETECT_INFERENCE_H_
#define _YOLOV5_DETECT_INFERENCE_H_
#include "preprocess.h"
#include "inference.h"
#include "postprocess.h"
// NOTE(review): using-directives in a header leak into every includer; kept for
// compatibility with existing translation units.
using namespace nvinfer1;
using namespace nvcaffeparser1;
using namespace std;
class YoloV5DetectInference: public Inference
{
public:
    YoloV5DetectInference();
    ~YoloV5DetectInference();
    // Loads/deserializes the TensorRT engine, creates CUDA streams and
    // allocates host/device buffers. Returns 0 on success, -1 on failure.
    int YoloV5DetectInferenceInit(CommonModelInfo* pYoloV5ModelInfo, const std::string& strModelName, const std::string& strEngineName);
    // Releases streams, host/device buffers and TensorRT objects.
    int YoloV5DetectInferenceDeinit(void);
    // Runs preprocess + inference + decode/NMS on `frame`; detections are
    // appended to vecRes with coordinates mapped back to the original image.
    int YoloV5DetectInferenceModel(cv::Mat& frame, std::vector<Detection>& vecRes);
private:
    // Not owned: points at the caller-supplied model configuration (set by Init).
    CommonModelInfo* pYoloV5ModelInfo_ = nullptr;
};
#endif //END OF _YOLOV5_DETECT_INFERENCE_H_

184
src/base/model/model.h Normal file
View File

@ -0,0 +1,184 @@
#ifndef _MODEL_H_
#define _MODEL_H_
// model.h -- compile-time configuration for the step-1/step-2 YOLOv5 detectors
// (car number/attributes and container variants), the retinanet arrival
// classifier, plus the detection-result and model-info structs.
#include <iostream>
#include <string> // BUGFIX: the structs below use std::string; don't rely on <iostream> pulling it in
// NOTE(review): a using-directive in a header leaks into every includer; kept
// for compatibility with existing code.
using namespace std;
#define USE_FP16 // set USE_INT8 or USE_FP16 or USE_FP32
#define DEVICE 0 // GPU id
#define BATCH_SIZE 1 // batch size
#define MAX_WORKSPAXE_SIZE (16 * (1 << 20)) // TensorRT builder workspace size
// BUGFIX: parenthesized -- the bare `3000*3000` mis-expands inside larger
// expressions such as `x / MAX_IMAGE_INPUT_SIZE_THRESH`.
#define MAX_IMAGE_INPUT_SIZE_THRESH (3000*3000)
#define INPUT_CHANNEL 3 // input channel count
#define LOCATIONS 4
//yolov5 object detection
// == step 1 yolov5 ==
// -- car number / attributes --
#define STEP1_INPUT_BLOB_NAME "images" // input layer name
#define STEP1_OUTPUT_BLOB_NAME "output" // output layer name
#define STEP1_INPUT_CHANNEL 3 // input channels
#define STEP1_BATCH_SIZE 1 // batch size (BUGFIX: was defined twice; duplicate removed)
#define STEP1_POS_CONF 5 // box coordinates + objectness
#define STEP1_LOCATIONS 4 // coordinate count
#define STEP1_NMS_THRESH 0.5 // step1 NMS threshold
#define STEP1_SCORE_THRESH 0.9 // step1 confidence (score) threshold
#define STEP1_CLASS_NUM 19 // step1 class count
#define STEP1_CLEAR_NUM 21 // step1 clarity-level count
#define STEP1_INPUT_H 960 // step1 input image height
#define STEP1_INPUT_W 960 // step1 input image width
#define STEP1_OUTPUT_HISTOGRAM_N 5 // must not exceed 139167
#define STEP1_BBOX_SIZE1 (STEP1_INPUT_H/32)
#define STEP1_BBOX_SIZE2 (STEP1_INPUT_H/16)
#define STEP1_BBOX_SIZE3 (STEP1_INPUT_H/8)
#define STEP1_DET_SIZE (STEP1_CLASS_NUM + STEP1_CLEAR_NUM + STEP1_POS_CONF)
// BUGFIX: the *_SIZE expressions below are now parenthesized so they expand
// safely inside arithmetic expressions.
#define STEP1_INPUT_SIZE (INPUT_CHANNEL*STEP1_INPUT_H*STEP1_INPUT_W) //input
#define STEP1_OUTPUT_SIZE (INPUT_CHANNEL*(STEP1_BBOX_SIZE1*STEP1_BBOX_SIZE1+STEP1_BBOX_SIZE2*STEP1_BBOX_SIZE2+STEP1_BBOX_SIZE3*STEP1_BBOX_SIZE3)*(STEP1_CLEAR_NUM+STEP1_CLASS_NUM+STEP1_POS_CONF)) //output
// -- container --
#define STEP1_CONTAINER_INPUT_BLOB_NAME "images" // input layer name
#define STEP1_CONTAINER_OUTPUT_BLOB_NAME "output" // output layer name
#define STEP1_CONTAINER_INPUT_CHANNEL 3 // input channels
#define STEP1_CONTAINER_BATCH_SIZE 1 // batch size (BUGFIX: duplicate define removed)
#define STEP1_CONTAINER_POS_CONF 5 // box coordinates + objectness
#define STEP1_CONTAINER_LOCATIONS 4 // coordinate count
#define STEP1_CONTAINER_NMS_THRESH 0.5 // NMS threshold
#define STEP1_CONTAINER_SCORE_THRESH 0.9 // confidence (score) threshold
#define STEP1_CONTAINER_CLASS_NUM 19 // class count
#define STEP1_CONTAINER_CLEAR_NUM 21 // clarity-level count
#define STEP1_CONTAINER_INPUT_H 960 // input image height
#define STEP1_CONTAINER_INPUT_W 960 // input image width
#define STEP1_CONTAINER_BBOX_SIZE1 (STEP1_CONTAINER_INPUT_H/32)
#define STEP1_CONTAINER_BBOX_SIZE2 (STEP1_CONTAINER_INPUT_H/16)
#define STEP1_CONTAINER_BBOX_SIZE3 (STEP1_CONTAINER_INPUT_H/8)
#define STEP1_CONTAINER_DET_SIZE (STEP1_CONTAINER_CLASS_NUM + STEP1_CONTAINER_CLEAR_NUM + STEP1_CONTAINER_POS_CONF)
#define STEP1_CONTAINER_INPUT_SIZE (INPUT_CHANNEL*STEP1_CONTAINER_INPUT_H*STEP1_CONTAINER_INPUT_W) //input
#define STEP1_CONTAINER_OUTPUT_SIZE (INPUT_CHANNEL*(STEP1_CONTAINER_BBOX_SIZE1*STEP1_CONTAINER_BBOX_SIZE1+STEP1_CONTAINER_BBOX_SIZE2*STEP1_CONTAINER_BBOX_SIZE2+STEP1_CONTAINER_BBOX_SIZE3*STEP1_CONTAINER_BBOX_SIZE3)*(STEP1_CONTAINER_CLEAR_NUM+STEP1_CONTAINER_CLASS_NUM+STEP1_CONTAINER_POS_CONF)) //output
// == step 2 yolov5 ==
// -- car number / attributes --
#define STEP2_INPUT_BLOB_NAME "images" // input layer name
#define STEP2_OUTPUT_BLOB_NAME "output" // output layer name
#define STEP2_INPUT_CHANNEL 3 // input channels
#define STEP2_BATCH_SIZE 1 // batch size (BUGFIX: duplicate define removed)
#define STEP2_POS_CONF 5 // box coordinates + objectness
#define STEP2_LOCATIONS 4 // coordinate count
#define STEP2_NMS_THRESH 0.5 // step2 NMS threshold
#define STEP2_SCORE_THRESH 0.6 // step2 confidence (score) threshold
#define STEP2_CLASS_NUM 47 // step2 class count
#define STEP2_CLEAR_NUM 5 // step2 clarity-level count
#define STEP2_INPUT_H 608 // step2 input image height
#define STEP2_INPUT_W 608 // step2 input image width
#define STEP2_OUTPUT_HISTOGRAM_N 5 // step2: must not exceed 22743
#define STEP2_BBOX_SIZE1 (STEP2_INPUT_H/32)
#define STEP2_BBOX_SIZE2 (STEP2_INPUT_H/16)
#define STEP2_BBOX_SIZE3 (STEP2_INPUT_H/8)
#define STEP2_DET_SIZE (STEP2_CLASS_NUM + STEP2_CLEAR_NUM + STEP2_POS_CONF)
#define STEP2_INPUT_SIZE (INPUT_CHANNEL*STEP2_INPUT_H*STEP2_INPUT_W) //input
#define STEP2_OUTPUT_SIZE (INPUT_CHANNEL*(STEP2_BBOX_SIZE1*STEP2_BBOX_SIZE1+STEP2_BBOX_SIZE2*STEP2_BBOX_SIZE2+STEP2_BBOX_SIZE3*STEP2_BBOX_SIZE3)*(STEP2_CLEAR_NUM+STEP2_CLASS_NUM+STEP2_POS_CONF)) //output
// -- container --
#define STEP2_CONTAINER_INPUT_BLOB_NAME "images" // input layer name
#define STEP2_CONTAINER_OUTPUT_BLOB_NAME "output" // output layer name
#define STEP2_CONTAINER_INPUT_CHANNEL 3 // input channels
#define STEP2_CONTAINER_BATCH_SIZE 1 // batch size (BUGFIX: duplicate define removed)
#define STEP2_CONTAINER_POS_CONF 5 // box coordinates + objectness
#define STEP2_CONTAINER_LOCATIONS 4 // coordinate count
#define STEP2_CONTAINER_NMS_THRESH 0.5 // NMS threshold
#define STEP2_CONTAINER_SCORE_THRESH 0.9 // confidence (score) threshold
#define STEP2_CONTAINER_CLASS_NUM 7 // class count
#define STEP2_CONTAINER_CLEAR_NUM 21 // clarity-level count
#define STEP2_CONTAINER_INPUT_H 960 // input image height
#define STEP2_CONTAINER_INPUT_W 960 // input image width
#define STEP2_CONTAINER_BBOX_SIZE1 (STEP2_CONTAINER_INPUT_H/32)
#define STEP2_CONTAINER_BBOX_SIZE2 (STEP2_CONTAINER_INPUT_H/16)
#define STEP2_CONTAINER_BBOX_SIZE3 (STEP2_CONTAINER_INPUT_H/8)
#define STEP2_CONTAINER_DET_SIZE (STEP2_CONTAINER_CLASS_NUM + STEP2_CONTAINER_CLEAR_NUM + STEP2_CONTAINER_POS_CONF)
#define STEP2_CONTAINER_INPUT_SIZE (INPUT_CHANNEL*STEP2_CONTAINER_INPUT_H*STEP2_CONTAINER_INPUT_W) //input
#define STEP2_CONTAINER_OUTPUT_SIZE (INPUT_CHANNEL*(STEP2_CONTAINER_BBOX_SIZE1*STEP2_CONTAINER_BBOX_SIZE1+STEP2_CONTAINER_BBOX_SIZE2*STEP2_CONTAINER_BBOX_SIZE2+STEP2_CONTAINER_BBOX_SIZE3*STEP2_CONTAINER_BBOX_SIZE3)*(STEP2_CONTAINER_CLEAR_NUM+STEP2_CONTAINER_CLASS_NUM+STEP2_CONTAINER_POS_CONF)) //output
// -- arrival detection: retinanet --
#define RETINANET_CLASSIFY_INPUT_CHANNEL 3 // input channels
#define RETINANET_CLASSIFY_BATCH_SIZE 1 // batch size
#define RETINANET_CLASSIFY_INPUT_H 537 // model input height
#define RETINANET_CLASSIFY_INPUT_W 925 // model input width
#define RETINANET_CLASSIFY_INPUT_SIZE (RETINANET_CLASSIFY_BATCH_SIZE*RETINANET_CLASSIFY_INPUT_CHANNEL*RETINANET_CLASSIFY_INPUT_W*RETINANET_CLASSIFY_INPUT_H) //input
#define RETINANET_CLASSIFY_OUTPUT_SIZE 2 //output
#define RETINANET_CLASSIFY_INPUT_BLOB_NAME "data" // input layer name
#define RETINANET_CLASSIFY_OUTPUT_BLOB_NAME "train" // output layer name
// One detected box: coordinates, class confidence, and class label.
typedef struct alignas(float) _Detection{
    float fBbox[LOCATIONS]; // box coordinates
    float fClassConf;       // class confidence
    int iClassId;           // class label
}Detection;
// A Detection augmented with a clarity confidence/label.
typedef struct alignas(float) _ClearDetection{
    Detection detection;
    float fClearConf; // clarity confidence
    int iClearId;     // clarity label
}ClearDetection;
// Decode/NMS parameters for a model.
typedef struct _ModelParam{
    unsigned int uiClassNum;  // number of classes
    unsigned int uiDetSize;   // floats per candidate row in the raw output
    float fScoreThreshold;    // confidence threshold
    float fNmsThreshold;      // NMS IoU threshold
}ModelParam;
// ModelParam plus the clarity-level count (clear-model variant).
typedef struct _ClearModelParam{
    unsigned int uiClearNum;
    ModelParam modelParam;
}ClearModelParam;
// Engine I/O geometry and binding blob names.
typedef struct _ModelInfo{
    unsigned int uiModelWidth;
    unsigned int uiModelHeight;
    unsigned int uiInputSize;
    unsigned int uiOutputSize;
    unsigned int uiChannel;
    unsigned int uiBatchSize;
    std::string strInputBlobName;
    std::string strOutputBlobName;
}ModelInfo;
typedef struct _ClearModelInfo{
    ClearModelParam clearModelParam;
    ModelInfo modelInfo;
}ClearModelInfo;
typedef struct _CommonModelInfo{
    ModelParam modelParam;
    ModelInfo modelInfo;
}CommonModelInfo;
#endif //END OF _MODEL_H_

View File

@ -0,0 +1,300 @@
#include "postprocess.h"
// HSV -> BGR conversion; h, s, v are all in [0, 1] and each output channel is
// an 8-bit value. Returned tuple order is (blue, green, red).
std::tuple<uint8_t, uint8_t, uint8_t> hsv2bgr(float h, float s, float v)
{
    const int sector = static_cast<int>(h * 6);
    const float f = h * 6 - sector;
    const float p = v * (1 - s);
    const float q = v * (1 - f * s);
    const float t = v * (1 - (1 - f) * s);
    float r = 1, g = 1, b = 1; // defaults match the original's fallthrough branch
    switch (sector) {
        case 0: r = v; g = t; b = p; break;
        case 1: r = q; g = v; b = p; break;
        case 2: r = p; g = v; b = t; break;
        case 3: r = p; g = q; b = v; break;
        case 4: r = t; g = p; b = v; break;
        case 5: r = v; g = p; b = q; break;
        default: break;
    }
    const uint8_t blue  = static_cast<uint8_t>(b * 255);
    const uint8_t green = static_cast<uint8_t>(g * 255);
    const uint8_t red   = static_cast<uint8_t>(r * 255);
    return std::make_tuple(blue, green, red);
}
// HSV -> RGB conversion; h, s, v are all in [0, 1] and each output channel is
// an 8-bit value. Returned tuple order is (red, green, blue).
std::tuple<uint8_t, uint8_t, uint8_t> hsv2rgb(float h, float s, float v)
{
    const int sector = static_cast<int>(h * 6);
    const float f = h * 6 - sector;
    const float p = v * (1 - s);
    const float q = v * (1 - f * s);
    const float t = v * (1 - (1 - f) * s);
    float r = 1, g = 1, b = 1; // defaults match the original's fallthrough branch
    switch (sector) {
        case 0: r = v; g = t; b = p; break;
        case 1: r = q; g = v; b = p; break;
        case 2: r = p; g = v; b = t; break;
        case 3: r = p; g = q; b = v; break;
        case 4: r = t; g = p; b = v; break;
        case 5: r = v; g = p; b = q; break;
        default: break;
    }
    const uint8_t red   = static_cast<uint8_t>(r * 255);
    const uint8_t green = static_cast<uint8_t>(g * 255);
    const uint8_t blue  = static_cast<uint8_t>(b * 255);
    return std::make_tuple(red, green, blue);
}
// Deterministically derives a display color from `id`, so the same class id is
// always rendered in the same RGB color.
std::tuple<uint8_t, uint8_t, uint8_t> randomColor(int id)
{
    const unsigned int uid = (unsigned int)id;
    const float hue        = ((uid << 2) ^ 0x937151) % 100 / 100.0f;
    const float saturation = ((uid << 3) ^ 0x315793) % 100 / 100.0f;
    return hsv2rgb(hue, saturation, 1);
}
// Coordinate conversion: (center-x, center-y, width, height) -> (x1, y1, x2, y2).
void xywh2xyxy(float *xywh, float * xyxy)
{
    const float half_w = xywh[2] / 2;
    const float half_h = xywh[3] / 2;
    xyxy[0] = xywh[0] - half_w; // left
    xyxy[1] = xywh[1] - half_h; // top
    xyxy[2] = xywh[0] + half_w; // right
    xyxy[3] = xywh[1] + half_h; // bottom
}
// Region box helper 1: map a (cx, cy, w, h) box expressed in letterboxed model
// coordinates back onto the original image `img` (centered-padding letterbox:
// undo the padding along the padded axis, then divide by the resize ratio).
cv::Rect getRect(cv::Mat& img, unsigned int uiModelWidth, unsigned int uiModelHeight, float fBbox[4])
{
    const float ratio_w = uiModelWidth / (img.cols * 1.0);
    const float ratio_h = uiModelHeight / (img.rows * 1.0);
    float left, right, top, bottom;
    if (ratio_h > ratio_w) {
        // Width-limited: vertical padding was added top and bottom.
        const float pad = (uiModelHeight - ratio_w * img.rows) / 2;
        left   = (fBbox[0] - fBbox[2] / 2.f) / ratio_w;
        right  = (fBbox[0] + fBbox[2] / 2.f) / ratio_w;
        top    = (fBbox[1] - fBbox[3] / 2.f - pad) / ratio_w;
        bottom = (fBbox[1] + fBbox[3] / 2.f - pad) / ratio_w;
    } else {
        // Height-limited: horizontal padding was added left and right.
        const float pad = (uiModelWidth - ratio_h * img.cols) / 2;
        left   = (fBbox[0] - fBbox[2] / 2.f - pad) / ratio_h;
        right  = (fBbox[0] + fBbox[2] / 2.f - pad) / ratio_h;
        top    = (fBbox[1] - fBbox[3] / 2.f) / ratio_h;
        bottom = (fBbox[1] + fBbox[3] / 2.f) / ratio_h;
    }
    return cv::Rect(round(left), round(top), round(right - left), round(bottom - top));
}
// Region box helper 2: build a cv::Rect from corner coordinates -- fBbox is
// already (x1, y1, x2, y2) in image coordinates. `img` is unused, kept for
// signature compatibility.
cv::Rect getRectangle(cv::Mat& img, float fBbox[4])
{
    const int x = round(fBbox[0]);
    const int y = round(fBbox[1]);
    const int w = round(fBbox[2] - fBbox[0]);
    const int h = round(fBbox[3] - fBbox[1]);
    return cv::Rect(x, y, w, h);
}
// Intersection-over-union of two boxes given as (cx, cy, w, h).
float iou(float fLbox[4], float fRbox[4])
{
    const float lefts   = (std::max)(fLbox[0] - fLbox[2] / 2.f, fRbox[0] - fRbox[2] / 2.f);
    const float rights  = (std::min)(fLbox[0] + fLbox[2] / 2.f, fRbox[0] + fRbox[2] / 2.f);
    const float tops    = (std::max)(fLbox[1] - fLbox[3] / 2.f, fRbox[1] - fRbox[3] / 2.f);
    const float bottoms = (std::min)(fLbox[1] + fLbox[3] / 2.f, fRbox[1] + fRbox[3] / 2.f);
    // Empty intersection -> IoU is zero.
    if (tops > bottoms || lefts > rights)
        return 0.0f;
    const float inter = (rights - lefts) * (bottoms - tops);
    return inter / (fLbox[2] * fLbox[3] + fRbox[2] * fRbox[3] - inter);
}
// Comparator for sorting detections by class confidence, highest first.
bool confCmp(const Detection& a, const Detection& b)
{
    return b.fClassConf < a.fClassConf;
}
// Letterbox resize ratio: the larger of the per-axis (image / model) ratios,
// using the same 1e-5 tolerance as the original comparison.
float GetResizeRatio(unsigned int uiImgWidth, unsigned int uiImgHeight, unsigned int uiModelWidth, unsigned int uiModelHeight)
{
    const float ratio_w = static_cast<float>(uiImgWidth) / uiModelWidth;
    const float ratio_h = static_cast<float>(uiImgHeight) / uiModelHeight;
    if (ratio_w - ratio_h > 1e-5) {
        return ratio_w;
    }
    return ratio_h;
}
// Coordinate restore for the top-left-padding letterbox: scale the box back to
// original-image coordinates, then clamp x values to the image width and
// y values to the image height (upper bound only, matching the original).
void UpperVertexResetLocation(float fResizeRatio, unsigned int uiOrigWidth, unsigned int uiOrigHeight, Detection &detection)
{
    for (int i = 0; i < 4; ++i) {
        detection.fBbox[i] *= fResizeRatio;
        // Even indices are x coordinates, odd indices are y coordinates.
        const float limit = (i % 2 == 0) ? (float)uiOrigWidth : (float)uiOrigHeight;
        if (detection.fBbox[i] >= limit) {
            detection.fBbox[i] = limit;
        }
    }
}
// Coordinate restore for the centered-padding letterbox: remove the padding
// offset along the padded axis, scale back by fResizeRatio, then clamp to the
// original image bounds.
void CenterResetLocation(float fResizeRatio, unsigned int uiOrigWidth, unsigned int uiOrigHeight, unsigned int uiInputWidth, unsigned int uiInputHeight, Detection &detection)
{
    int w, h, x, y;
    float r_w = uiInputWidth / (uiOrigWidth*1.0);   // model/original width ratio
    float r_h = uiInputHeight / (uiOrigHeight*1.0); // model/original height ratio
    if (r_h > r_w) {
        // Width-limited: padding was added top and bottom; shift y by the pad.
        w = uiInputWidth;
        h = r_w * uiOrigHeight;
        x = 0;
        y = (uiInputHeight - h) / 2;
        detection.fBbox[1] -= y;
        detection.fBbox[3] -= y;
    } else {
        // Height-limited: padding was added left and right; shift x by the pad.
        w = r_h * uiOrigWidth;
        h = uiInputHeight;
        x = (uiInputWidth - w) / 2;
        y = 0;
        detection.fBbox[0] -= x;
        detection.fBbox[2] -= x;
    }
    // Scale back to original-image coordinates (must happen AFTER the pad removal).
    for(int i=0; i<4; i++){
        detection.fBbox[i] = detection.fBbox[i] * fResizeRatio;
    }
    // Clamp x coordinates to the width and y coordinates to the height
    // (upper bound only; negative values are not clamped).
    detection.fBbox[0] = (detection.fBbox[0] < uiOrigWidth) ? detection.fBbox[0] : uiOrigWidth;
    detection.fBbox[1] = (detection.fBbox[1] < uiOrigHeight) ? detection.fBbox[1] : uiOrigHeight;
    detection.fBbox[2] = (detection.fBbox[2] < uiOrigWidth) ? detection.fBbox[2] : uiOrigWidth;
    detection.fBbox[3] = (detection.fBbox[3] < uiOrigHeight) ? detection.fBbox[3] : uiOrigHeight;
}
// YOLOv5 decode + NMS, "clear" variant (default thresholds 0.5 / 0.4).
// Each candidate row of the raw output fOutput is laid out as:
//   [cx, cy, w, h, objectness, <uiClassNum> class scores, <uiClearNum> clarity scores]
// so uiDetSize = 4 + 1 + uiClassNum + uiClearNum and the number of candidate
// rows is uiOutSize / uiDetSize. Survivors of cv::dnn::NMSBoxes are appended
// to vecRes.
void yolov5ClearDecodeOpenCVNms(std::vector<ClearDetection>& vecRes, float *fOutput, unsigned int uiOutSize, unsigned int uiDetSize, unsigned int uiClassNum, unsigned int uiClearNum, float fConfThresh = 0.5, float fNmsThresh = 0.4)
{
    // 1. First pass: keep only anchors whose objectness (index 4) exceeds the threshold.
    std::vector<int> vecFilterList;
    for(int i = 0; i < uiOutSize /uiDetSize ; ++i){ // iterate over anchors, dropping low-objectness ones
        if(fOutput[uiDetSize * i + 4] > fConfThresh){
            vecFilterList.emplace_back(i); // remember every anchor above the threshold
        }
    }
    if(vecFilterList.size() == 0) return;
    // 2. For each survivor, take the argmax class score and argmax clarity score.
    std::vector<ClearDetection> vecResult;
    std::vector<cv::Rect> vecBoxes;
    std::vector<float> vecScores;
    for(int i : vecFilterList){
        float* pClassConfidence = &fOutput[uiDetSize * i + 5]; // class scores start after box + objectness
        float fClassConf = *pClassConfidence++;
        int iClassLabel = 0;
        for(int j = 1; j <uiClassNum; ++j, ++pClassConfidence){ // argmax over the class scores
            if(*pClassConfidence > fClassConf){
                fClassConf = *pClassConfidence;
                iClassLabel = j;
            }
        }
        float* pClearConfidence = &fOutput[uiDetSize * i + 5 + uiClassNum]; // clarity scores follow the class scores
        float fClearConf = *pClearConfidence++;
        int iClearLabel = 0;
        for(int n = 1; n <uiClearNum; ++n, ++pClearConfidence){ // argmax over the clarity scores
            if(*pClearConfidence > fClearConf){
                fClearConf = *pClearConfidence;
                iClearLabel = n;
            }
        }
        float xywh[4] = {fOutput[i*uiDetSize + 0], fOutput[i*uiDetSize + 1], fOutput[i*uiDetSize + 2], fOutput[i*uiDetSize + 3]};
        float xyxy[4];
        xywh2xyxy(xywh, xyxy); // center/size -> corner coordinates
        ClearDetection clearDetection;
        for(int n = 0; n<4; n++){
            clearDetection.detection.fBbox[n] = xyxy[n];
        }
        clearDetection.detection.fClassConf = fOutput[uiDetSize * i + 4] * fClassConf; // objectness * class score
        clearDetection.detection.iClassId = iClassLabel;
        clearDetection.fClearConf = fOutput[uiDetSize * i + 4] * fClearConf; // objectness * clarity score
        clearDetection.iClearId = iClearLabel;
        // Only boxes whose combined score clears the threshold take part in NMS.
        if(clearDetection.detection.fClassConf>fConfThresh) {
            cv::Rect tempRect;
            tempRect.x = xyxy[0];
            tempRect.y = xyxy[1];
            tempRect.width = std::abs(xyxy[2] - xyxy[0]);
            tempRect.height = std::abs(xyxy[3] - xyxy[1]);
            vecBoxes.push_back(tempRect);
            vecScores.push_back(clearDetection.detection.fClassConf);
            vecResult.push_back(clearDetection);
        }
    }
    std::vector<int> vecIdx; // indices of the boxes kept after NMS
    cv::dnn::NMSBoxes(vecBoxes, vecScores, fConfThresh, fNmsThresh, vecIdx);
    for (std::size_t i = 0; i < vecIdx.size(); i++){
        vecRes.push_back(vecResult[vecIdx[i]]);
    }
}
// YOLOv5 decode + NMS, standard variant (default thresholds 0.5 / 0.4).
// Each candidate row of the raw output fOutput is laid out as:
//   [cx, cy, w, h, objectness, <uiClassNum> class scores]
// so the number of candidate rows is uiOutSize / uiDetSize. Survivors of
// cv::dnn::NMSBoxes are appended to vecRes.
void yolov5DecodeOpenCVNms(std::vector<Detection>& vecRes, float *fOutput, unsigned int uiOutSize, unsigned int uiDetSize, unsigned int uiClassNum, float fConfThresh = 0.5, float fNmsThresh = 0.4)
{
    // 1. First pass: keep only anchors whose objectness (index 4) exceeds the threshold.
    std::vector<int> vecFilterList;
    for(int i = 0; i < uiOutSize /uiDetSize ; ++i){ // iterate over anchors, dropping low-objectness ones
        if(fOutput[uiDetSize * i + 4] > fConfThresh){
            vecFilterList.emplace_back(i); // remember every anchor above the threshold
        }
    }
    if(vecFilterList.size() == 0) return;
    // 2. For each survivor, take the argmax class score and its label.
    std::vector<Detection> vecResult;
    std::vector<cv::Rect> vecBoxes;
    std::vector<float> vecScores;
    for(int i : vecFilterList){
        float* pClassConfidence = &fOutput[uiDetSize * i + 5]; // class scores start after box + objectness
        float fClassConf = *pClassConfidence++;
        int iClassLabel = 0;
        for(int j = 1; j <uiClassNum; ++j, ++pClassConfidence){ // argmax over the class scores
            if(*pClassConfidence > fClassConf){
                fClassConf = *pClassConfidence;
                iClassLabel = j;
            }
        }
        float xywh[4] = {fOutput[i*uiDetSize + 0], fOutput[i*uiDetSize + 1], fOutput[i*uiDetSize + 2], fOutput[i*uiDetSize + 3]};
        float xyxy[4];
        xywh2xyxy(xywh, xyxy); // center/size -> corner coordinates
        Detection detection;
        for(int n = 0; n<4; n++){
            detection.fBbox[n] = xyxy[n];
        }
        detection.fClassConf = fOutput[uiDetSize * i + 4] * fClassConf; // objectness * class score
        detection.iClassId = iClassLabel;
        // Only boxes whose combined score clears the threshold take part in NMS.
        if(detection.fClassConf>fConfThresh) {
            cv::Rect tempRect;
            tempRect.x = xyxy[0];
            tempRect.y = xyxy[1];
            tempRect.width = std::abs(xyxy[2] - xyxy[0]);
            tempRect.height = std::abs(xyxy[3] - xyxy[1]);
            vecBoxes.push_back(tempRect);
            vecScores.push_back(detection.fClassConf);
            vecResult.push_back(detection);
        }
    }
    std::vector<int> vecIdx; // indices of the boxes kept after NMS
    cv::dnn::NMSBoxes(vecBoxes, vecScores, fConfThresh, fNmsThresh, vecIdx);
    for (std::size_t i = 0; i < vecIdx.size(); i++){
        vecRes.push_back(vecResult[vecIdx[i]]);
    }
}

View File

@ -0,0 +1,54 @@
#ifndef _POST_PROCESS_H_
#define _POST_PROCESS_H_
// postprocess.h -- post-processing utilities shared by the YOLOv5 detectors:
// visualization color helpers, box/coordinate conversions, IoU, letterbox
// coordinate restore, and the decode + NMS routines.
#include <iostream>
#include <map>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "model.h"
// NOTE(review): using-directives in a header leak into every includer; kept for
// compatibility with existing translation units.
using namespace std;
using namespace cv;
// HSV -> BGR / RGB conversion (h, s, v in [0, 1]); returns 8-bit channels.
std::tuple<uint8_t, uint8_t, uint8_t> hsv2bgr(float h, float s, float v);
std::tuple<uint8_t, uint8_t, uint8_t> hsv2rgb(float h, float s, float v);
// Deterministic per-id display color.
std::tuple<uint8_t, uint8_t, uint8_t> randomColor(int id);
// Coordinate conversion: (cx, cy, w, h) -> (x1, y1, x2, y2).
void xywh2xyxy(float *xywh, float * xyxy);
// Region box helper 1: map a letterboxed (cx, cy, w, h) box back onto img.
cv::Rect getRect(cv::Mat& img, unsigned int uiModelWidth, unsigned int uiModelHeight, float fBbox[4]);
// Region box helper 2: build a cv::Rect from (x1, y1, x2, y2).
cv::Rect getRectangle(cv::Mat& img, float fBbox[4]);
// IoU of two (cx, cy, w, h) boxes.
float iou(float fLbox[4], float fRbox[4]);
// Comparator: descending class confidence.
bool confCmp(const Detection& a, const Detection& b);
// Letterbox resize ratio (the larger of the width/height image-to-model ratios).
float GetResizeRatio(unsigned int uiImgWidth, unsigned int uiImgHeight, unsigned int uiModelWidth, unsigned int uiModelHeight);
// Coordinate restore for the top-left-padding letterbox.
void UpperVertexResetLocation(float fResizeRatio, unsigned int uiOrigWidth, unsigned int uiOrigHeight, Detection &detection);
// Coordinate restore for the centered-padding letterbox.
void CenterResetLocation(float fResizeRatio, unsigned int uiOrigWidth, unsigned int uiOrigHeight, unsigned int uiInputWidth, unsigned int uiInputHeight, Detection &detection);
// YOLOv5 decode + NMS, "clear" variant (defaults 0.5 / 0.4 live in the definition).
void yolov5ClearDecodeOpenCVNms(std::vector<ClearDetection>& vecRes, float *fOutput, unsigned int uiOutSize, unsigned int uiDetSize, unsigned int uiClassNum, unsigned int uiClearNum, float fConfThresh, float fNmsThresh);
// YOLOv5 decode + NMS, standard variant (defaults 0.5 / 0.4 live in the definition).
void yolov5DecodeOpenCVNms(std::vector<Detection>& vecRes, float *fOutput, unsigned int uiOutSize, unsigned int uiDetSize, unsigned int uiClassNum, float fConfThresh, float fNmsThresh);
#endif //END OF _POST_PROCESS_H_

View File

@ -0,0 +1,491 @@
#include "preprocess.h"
// Affine-warp (letterbox) preprocessing kernel for the YOLOv5 detector.
// One thread per destination pixel: the dst->src affine matrix d2s is applied,
// the BGR source is sampled bilinearly (neighbors outside the image read
// const_value_st), channels are swapped to RGB, scaled to [0, 1], and written
// as planar (CHW) float output.
__global__ void yolov5_detect_warpaffine_kernel(
    uint8_t* src, int src_line_size, int src_width,
    int src_height, float* dst, int dst_width,
    int dst_height, uint8_t const_value_st,
    AffineMatrix d2s, int edge) {
  int position = blockDim.x * blockIdx.x + threadIdx.x;
  // edge is the total pixel count to process (presumably dst_width * dst_height
  // -- confirm at the launch site); surplus threads exit immediately.
  if (position >= edge) return;
  // Rows of the 2x3 dst->src affine matrix.
  float m_x1 = d2s.value[0];
  float m_y1 = d2s.value[1];
  float m_z1 = d2s.value[2];
  float m_x2 = d2s.value[3];
  float m_y2 = d2s.value[4];
  float m_z2 = d2s.value[5];
  // Destination pixel handled by this thread.
  int dx = position % dst_width;
  int dy = position / dst_width;
  // Corresponding sub-pixel source coordinates (+0.5f offset, presumably for
  // pixel-center alignment -- matches the companion kernels).
  float src_x = m_x1 * dx + m_y1 * dy + m_z1 + 0.5f;
  float src_y = m_x2 * dx + m_y2 * dy + m_z2 + 0.5f;
  float c0, c1, c2;
  if (src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height) {
    // Out of range: fill with the constant border value.
    c0 = const_value_st;
    c1 = const_value_st;
    c2 = const_value_st;
  } else {
    // Bilinear interpolation over the 2x2 neighborhood v1..v4; any neighbor
    // outside the image falls back to const_value.
    int y_low = floorf(src_y);
    int x_low = floorf(src_x);
    int y_high = y_low + 1;
    int x_high = x_low + 1;
    uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
    float ly = src_y - y_low;
    float lx = src_x - x_low;
    float hy = 1 - ly;
    float hx = 1 - lx;
    // Interpolation weights for the four neighbors.
    float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
    uint8_t* v1 = const_value;
    uint8_t* v2 = const_value;
    uint8_t* v3 = const_value;
    uint8_t* v4 = const_value;
    if (y_low >= 0) {
      if (x_low >= 0)
        v1 = src + y_low * src_line_size + x_low * 3;
      if (x_high < src_width)
        v2 = src + y_low * src_line_size + x_high * 3;
    }
    if (y_high < src_height) {
      if (x_low >= 0)
        v3 = src + y_high * src_line_size + x_low * 3;
      if (x_high < src_width)
        v4 = src + y_high * src_line_size + x_high * 3;
    }
    // Weighted sum per channel (3 interleaved channels per pixel).
    c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0];
    c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1];
    c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2];
  }
  // BGR -> RGB: swap the first and third channel.
  float t = c2;
  c2 = c0;
  c0 = t;
  // Normalize to [0, 1].
  c0 = c0 / 255.0f;
  c1 = c1 / 255.0f;
  c2 = c2 / 255.0f;
  // Interleaved (rgbrgbrgb) -> planar (rrrgggbbb / CHW) layout.
  int area = dst_width * dst_height;
  float* pdst_c0 = dst + dy * dst_width + dx;
  float* pdst_c1 = pdst_c0 + area;
  float* pdst_c2 = pdst_c1 + area;
  *pdst_c0 = c0;
  *pdst_c1 = c1;
  *pdst_c2 = c2;
}
// Affine-warp preprocessing kernel for the classification model. Identical to
// yolov5_detect_warpaffine_kernel except for the final normalization, which
// additionally subtracts a per-channel mean and divides by a per-channel std.
__global__ void yolov5_classify_warpaffine_kernel(
    uint8_t* src, int src_line_size, int src_width,
    int src_height, float* dst, int dst_width,
    int dst_height, uint8_t const_value_st,
    AffineMatrix d2s, int edge) {
  int position = blockDim.x * blockIdx.x + threadIdx.x;
  // edge is the total pixel count to process; surplus threads exit immediately.
  if (position >= edge) return;
  // Rows of the 2x3 dst->src affine matrix.
  float m_x1 = d2s.value[0];
  float m_y1 = d2s.value[1];
  float m_z1 = d2s.value[2];
  float m_x2 = d2s.value[3];
  float m_y2 = d2s.value[4];
  float m_z2 = d2s.value[5];
  // Destination pixel handled by this thread.
  int dx = position % dst_width;
  int dy = position / dst_width;
  // Corresponding sub-pixel source coordinates.
  float src_x = m_x1 * dx + m_y1 * dy + m_z1 + 0.5f;
  float src_y = m_x2 * dx + m_y2 * dy + m_z2 + 0.5f;
  float c0, c1, c2;
  if (src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height) {
    // Out of range: fill with the constant border value.
    c0 = const_value_st;
    c1 = const_value_st;
    c2 = const_value_st;
  } else {
    // Bilinear interpolation over the 2x2 neighborhood v1..v4; any neighbor
    // outside the image falls back to const_value.
    int y_low = floorf(src_y);
    int x_low = floorf(src_x);
    int y_high = y_low + 1;
    int x_high = x_low + 1;
    uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
    float ly = src_y - y_low;
    float lx = src_x - x_low;
    float hy = 1 - ly;
    float hx = 1 - lx;
    // Interpolation weights for the four neighbors.
    float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
    uint8_t* v1 = const_value;
    uint8_t* v2 = const_value;
    uint8_t* v3 = const_value;
    uint8_t* v4 = const_value;
    if (y_low >= 0) {
      if (x_low >= 0)
        v1 = src + y_low * src_line_size + x_low * 3;
      if (x_high < src_width)
        v2 = src + y_low * src_line_size + x_high * 3;
    }
    if (y_high < src_height) {
      if (x_low >= 0)
        v3 = src + y_high * src_line_size + x_low * 3;
      if (x_high < src_width)
        v4 = src + y_high * src_line_size + x_high * 3;
    }
    // Weighted sum per channel (3 interleaved channels per pixel).
    c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0];
    c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1];
    c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2];
  }
  // BGR -> RGB: swap the first and third channel.
  float t = c2;
  c2 = c0;
  c0 = t;
  // Normalize to [0, 1], then subtract the per-channel mean and divide by the
  // per-channel std. The constants match the widely used ImageNet statistics
  // (mean 0.485/0.456/0.406, std 0.229/0.224/0.225).
  // NOTE(review): after the BGR->RGB swap above, c0 holds the R channel but is
  // normalized with the 0.406/0.225 (B-channel) constants -- confirm that this
  // channel/constant pairing is intentional.
  c0 = ((c0 / 255.0f) - 0.406) / 0.225;
  c1 = ((c1 / 255.0f) - 0.456) / 0.224;
  c2 = ((c2 / 255.0f) - 0.485) / 0.229;
  // Interleaved (rgbrgbrgb) -> planar (rrrgggbbb / CHW) layout.
  int area = dst_width * dst_height;
  float* pdst_c0 = dst + dy * dst_width + dx;
  float* pdst_c1 = pdst_c0 + area;
  float* pdst_c2 = pdst_c1 + area;
  *pdst_c0 = c0;
  *pdst_c1 = c1;
  *pdst_c2 = c2;
}
// One thread per destination pixel: maps the destination coordinate back
// through the affine matrix d2s, bilinearly samples the interleaved 3-channel
// uint8 source (constant border outside the image), swaps channels 0<->2
// (BGR -> RGB per the original comments), subtracts the per-channel mean,
// and writes planar (CHW) float output of size dst_width x dst_height.
__global__ void retinanet_detect_warpaffine_kernel(
uint8_t* src, int src_line_size, int src_width,
int src_height, float* dst, int dst_width,
int dst_height, uint8_t const_value_st,
AffineMatrix d2s, int edge) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= edge) return;

    // Destination pixel coordinate from the linear index.
    const int out_x = idx % dst_width;
    const int out_y = idx / dst_width;

    // Destination -> source affine mapping (+0.5f offset as authored).
    const float sx = d2s.value[0] * out_x + d2s.value[1] * out_y + d2s.value[2] + 0.5f;
    const float sy = d2s.value[3] * out_x + d2s.value[4] * out_y + d2s.value[5] + 0.5f;

    float ch0, ch1, ch2;  // sampled source channels 0/1/2
    if (sx <= -1 || sx >= src_width || sy <= -1 || sy >= src_height) {
        // Out of range: constant border value on every channel.
        ch0 = const_value_st;
        ch1 = const_value_st;
        ch2 = const_value_st;
    } else {
        // Bilinear interpolation over the 2x2 neighborhood.
        const int x0 = floorf(sx);
        const int y0 = floorf(sy);
        const int x1 = x0 + 1;
        const int y1 = y0 + 1;
        const float fx = sx - x0;
        const float fy = sy - y0;
        const float w00 = (1 - fy) * (1 - fx);
        const float w01 = (1 - fy) * fx;
        const float w10 = fy * (1 - fx);
        const float w11 = fy * fx;
        // Taps outside the image read from this border pixel.
        uint8_t border[] = {const_value_st, const_value_st, const_value_st};
        const uint8_t* p00 = border;
        const uint8_t* p01 = border;
        const uint8_t* p10 = border;
        const uint8_t* p11 = border;
        if (y0 >= 0) {
            if (x0 >= 0) p00 = src + y0 * src_line_size + x0 * 3;
            if (x1 < src_width) p01 = src + y0 * src_line_size + x1 * 3;
        }
        if (y1 < src_height) {
            if (x0 >= 0) p10 = src + y1 * src_line_size + x0 * 3;
            if (x1 < src_width) p11 = src + y1 * src_line_size + x1 * 3;
        }
        ch0 = w00 * p00[0] + w01 * p01[0] + w10 * p10[0] + w11 * p11[0];
        ch1 = w00 * p00[1] + w01 * p01[1] + w10 * p10[1] + w11 * p11[1];
        ch2 = w00 * p00[2] + w01 * p01[2] + w10 * p10[2] + w11 * p11[2];
    }

    // Swap channel 0 and 2 (BGR -> RGB per the original comment), then
    // subtract the per-channel means (same constants as the original).
    const float c0 = ch2 - 104;
    const float c1 = ch1 - 117;
    const float c2 = ch0 - 123;

    // Interleaved -> planar (CHW) write.
    const int area = dst_width * dst_height;
    float* plane = dst + out_y * dst_width + out_x;
    plane[0] = c0;
    plane[area] = c1;
    plane[2 * area] = c2;
}
// One thread per destination pixel: maps the destination coordinate back
// through the affine matrix d2s, bilinearly samples the interleaved 3-channel
// uint8 source (constant border outside the image), swaps channels 0<->2
// (BGR -> RGB per the original comments), subtracts the per-channel mean,
// and writes planar (CHW) float output of size dst_width x dst_height.
__global__ void retinanet_classify_warpaffine_kernel(
uint8_t* src, int src_line_size, int src_width,
int src_height, float* dst, int dst_width,
int dst_height, uint8_t const_value_st,
AffineMatrix d2s, int edge) {
// Linear destination-pixel index; edge is expected to be dst_width * dst_height.
int position = blockDim.x * blockIdx.x + threadIdx.x;
if (position >= edge) return;
// 2x3 affine coefficients (destination -> source mapping).
float m_x1 = d2s.value[0];
float m_y1 = d2s.value[1];
float m_z1 = d2s.value[2];
float m_x2 = d2s.value[3];
float m_y2 = d2s.value[4];
float m_z2 = d2s.value[5];
// Destination pixel coordinate.
int dx = position % dst_width;
int dy = position / dst_width;
// Corresponding sub-pixel source coordinate (+0.5f offset as authored).
float src_x = m_x1 * dx + m_y1 * dy + m_z1 + 0.5f;
float src_y = m_x2 * dx + m_y2 * dy + m_z2 + 0.5f;
float c0, c1, c2;
if (src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height) {
// Out of range: fill all three channels with the constant border value.
c0 = const_value_st;
c1 = const_value_st;
c2 = const_value_st;
} else {
// Bilinear interpolation over the 2x2 neighborhood; taps that fall
// outside the image read from const_value instead of src.
int y_low = floorf(src_y);
int x_low = floorf(src_x);
int y_high = y_low + 1;
int x_high = x_low + 1;
uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
// Fractional offsets and the four bilinear weights.
float ly = src_y - y_low;
float lx = src_x - x_low;
float hy = 1 - ly;
float hx = 1 - lx;
float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
// Default every tap to the border pixel, then point in-bounds taps at src.
uint8_t* v1 = const_value;
uint8_t* v2 = const_value;
uint8_t* v3 = const_value;
uint8_t* v4 = const_value;
if (y_low >= 0) {
if (x_low >= 0)
v1 = src + y_low * src_line_size + x_low * 3;
if (x_high < src_width)
v2 = src + y_low * src_line_size + x_high * 3;
}
if (y_high < src_height) {
if (x_low >= 0)
v3 = src + y_high * src_line_size + x_low * 3;
if (x_high < src_width)
v4 = src + y_high * src_line_size + x_high * 3;
}
c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0];
c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1];
c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2];
}
// Swap channel 0 and channel 2 (BGR -> RGB per the original comment).
float t = c2;
c2 = c0;
c0 = t;
// Subtract the per-channel mean.
// NOTE(review): 104/117/123 are the classic Caffe BGR means, but after the
// swap above c0 holds the *red* channel and has 104 (the blue mean)
// subtracted. Confirm against the model's training preprocessing.
c0 -= 104;
c1 -= 117;
c2 -= 123;
// Interleaved -> planar (CHW) write: one value into each channel plane.
int area = dst_width * dst_height;
float* pdst_c0 = dst + dy * dst_width + dx;
float* pdst_c1 = pdst_c0 + area;
float* pdst_c2 = pdst_c1 + area;
*pdst_c0 = c0;
*pdst_c1 = c1;
*pdst_c2 = c2;
}
// Host launcher for the YOLOv5-detect GPU preprocessing kernel.
// Computes the letterbox affine transform (uniform scale, top-left paste,
// border value 128), inverts it to a destination->source mapping, and
// launches yolov5_detect_warpaffine_kernel asynchronously on `stream`.
// src/dst must be device-accessible buffers (interleaved uint8 in,
// planar float CHW out).
void yolov5_detect_preprocess_kernel_img(
    uint8_t* src, int src_width, int src_height,
    float* dst, int dst_width, int dst_height,
    cudaStream_t stream) {
    AffineMatrix s2d, d2s;
    // Uniform scale that fits the source inside the destination.
    float scale = std::min(dst_height / (float)src_height, dst_width / (float)src_width);
    s2d.value[0] = scale;
    s2d.value[1] = 0;
    s2d.value[2] = 0;  // paste at the top-left corner
    // s2d.value[2] = -scale * src_width * 0.5 + dst_width * 0.5;  // centered paste
    s2d.value[3] = 0;
    s2d.value[4] = scale;
    s2d.value[5] = 0;  // paste at the top-left corner
    // s2d.value[5] = -scale * src_height * 0.5 + dst_height * 0.5;  // centered paste
    // m2x3_d2s wraps d2s.value, so invertAffineTransform normally writes
    // the inverse straight into d2s.
    cv::Mat m2x3_s2d(2, 3, CV_32F, s2d.value);
    cv::Mat m2x3_d2s(2, 3, CV_32F, d2s.value);
    cv::invertAffineTransform(m2x3_s2d, m2x3_d2s);
    // Copy back only if OpenCV reallocated the destination buffer. The old
    // unconditional memcpy copied d2s.value onto itself (overlapping
    // memcpy is undefined behavior).
    if (m2x3_d2s.data != reinterpret_cast<uchar*>(d2s.value))
        memcpy(d2s.value, m2x3_d2s.ptr<float>(0), sizeof(d2s.value));
    int jobs = dst_height * dst_width;  // one thread per output pixel
    int threads = 256;
    // Integer ceiling division replaces ceil(jobs / (float)threads),
    // avoiding float rounding error for large job counts.
    int blocks = (jobs + threads - 1) / threads;
    yolov5_detect_warpaffine_kernel<<<blocks, threads, 0, stream>>>(
        src, src_width * 3, src_width,
        src_height, dst, dst_width,
        dst_height, 128, d2s, jobs);
}
// Host launcher for the YOLOv5-classify GPU preprocessing kernel.
// Computes the letterbox affine transform (uniform scale, top-left paste,
// border value 128), inverts it to a destination->source mapping, and
// launches yolov5_classify_warpaffine_kernel asynchronously on `stream`.
// src/dst must be device-accessible buffers (interleaved uint8 in,
// planar float CHW out).
void yolov5_classify_preprocess_kernel_img(
    uint8_t* src, int src_width, int src_height,
    float* dst, int dst_width, int dst_height,
    cudaStream_t stream) {
    AffineMatrix s2d, d2s;
    // Uniform scale that fits the source inside the destination.
    float scale = std::min(dst_height / (float)src_height, dst_width / (float)src_width);
    s2d.value[0] = scale;
    s2d.value[1] = 0;
    s2d.value[2] = 0;  // paste at the top-left corner
    // s2d.value[2] = -scale * src_width * 0.5 + dst_width * 0.5;  // centered paste
    s2d.value[3] = 0;
    s2d.value[4] = scale;
    s2d.value[5] = 0;  // paste at the top-left corner
    // s2d.value[5] = -scale * src_height * 0.5 + dst_height * 0.5;  // centered paste
    // m2x3_d2s wraps d2s.value, so invertAffineTransform normally writes
    // the inverse straight into d2s.
    cv::Mat m2x3_s2d(2, 3, CV_32F, s2d.value);
    cv::Mat m2x3_d2s(2, 3, CV_32F, d2s.value);
    cv::invertAffineTransform(m2x3_s2d, m2x3_d2s);
    // Copy back only if OpenCV reallocated the destination buffer. The old
    // unconditional memcpy copied d2s.value onto itself (overlapping
    // memcpy is undefined behavior).
    if (m2x3_d2s.data != reinterpret_cast<uchar*>(d2s.value))
        memcpy(d2s.value, m2x3_d2s.ptr<float>(0), sizeof(d2s.value));
    int jobs = dst_height * dst_width;  // one thread per output pixel
    int threads = 256;
    // Integer ceiling division replaces ceil(jobs / (float)threads),
    // avoiding float rounding error for large job counts.
    int blocks = (jobs + threads - 1) / threads;
    yolov5_classify_warpaffine_kernel<<<blocks, threads, 0, stream>>>(
        src, src_width * 3, src_width,
        src_height, dst, dst_width,
        dst_height, 128, d2s, jobs);
}
// Host launcher for the RetinaNet-detect GPU preprocessing kernel.
// Computes the letterbox affine transform (uniform scale, top-left paste,
// border value 128), inverts it to a destination->source mapping, and
// launches retinanet_detect_warpaffine_kernel asynchronously on `stream`.
// src/dst must be device-accessible buffers (interleaved uint8 in,
// planar float CHW out).
void retinanet_detect_preprocess_kernel_img(
    uint8_t* src, int src_width, int src_height,
    float* dst, int dst_width, int dst_height,
    cudaStream_t stream) {
    AffineMatrix s2d, d2s;
    // Uniform scale that fits the source inside the destination.
    float scale = std::min(dst_height / (float)src_height, dst_width / (float)src_width);
    s2d.value[0] = scale;
    s2d.value[1] = 0;
    s2d.value[2] = 0;  // paste at the top-left corner
    // s2d.value[2] = -scale * src_width * 0.5 + dst_width * 0.5;  // centered paste
    s2d.value[3] = 0;
    s2d.value[4] = scale;
    s2d.value[5] = 0;  // paste at the top-left corner
    // s2d.value[5] = -scale * src_height * 0.5 + dst_height * 0.5;  // centered paste
    // m2x3_d2s wraps d2s.value, so invertAffineTransform normally writes
    // the inverse straight into d2s.
    cv::Mat m2x3_s2d(2, 3, CV_32F, s2d.value);
    cv::Mat m2x3_d2s(2, 3, CV_32F, d2s.value);
    cv::invertAffineTransform(m2x3_s2d, m2x3_d2s);
    // Copy back only if OpenCV reallocated the destination buffer. The old
    // unconditional memcpy copied d2s.value onto itself (overlapping
    // memcpy is undefined behavior).
    if (m2x3_d2s.data != reinterpret_cast<uchar*>(d2s.value))
        memcpy(d2s.value, m2x3_d2s.ptr<float>(0), sizeof(d2s.value));
    int jobs = dst_height * dst_width;  // one thread per output pixel
    int threads = 256;
    // Integer ceiling division replaces ceil(jobs / (float)threads),
    // avoiding float rounding error for large job counts.
    int blocks = (jobs + threads - 1) / threads;
    retinanet_detect_warpaffine_kernel<<<blocks, threads, 0, stream>>>(
        src, src_width * 3, src_width,
        src_height, dst, dst_width,
        dst_height, 128, d2s, jobs);
}
// Host launcher for the RetinaNet-classify GPU preprocessing kernel.
// Computes the letterbox affine transform (uniform scale, top-left paste,
// border value 128), inverts it to a destination->source mapping, and
// launches retinanet_classify_warpaffine_kernel asynchronously on `stream`.
// src/dst must be device-accessible buffers (interleaved uint8 in,
// planar float CHW out).
void retinanet_classify_preprocess_kernel_img(
    uint8_t* src, int src_width, int src_height,
    float* dst, int dst_width, int dst_height,
    cudaStream_t stream) {
    AffineMatrix s2d, d2s;
    // Uniform scale that fits the source inside the destination.
    float scale = std::min(dst_height / (float)src_height, dst_width / (float)src_width);
    s2d.value[0] = scale;
    s2d.value[1] = 0;
    s2d.value[2] = 0;  // paste at the top-left corner
    // s2d.value[2] = -scale * src_width * 0.5 + dst_width * 0.5;  // centered paste
    s2d.value[3] = 0;
    s2d.value[4] = scale;
    s2d.value[5] = 0;  // paste at the top-left corner
    // s2d.value[5] = -scale * src_height * 0.5 + dst_height * 0.5;  // centered paste
    // m2x3_d2s wraps d2s.value, so invertAffineTransform normally writes
    // the inverse straight into d2s.
    cv::Mat m2x3_s2d(2, 3, CV_32F, s2d.value);
    cv::Mat m2x3_d2s(2, 3, CV_32F, d2s.value);
    cv::invertAffineTransform(m2x3_s2d, m2x3_d2s);
    // Copy back only if OpenCV reallocated the destination buffer. The old
    // unconditional memcpy copied d2s.value onto itself (overlapping
    // memcpy is undefined behavior).
    if (m2x3_d2s.data != reinterpret_cast<uchar*>(d2s.value))
        memcpy(d2s.value, m2x3_d2s.ptr<float>(0), sizeof(d2s.value));
    int jobs = dst_height * dst_width;  // one thread per output pixel
    int threads = 256;
    // Integer ceiling division replaces ceil(jobs / (float)threads),
    // avoiding float rounding error for large job counts.
    int blocks = (jobs + threads - 1) / threads;
    retinanet_classify_warpaffine_kernel<<<blocks, threads, 0, stream>>>(
        src, src_width * 3, src_width,
        src_height, dst, dst_width,
        dst_height, 128, d2s, jobs);
}
// CPU (OpenCV) letterbox preprocessing: resize `img` with preserved aspect
// ratio to fit input_w x input_h, paste it at the top-left corner of a
// 128-filled canvas, and return the canvas. Matches the top-left paste and
// border value used by the GPU warp-affine launchers above.
// Fix: removed the unused locals x/y (dead code that only the commented-out
// centered-paste variant needed; it triggered -Wall unused warnings).
cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h)
{
    int w, h;
    float r_w = input_w / (img.cols * 1.0);
    float r_h = input_h / (img.rows * 1.0);
    // Pick the limiting dimension so the scaled image fits inside the canvas.
    if (r_h > r_w) {
        w = input_w;
        h = r_w * img.rows;
    } else {
        w = r_h * img.cols;
        h = input_h;
    }
    cv::Mat re(h, w, CV_8UC3);
    cv::resize(img, re, re.size(), 0, 0, cv::INTER_LINEAR);
    // 128-filled canvas, same border value as the GPU kernels.
    cv::Mat out(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128));
    // For a centered paste use offsets ((input_w - w) / 2, (input_h - h) / 2)
    // instead of (0, 0); top-left kept to stay consistent with the GPU path.
    re.copyTo(out(cv::Rect(0, 0, re.cols, re.rows)));
    return out;
}

View File

@ -0,0 +1,40 @@
// Image-preprocessing interface for the detection/classification models:
// GPU warp-affine (letterbox) kernel launchers plus a CPU fallback.
// NOTE(review): identifiers beginning with an underscore followed by a
// capital letter are reserved in C++; consider renaming this include guard.
#ifndef _PRE_PROCESS_H_
#define _PRE_PROCESS_H_
#include <cuda_runtime.h>
#include <cstdint>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// 2x3 row-major affine transform coefficients:
// x' = v0*x + v1*y + v2;  y' = v3*x + v4*y + v5.
struct AffineMatrix{
float value[6];
};
// Each launcher below scales src into dst with preserved aspect ratio
// (letterbox, top-left paste, border 128) and applies that model's channel
// ordering/normalization on the GPU; src/dst are device-accessible buffers
// (interleaved uint8 in, planar float CHW out), asynchronous on `stream`.
void yolov5_detect_preprocess_kernel_img(
uint8_t* src, int src_width, int src_height,
float* dst, int dst_width, int dst_height,
cudaStream_t stream);
void yolov5_classify_preprocess_kernel_img(
uint8_t* src, int src_width, int src_height,
float* dst, int dst_width, int dst_height,
cudaStream_t stream);
void retinanet_detect_preprocess_kernel_img(
uint8_t* src, int src_width, int src_height,
float* dst, int dst_width, int dst_height,
cudaStream_t stream);
void retinanet_classify_preprocess_kernel_img(
uint8_t* src, int src_width, int src_height,
float* dst, int dst_width, int dst_height,
cudaStream_t stream);
// CPU (OpenCV) letterbox preprocessing with the same placement/border.
cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h);
#endif //END OF _PRE_PROCESS_H_

70
src/build.sh Normal file
View File

@ -0,0 +1,70 @@
#!/bin/bash
# Build script: configures with CMake, compiles, and assembles the runtime
# directory layout (app/) including config files and shared-library deps.
# Usage: ./build.sh        -> debug build
#        ./build.sh app    -> release build + copy library dependencies
set -e
# Directory this script lives in.
path_cur=$(cd "$(dirname "$0")"; pwd)
# Output directory
app_path="app"
# Executable name
appname="Matrix"
# Recreate a directory from scratch and cd into it.
# Fix: quoted "$1" so paths with spaces cannot word-split or glob.
function prepare_path() {
    [ -n "$1" ] && rm -rf "$1"
    mkdir -p "$1"
    cd "$1"
}
# Copy the executable's shared-library dependencies into app/lib.
# Fix: renamed from the original typo "cpoyLibs".
function copy_libs() {
    # Use ldd to extract the absolute paths of the dependencies.
    liblist=$(ldd ./$app_path/$appname | awk '{ if (match($3,"/")){ printf("%s "), $3 } }')
    # $liblist is intentionally unquoted: it is a space-separated list
    # of paths that must word-split into separate cp arguments.
    cp $liblist ./$app_path/lib
}
# Configure and build for X86.
# Fix: the original if/else here ran the identical cmake command in both
# branches; collapsed into a single invocation.
function build_X86(){
    local path_build=$path_cur/build
    prepare_path "$path_build"
    cmake -DCMAKE_BUILD_TYPE="Release" ..
    make -j4
    local ret=$?
    cd ..
    return ${ret}
}
# Release mode when invoked as "./build.sh app", otherwise debug mode.
if [ "$1" == "$app_path" ]; then
    echo "---------正式模式----------"
    rm -rf "$app_path"
    echo "创建文件目录..."
    mkdir -p "$app_path"
    mkdir -p $app_path/config $app_path/lib $app_path/logs $app_path/rfid_logs $app_path/result
    build_X86 "$1"
    echo "开始拷贝软件库文件..."
    copy_libs
else
    echo "---------调试模式----------"
    rm -rf "$app_path"
    echo "创建文件目录..."
    mkdir -p "$app_path"
    mkdir -p $app_path/config $app_path/lib $app_path/logs $app_path/rfid_logs $app_path/result
    build_X86
fi
if [ $? -ne 0 ]; then
    exit 1
fi
echo "开始拷贝配置文件..."
# Copy yaml/xml configuration files next to the executable.
cp -f ./config/* ./$app_path/config
echo "-----------successfully--------------"
exit 0

39
src/config/config.yaml Normal file
View File

@ -0,0 +1,39 @@
# 基础控制参数
base:
# 股道名称
track_name: "001"
# 测试模式
test_model: false
# 连接模式 【0:网口1:串口】
connect_type: 0
# 是否上传识别结果
up_result: false
# 日志文件目录
log_path: "./logs"
# 识别结果目录
result_path: "./result"
# 日志存储天数
result_save_days: 10
# 日志参数
log:
# 日志级别[DEBUG, INFO, WARN, ERROR, FATAL]
level: "DEBUG"
# http 接口
http_server:
# 使用状态
is_use: false
# 服务器IP
http_ip: 192.168.2.108
# 通讯端口
http_port: 20004
# 获取接口授权地址
token_path: "/api/blade-auth/oauth/token"
# 识别结果上传地址
up_result_path: "/api/train-carriage/identification/rfid-save"
# 接口用户名
username: "guest_01"
# 接口密码
password: "d55b0f642e817eea24725d2f2a31dd08"

21
src/config/matrix.yaml Normal file
View File

@ -0,0 +1,21 @@
use_deviceid:
#engineid: deviceid
0: 0
#engine实例
engines:
GetRfidEngine: 0
DealRfidEngine: 0
SaveRfidEngine: 0
HttpUpResultEngine: 0
SaveResultEngine: 0
DelExpiredEngine: 0
VideoAuxiliaryEngine: 0
#engine连接
connects:
GetRfidEngine_0_0: "DealRfidEngine_0_0 1024"
VideoAuxiliaryEngine_0_0: "DealRfidEngine_0_1 1024"
DealRfidEngine_0_0: "SaveRfidEngine_0_0 1024"
DealRfidEngine_0_1: "HttpUpResultEngine_0_0 1024"
DealRfidEngine_0_2: "SaveResultEngine_0_0 1024"

85
src/main.cpp Normal file
View File

@ -0,0 +1,85 @@
//#include "https_sn.h"
#include "Config.h"
#include "TimeUtil.h"
#include "FileUtil.h"
#include "StringUtil.h"
#include "Utils.h"
#include "myqueue.h"
#include "EngineManager.h"
#include "AppCommon.h"
//using namespace ns_https_sn;
using namespace ai_matrix;
// Global run flag: cleared by SigHandler on SIGINT to stop main()'s wait loop.
std::atomic_bool app_flag(true);
// Signal handler: on SIGINT, request a clean shutdown by clearing the
// global run flag that main()'s wait loop observes.
void SigHandler(int iSigno)
{
    if (iSigno != SIGINT)
        return;  // only SIGINT is handled here
    app_flag = false;
}
// Configuration file paths, relative to the working directory.
std::string strConfigYamlPath = "./config/config.yaml";
std::string strEngineYamlPath = "./config/matrix.yaml";
// Program entry point: load the configuration, set up logging and signal
// handling, initialize the engine manager, run all engines until SIGINT,
// then shut everything down. Returns 0 on clean exit, -1 on any setup error.
int main(int argc, const char *argv[])
{
    // Load the application configuration.
    int iRetYaml = Config::GetIns()->readYaml(strConfigYamlPath);
    if (-1 == iRetYaml)
    {
        LogError << "read yaml file error";
        return -1;
    }
    ai_matrix::BaseConfig baseConfig = Config::GetIns()->getBaseConfig();
    ai_matrix::LogConfig logConfig = Config::GetIns()->getLogConfig();
    // Ensure the log directory exists before the log file is opened.
    // Fix: the original created the hardcoded "./logs" (diverging from the
    // configured baseConfig.strLogPath) and did so *after* SetLogFile.
    if (!FileUtil::getins()->CreateDirPath(baseConfig.strLogPath))
    {
        LogError << "日志目录创建失败";
        return -1;
    }
    // Configure logging from the loaded settings.
    MatrixAiLog::Log::SetLogLevel(logConfig.strLevel);
    MatrixAiLog::Log::SetLogFile(baseConfig.strLogPath);
    // Install the SIGINT handler for a clean shutdown.
    if (signal(SIGINT, SigHandler) == SIG_ERR)
    {
        LogError << "cannot catch SIGINT.";
        return -1;
    }
    // Engine framework initialization.
    EngineManager engineManager;
    int ret = engineManager.Init();
    if (ret != APP_ERR_OK)
    {
        LogError << "engineManager init error";
        return -1;
    }
    ret = engineManager.load_yaml_config(strEngineYamlPath);
    if (ret != APP_ERR_OK)
    {
        LogError << "load matrix.yaml error";
        return -1;
    }
    engineManager.RunAllEngine();
    // Idle until SIGINT clears app_flag.
    while (app_flag)
    {
        usleep(1000);
    }
    // Stop engines and release framework resources.
    engineManager.StopAllEngine();
    engineManager.DeInit();
    LogInfo << "app end";
    return 0;
}