// generated from zhangwei/Matrixai
// Copyright (c) 2023 Shandong Matrix Software Engineering Co., Ltd All rights reserved.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
#ifndef _YOLOV8_INFERENCE_H_
#define _YOLOV8_INFERENCE_H_
#include <string>
#include "logging.h"
#include <opencv2/opencv.hpp>
#include <NvCaffeParser.h>
#include <NvInfer.h>
#include <NvInferPlugin.h>
#include <NvOnnxParser.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
// Detection-head parameters for the YOLO model (naming kept from the
// earlier YOLOv5 integration; used by YoloV8Inference below).
typedef struct _YoloV5ModelParam
{
unsigned int uiClassNum;   // number of object classes -- presumably sized to the model's class output; TODO confirm
unsigned int uiClearNum;   // number of "clear"/clarity classes (pairs with stDetection.clear_id) -- verify against model
unsigned int uiDetSize;    // per-detection element count in the raw output -- assumption from name; TODO confirm
float fScoreThreshold;     // confidence threshold below which detections are discarded
float fNmsThreshold;       // IoU threshold used by non-maximum suppression
} YoloV5ModelParam;
// Model-agnostic I/O geometry and TensorRT binding information shared by
// all engines loaded through YoloV8Inference.
typedef struct _ModelCommonInfo
{
unsigned int uiModelWidth;       // network input width in pixels
unsigned int uiModelHeight;      // network input height in pixels
unsigned int uiInputSize;        // element count of the input binding -- presumably batch*channel*H*W; TODO confirm
unsigned int uiOutputSize;       // element count of the output binding
unsigned int uiChannel;          // input channel count (3 for BGR/RGB frames is assumed; verify)
unsigned int uiBatchSize;        // inference batch size
std::string strInputBlobName;    // name of the input tensor binding in the engine
std::string strOutputBlobName;   // name of the output tensor binding in the engine
} ModelCommonInfo;
// Aggregate passed to YoloV8Inference::YoloV8InferenceInit: detection-head
// parameters plus common I/O geometry for one model.
typedef struct _YoloV5ModelInfo
{
YoloV5ModelParam yolov5ModelParam;   // NMS/score thresholds and class counts
ModelCommonInfo modelCommonInfo;     // input/output shapes and binding names
} YoloV5ModelInfo;
// Number of box coordinates per detection (x, y, w, h layout is assumed;
// TODO confirm against the post-processing code).
#define LOCATIONS 4
// One decoded detection. alignas(float) keeps the struct at 4-byte
// alignment -- NOTE(review): layout likely mirrors a raw float buffer
// produced by the engine/post-process kernel, so do not reorder fields.
struct alignas(float) stDetection { //alignas(float)
float bbox[LOCATIONS];   // bounding box coordinates
float class_conf;        // confidence of the predicted object class
float clear_conf;        // confidence of the predicted "clear"/clarity class
int class_id;            // index of the predicted object class
int clear_id;            // index of the predicted "clear"/clarity class
};
// TensorRT-based YOLOv8 inference wrapper.
//
// Owns the deserialized engine, execution context, two CUDA streams
// (preprocessing and inference), and the host/device buffers used to move
// frames in and results out. Acquire resources with YoloV8InferenceInit and
// release them with YoloV8InferenceDeinit.
class YoloV8Inference
{
public:
    YoloV8Inference();
    ~YoloV8Inference();

    // Initializes the engine/context/streams/buffers for the model described
    // by pYoloV5ModelInfo. strModelName / strEngineName identify the model
    // and serialized engine files -- presumably build-from-ONNX falls back
    // when the engine file is absent; TODO confirm in the .cpp.
    // Return value: 0 on success is assumed from the int convention; verify.
    int YoloV8InferenceInit(YoloV5ModelInfo &pYoloV5ModelInfo, const std::string& strModelName, const std::string& strEngineName);

    // Releases everything acquired by YoloV8InferenceInit.
    int YoloV8InferenceDeinit(void);

    // Empty-vs-loaded re-identification check; writes the verdict into res.
    int EmptyHeavyCheck(cv::Mat& frame, bool& res);

    // Runs the common detection path on one frame; fResizeRatio receives the
    // letterbox scale so callers can map boxes back to the original image.
    int YoloV8InferenceModelCommon(cv::Mat& frame, float& fResizeRatio);

    // Runs classification-style inference; copies up to nsize floats of the
    // raw output into fResult.
    int YoloV8InferenceModelGetType(cv::Mat& frame, float* fResult, int nsize);

    // Resizes/letterboxes img to the model input size (input_w x input_h).
    cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h);

    // Scale factor that fits an img_width x img_height frame into the
    // model_width x model_height input while preserving aspect ratio
    // (assumed from the letterbox pattern; confirm in the definition).
    float GetResizeRatio(unsigned int img_width, unsigned int img_height, unsigned int model_width, unsigned int model_height);

    // Enqueues H2D copy, inference and D2H copy on `stream` using the given
    // binding indices. Fixed parameter-name typo: `ouputIndex` ->
    // `outputIndex` (declaration-only name; ABI/callers unaffected), now
    // consistent with doInferenceV2 below.
    void doInference(nvinfer1::IExecutionContext& context, cudaStream_t& stream, void** buffers, unsigned int inputIndex, float* input, int inputSize,
                     unsigned int outputIndex, float* output, int outputSize, int batchSize);

    // Variant that assumes the input is already resident on the device and
    // only copies the output back -- assumption from the signature; verify.
    void doInferenceV2(nvinfer1::IExecutionContext& context, cudaStream_t& stream, void** buffers, unsigned int outputIndex, float* output, int outputSize, int batchSize);

private:
    float* pfTransposeData_ = nullptr;            // scratch for layout transpose of model I/O
    YoloV5ModelInfo stYoloV5ModelInfo_;           // copy of the model description given to Init
    cudaStream_t* pImagePreprocessStream_ = nullptr; // CUDA stream for image preprocessing
    cudaStream_t* pInferenceModelStream_ = nullptr;  // CUDA stream for model inference
    float* pfBuffers_[2];                         // device bindings: [uiInputIndex_], [uiOutputIndex_]
    float* pfInputData_ = nullptr;                // host-side input staging buffer
    float* pfOutputData_ = nullptr;               // host-side output staging buffer
    uint8_t* pu8ImgHost_ = nullptr;               // pinned host image buffer -- pinning assumed; verify allocation
    uint8_t* pu8ImgDevice_ = nullptr;             // device image buffer
    unsigned int uiInputIndex_ = 0, uiOutputIndex_ = 0; // engine binding indices resolved from blob names
    Logger* pGLogger_ = nullptr;                  // TensorRT logger (see logging.h)
    nvinfer1::IRuntime* pRuntime_ = nullptr;      // deserializes the engine
    nvinfer1::ICudaEngine* pEngine_ = nullptr;    // deserialized engine
    nvinfer1::IExecutionContext* pContext_ = nullptr; // per-instance execution context
};
#endif // END OF _YOLOV8_INFERENCE_H_