// Model inference engine
#ifndef _INFERENCE_MODEL_ENGINE_H
#define _INFERENCE_MODEL_ENGINE_H

#include <iostream>
#include <chrono>
#include <cmath>
#include <utility>
#include <thread>
#include <functional>
#include <atomic>
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#include <queue>
#include <mutex>
#include <semaphore.h>
#include <algorithm>
#include <string>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <vector>
#include <memory>

#include "EngineBase.h"
#include "EngineFactory.h"
#include "MyYaml.h"
#include "myutils.h"
#include "AppCommon.h"

#include "cuda_utils.h"
#include "logging.h"
#include "yolov5_common.h"
#include "utils.h"
#include "calibrator.h"
#include "preprocess.h"

class InferenceModelEngine : public ai_matrix::EngineBase
{
public:
    InferenceModelEngine();
    ~InferenceModelEngine();
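
    // Engine lifecycle overrides from ai_matrix::EngineBase.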
    APP_ERROR Init() override;
    APP_ERROR DeInit() override;
    APP_ERROR Process() override;
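
    // YOLOv5 compound-scaling helpers (tensorrtx style): get_width() scales a base
    // channel count by the width multiple gw and rounds up to a multiple of divisor;
    // get_depth() scales a base block-repeat count by the depth multiple gd.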
    int get_width(int x, float gw, int divisor);
    int get_depth(int x, float gd);
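
    // Build the YOLOv5 network layer by layer and return a TensorRT engine; weights
    // are loaded from the .wts file named by wts_name. The _p6 variant builds the
    // larger P6 topology.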
    ICudaEngine* build_engine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config,
                              nvinfer1::DataType dt, float& gd, float& gw, std::string& wts_name);
    ICudaEngine* build_engine_p6(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config,
                                 nvinfer1::DataType dt, float& gd, float& gw, std::string& wts_name);
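
    // Create the engine via the TensorRT builder API and serialize it into *modelStream.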
    void APIToModel(Logger gLogger, unsigned int maxBatchSize, IHostMemory** modelStream,
                    bool& is_p6, float& gd, float& gw, std::string& wts_name);
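
    // Enqueue one batch on the stream and copy the detection output back to the host.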
    void doInference(IExecutionContext& context, cudaStream_t& stream, void** buffers, float* output, int batchSize);

private:
    std::string strPort0_;

    const char* INPUT_BLOB_NAME = "images";    // Input layer name
    const char* OUTPUT_BLOB_NAME = "output";   // Output layer name
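
    // Input/output binding buffers (index 0: input tensor, index 1: detections).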
    float* buffers_[2];
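
    // TensorRT runtime objects for the deserialized engine.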
    cudaStream_t* inference_model_stream_ = nullptr;
    Logger* gLogger_ = nullptr;
    IRuntime* runtime_ = nullptr;
    ICudaEngine* engine_ = nullptr;
    IExecutionContext* context_ = nullptr;
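
    // Floats in the output blob: MAX_OUTPUT_BBOX_COUNT detections of
    // sizeof(Yolo::Detection) / sizeof(float) floats each, plus one leading
    // float that stores the number of detections.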
    const int OUTPUT_SIZE = Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof(Yolo::Detection) / sizeof(float) + 1;
};
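
// A minimal usage sketch (comment only), assuming the framework drives the
// EngineBase lifecycle and that APP_ERR_OK is the success code from AppCommon.h;
// in practice instances are created through EngineFactory rather than directly:
//
//   InferenceModelEngine engine;
//   if (engine.Init() == APP_ERR_OK) {   // one-time setup
//       engine.Process();                // per-iteration inference work
//       engine.DeInit();                 // teardown
//   }

// Register this engine type with EngineFactory (macro from EngineFactory.h).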
ENGINE_REGIST(InferenceModelEngine)

#endif  // _INFERENCE_MODEL_ENGINE_H