// File: Train_Identify/nvidia_ascend_base/Base/Framework/ModelProcess/ModelProcess.h

/*
* Copyright(C) 2020. Huawei Technologies Co.,Ltd. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MODELPROCSS_H
#define MODELPROCSS_H
#include <cstdint>
#include <cstdio>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>
#include "acl/acl.h"
#include "CommonDataType.h"
#include "ErrorCode.h"
#include "Log.h"
#ifdef USE_DCMI_INTERFACE
#include "dcmi_interface_api.h"
#endif
// Class of model inference.
//
// Owns one ACL offline model on a given NPU device: Init() loads the model,
// the *BufferWithSizeMalloc() helpers allocate device buffers sized from the
// model description, the ModelInference* methods run inference, and DeInit()
// releases the acquired resources.
//
// NOTE(review): return types mix plain `int` and `APP_ERROR`; presumably the
// `int` methods carry the same error codes — confirm against the .cpp.
// NOTE(review): the class holds raw owning device pointers but declares no
// copy/move members (Rule of Five); copying an instance would double-free in
// DeInit(). Consider `= delete`-ing copy ops once all callers are audited.
class ModelProcess
{
public:
    // Create a processor bound to device `deviceId` for the named model.
    ModelProcess(const int deviceId, const std::string &modelName);
    ModelProcess();
    ~ModelProcess();

    // Load the offline model located at modelPath onto the device.
    // Returns an error code (0 on success — confirm convention in the .cpp).
    int Init(std::string modelPath);
#ifdef USE_DCMI_INTERFACE
    // Overload for (optionally) encrypted models; cardId/deviceId select the
    // target NPU when decrypting through the DCMI interface.
    int Init(const std::string &modelPath, bool isEncrypted, int cardId = 0, int deviceId = 0);
#endif
    // Release the model and every resource acquired by this object.
    int DeInit();

    // Allocate one device buffer per model input (resp. output), sized from
    // the model description, storing pointers/sizes into inputBuffers_/
    // inputSizes_ (resp. outputBuffers_/outputSizes_).
    APP_ERROR InputBufferWithSizeMalloc(aclrtMemMallocPolicy policy = ACL_MEM_MALLOC_HUGE_FIRST);
    APP_ERROR OutputBufferWithSizeMalloc(aclrtMemMallocPolicy policy = ACL_MEM_MALLOC_HUGE_FIRST);

    // Locally added overload: run inference on a single DVPP-produced input
    // buffer of buffer_size bytes.
    int ModelInference_from_dvpp(void *buffer, uint32_t buffer_size);
    // Run inference on caller-provided device buffers. A non-zero
    // dynamicBatchSize presumably selects the model's dynamic-batch profile
    // — confirm in the .cpp.
    int ModelInference(const std::vector<void *> &inputBufs, const std::vector<size_t> &inputSizes,
        const std::vector<void *> &outputBufs, const std::vector<size_t> &outputSizes,
        size_t dynamicBatchSize = 0);
    // Run inference using the dynamic height/width recorded by SetModelWH().
    int ModelInferDynamicHW(const std::vector<void *> &inputBufs, const std::vector<size_t> &inputSizes,
        const std::vector<void *> &outputBufs, const std::vector<size_t> &outputSizes);

    // Accessors over the loaded model description.
    aclmdlDesc *GetModelDesc() const;
    size_t GetModelNumInputs() const;
    size_t GetModelNumOutputs() const;
    size_t GetModelInputSizeByIndex(const size_t &i) const;
    size_t GetModelOutputSizeByIndex(const size_t &i) const;

    // Free device buffers previously allocated by the *Malloc helpers.
    void ReleaseModelBuffer(std::vector<void *> &modelBuffers) const;
    // Record the dynamic image size consumed by ModelInferDynamicHW().
    void SetModelWH(uint32_t width, uint32_t height);

    // Device buffers/sizes populated by the *BufferWithSizeMalloc helpers.
    // Public in the original API — external callers mutate these directly,
    // so they cannot be encapsulated without breaking those callers.
    std::vector<void *> inputBuffers_ = {};
    std::vector<size_t> inputSizes_ = {};
    std::vector<void *> outputBuffers_ = {};
    std::vector<size_t> outputSizes_ = {};

private:
    // Wrap raw device buffers into an aclmdlDataset for execution.
    aclmdlDataset *CreateAndFillDataset(const std::vector<void *> &bufs, const std::vector<size_t> &sizes) const;
    void DestroyDataset(const aclmdlDataset *dataset) const;
    // Load an in-memory model image of modelSize bytes.
    APP_ERROR LoadModel(const std::shared_ptr<uint8_t> &modelData, int modelSize);
#ifdef USE_DCMI_INTERFACE
    // Toggle console display mode (presumably echo on/off while reading the
    // decryption password — confirm in the .cpp).
    static void SetConsoleDispMode(int fd, int option);
    // Obtain the key id and password used to decrypt an encrypted model.
    APP_ERROR GetKeyIdPassword(unsigned int &id, unsigned char password[], unsigned int &passwordLen) const;
#endif

    std::mutex mtx_ = {};                 // presumably serializes inference — confirm in the .cpp
    int deviceId_ = 0;                    // Device used
    std::string modelName_ = "";
    uint32_t modelId_ = 0;                // Id of import model
    uint32_t modelWidth_ = 0;             // set by SetModelWH()
    uint32_t modelHeight_ = 0;            // set by SetModelWH()
    void *modelDevPtr_ = nullptr;         // device memory for the model image
    size_t modelDevPtrSize_ = 0;
    void *weightDevPtr_ = nullptr;        // device memory for the weights
    size_t weightDevPtrSize_ = 0;
    aclrtContext contextModel_ = nullptr; // ACL context owning the model
    std::shared_ptr<aclmdlDesc> modelDesc_ = nullptr;
    bool isDeInit_ = false;               // presumably guards double release in DeInit()
#ifdef USE_DCMI_INTERFACE
    DCMI_ENCRYPTED_DATA_NODE encryptModelData_ = {}; // information for encrypted model
#endif
};
#endif