[Android] Add android aar package (PaddlePaddle#416)
* [Android] Add Android build docs and demo (#26)

* [Backend] Add override flag to lite backend

* [Docs] Add Android C++ SDK build docs

* [Doc] fix android_build_docs typos

* Update CMakeLists.txt

* Update android.md

* [Doc] Add PicoDet Android demo docs

* [Doc] Update PicoDet Android demo docs

* [Doc] Update PaddleClasModel Android demo docs

* [Doc] Update fastdeploy android jni docs

* [Doc] Update fastdeploy android jni usage docs

* [Android] init fastdeploy android jar package

* [Backend] support int8 option for lite backend

* [Model] add Backend::Lite to paddle model

* [Backend] use CopyFromCpu for lite backend.

* [Android] package jni srcs and java api into aar

* Update infer.cc

* Update infer.cc

* [Android] Update package build.gradle

* [Android] Update android app examples

* [Android] update android detection app
DefTruth authored Oct 26, 2022
1 parent b064ddf commit a51e5a6
Showing 137 changed files with 4,664 additions and 37 deletions.
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
   auto model_file = model_dir + sep + "model.pdmodel";
   auto params_file = model_dir + sep + "model.pdiparams";
   auto config_file = model_dir + sep + "infer_cfg.yml";
+  auto option = fastdeploy::RuntimeOption();
+  option.UseCpu();
   auto model = fastdeploy::vision::detection::FasterRCNN(
-      model_file, params_file, config_file);
+      model_file, params_file, config_file, option);
   if (!model.Initialized()) {
     std::cerr << "Failed to initialize." << std::endl;
     return;
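This change repeats across all the example programs below: construct a fastdeploy::RuntimeOption explicitly, pin it to the CPU, and pass it as the model constructor's last argument instead of relying on the default option. A minimal self-contained sketch of the pattern (the model directory here is a hypothetical placeholder, not from this commit):

    #include <iostream>
    #include <string>
    #include "fastdeploy/vision.h"

    int main() {
      std::string dir = "./faster_rcnn_model";  // assumed exported model dir
      auto option = fastdeploy::RuntimeOption();
      option.UseCpu();  // explicit CPU placement, as in the hunks here
      auto model = fastdeploy::vision::detection::FasterRCNN(
          dir + "/model.pdmodel", dir + "/model.pdiparams",
          dir + "/infer_cfg.yml", option);
      if (!model.Initialized()) {
        std::cerr << "Failed to initialize." << std::endl;
        return -1;
      }
      return 0;
    }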
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
   auto model_file = model_dir + sep + "model.pdmodel";
   auto params_file = model_dir + sep + "model.pdiparams";
   auto config_file = model_dir + sep + "infer_cfg.yml";
+  auto option = fastdeploy::RuntimeOption();
+  option.UseCpu();
   auto model = fastdeploy::vision::detection::MaskRCNN(model_file, params_file,
-                                                       config_file);
+                                                       config_file, option);
   if (!model.Initialized()) {
     std::cerr << "Failed to initialize." << std::endl;
     return;
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
   auto model_file = model_dir + sep + "model.pdmodel";
   auto params_file = model_dir + sep + "model.pdiparams";
   auto config_file = model_dir + sep + "infer_cfg.yml";
+  auto option = fastdeploy::RuntimeOption();
+  option.UseCpu();
   auto model = fastdeploy::vision::detection::PicoDet(model_file, params_file,
-                                                      config_file);
+                                                      config_file, option);
   if (!model.Initialized()) {
     std::cerr << "Failed to initialize." << std::endl;
     return;
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
   auto model_file = model_dir + sep + "model.pdmodel";
   auto params_file = model_dir + sep + "model.pdiparams";
   auto config_file = model_dir + sep + "infer_cfg.yml";
+  auto option = fastdeploy::RuntimeOption();
+  option.UseCpu();
   auto model = fastdeploy::vision::detection::PPYOLO(model_file, params_file,
-                                                     config_file);
+                                                     config_file, option);
   if (!model.Initialized()) {
     std::cerr << "Failed to initialize." << std::endl;
     return;
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
   auto model_file = model_dir + sep + "model.pdmodel";
   auto params_file = model_dir + sep + "model.pdiparams";
   auto config_file = model_dir + sep + "infer_cfg.yml";
+  auto option = fastdeploy::RuntimeOption();
+  option.UseCpu();
   auto model = fastdeploy::vision::detection::PPYOLOE(model_file, params_file,
-                                                      config_file);
+                                                      config_file, option);
   if (!model.Initialized()) {
     std::cerr << "Failed to initialize." << std::endl;
     return;
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
   auto model_file = model_dir + sep + "model.pdmodel";
   auto params_file = model_dir + sep + "model.pdiparams";
   auto config_file = model_dir + sep + "infer_cfg.yml";
+  auto option = fastdeploy::RuntimeOption();
+  option.UseCpu();
   auto model = fastdeploy::vision::detection::YOLOv3(model_file, params_file,
-                                                     config_file);
+                                                     config_file, option);
   if (!model.Initialized()) {
     std::cerr << "Failed to initialize." << std::endl;
     return;
4 changes: 3 additions & 1 deletion examples/vision/detection/paddledetection/cpp/infer_yolox.cc
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
   auto model_file = model_dir + sep + "model.pdmodel";
   auto params_file = model_dir + sep + "model.pdiparams";
   auto config_file = model_dir + sep + "infer_cfg.yml";
+  auto option = fastdeploy::RuntimeOption();
+  option.UseCpu();
   auto model = fastdeploy::vision::detection::PaddleYOLOX(
-      model_file, params_file, config_file);
+      model_file, params_file, config_file, option);
   if (!model.Initialized()) {
     std::cerr << "Failed to initialize." << std::endl;
     return;
@@ -25,8 +25,10 @@ void CpuInfer(const std::string& tinypose_model_dir,
   auto tinypose_model_file = tinypose_model_dir + sep + "model.pdmodel";
   auto tinypose_params_file = tinypose_model_dir + sep + "model.pdiparams";
   auto tinypose_config_file = tinypose_model_dir + sep + "infer_cfg.yml";
+  auto option = fastdeploy::RuntimeOption();
+  option.UseCpu();
   auto tinypose_model = fastdeploy::vision::keypointdetection::PPTinyPose(
-      tinypose_model_file, tinypose_params_file, tinypose_config_file);
+      tinypose_model_file, tinypose_params_file, tinypose_config_file, option);
   if (!tinypose_model.Initialized()) {
     std::cerr << "TinyPose Model Failed to initialize." << std::endl;
     return;
1 change: 1 addition & 0 deletions examples/vision/matting/ppmatting/cpp/infer.cc
@@ -26,6 +26,7 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file,
   auto params_file = model_dir + sep + "model.pdiparams";
   auto config_file = model_dir + sep + "deploy.yaml";
   auto option = fastdeploy::RuntimeOption();
+  option.UseCpu();
   auto model = fastdeploy::vision::matting::PPMatting(model_file, params_file,
                                                       config_file, option);
   if (!model.Initialized()) {
4 changes: 3 additions & 1 deletion examples/vision/segmentation/paddleseg/cpp/infer.cc
@@ -24,8 +24,10 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
   auto model_file = model_dir + sep + "model.pdmodel";
   auto params_file = model_dir + sep + "model.pdiparams";
   auto config_file = model_dir + sep + "deploy.yaml";
+  auto option = fastdeploy::RuntimeOption();
+  option.UseCpu();
   auto model = fastdeploy::vision::segmentation::PaddleSegModel(
-      model_file, params_file, config_file);
+      model_file, params_file, config_file, option);

   if (!model.Initialized()) {
     std::cerr << "Failed to initialize." << std::endl;
44 changes: 32 additions & 12 deletions fastdeploy/backends/lite/lite_backend.cc
@@ -42,7 +42,11 @@ FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType& dtype) {
 void LiteBackend::BuildOption(const LiteBackendOption& option) {
   option_ = option;
   std::vector<paddle::lite_api::Place> valid_places;
-  if (option.enable_fp16) {
+  if (option_.enable_int8) {
+    valid_places.push_back(
+        paddle::lite_api::Place{TARGET(kARM), PRECISION(kInt8)});
+  }
+  if (option_.enable_fp16) {
     paddle::lite_api::MobileConfig check_fp16_config;
     // Determine whether the device supports the FP16
     // instruction set (or whether it is an arm device
@@ -58,12 +62,12 @@ void LiteBackend::BuildOption(const LiteBackendOption& option) {
   valid_places.push_back(
       paddle::lite_api::Place{TARGET(kARM), PRECISION(kFloat)});
   config_.set_valid_places(valid_places);
-  if (option.threads > 0) {
-    config_.set_threads(option.threads);
+  if (option_.threads > 0) {
+    config_.set_threads(option_.threads);
   }
-  if (option.power_mode > 0) {
+  if (option_.power_mode > 0) {
     config_.set_power_mode(
-        static_cast<paddle::lite_api::PowerMode>(option.power_mode));
+        static_cast<paddle::lite_api::PowerMode>(option_.power_mode));
   }
 }
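Note the ordering BuildOption establishes: the int8 place (when enabled) is pushed first, fp16 (when the device supports it) next, and the float place is always appended last, so Paddle Lite prefers lower-precision kernels and falls back to float for ops that lack them. A standalone sketch of that selection logic, under the assumption that place order expresses kernel preference:

    #include <string>
    #include <vector>

    // Mirrors the precedence built up in BuildOption above; the strings
    // stand in for paddle::lite_api::Place values.
    std::vector<std::string> PrecisionPreference(bool enable_int8,
                                                 bool enable_fp16) {
      std::vector<std::string> places;
      if (enable_int8) places.push_back("kInt8");  // quantized kernels first
      if (enable_fp16) places.push_back("kFP16");  // then half precision
      places.push_back("kFloat");                  // float is the fallback
      return places;
    }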

@@ -136,14 +140,13 @@ TensorInfo LiteBackend::GetOutputInfo(int index) {
 std::vector<TensorInfo> LiteBackend::GetOutputInfos() { return outputs_desc_; }

 bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
-                       std::vector<FDTensor>* outputs) {
+                        std::vector<FDTensor>* outputs) {
   if (inputs.size() != inputs_desc_.size()) {
     FDERROR << "[LiteBackend] Size of inputs(" << inputs.size()
             << ") should keep same with the inputs of this model("
             << inputs_desc_.size() << ")." << std::endl;
     return false;
   }
-
   for (size_t i = 0; i < inputs.size(); ++i) {
     auto iter = inputs_order_.find(inputs[i].name);
     if (iter == inputs_order_.end()) {
@@ -152,12 +155,29 @@ bool LiteBackend::Infer(std::vector<FDTensor>& inputs,
       return false;
     }
     auto tensor = predictor_->GetInput(iter->second);
-    tensor->Resize(inputs[i].shape);
-    tensor->ShareExternalMemory(const_cast<void*>(inputs[i].CpuData()),
-                                inputs[i].Nbytes(),
-                                paddle::lite_api::TargetType::kARM);
+    // Adjust dims only, allocate lazy.
+    tensor->Resize(inputs[i].shape);
+    if (inputs[i].dtype == FDDataType::FP32) {
+      tensor->CopyFromCpu<float, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const float*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::INT32) {
+      tensor->CopyFromCpu<int, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const int*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::INT8) {
+      tensor->CopyFromCpu<int8_t, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const int8_t*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else if (inputs[i].dtype == FDDataType::UINT8) {
+      tensor->CopyFromCpu<uint8_t, paddle::lite_api::TargetType::kARM>(
+          reinterpret_cast<const uint8_t*>(const_cast<void*>(
+              inputs[i].CpuData())));
+    } else {
+      FDASSERT(false, "Unexpected data type of %d.", inputs[i].dtype);
+    }
   }

   predictor_->Run();

   outputs->resize(outputs_desc_.size());
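The four CopyFromCpu branches above differ only in the element type. A hypothetical helper that folds them into one template (a sketch against the same assumed Paddle Lite template signature, not code from this commit):

    // CopyInputToLite is illustrative; the commit keeps the explicit
    // if/else chain instead.
    template <typename T>
    static void CopyInputToLite(paddle::lite_api::Tensor* tensor,
                                FDTensor& input) {
      // reinterpret_cast from the CpuData() pointer is sufficient here; the
      // const_cast in the committed code is not strictly required.
      tensor->CopyFromCpu<T, paddle::lite_api::TargetType::kARM>(
          reinterpret_cast<const T*>(input.CpuData()));
    }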
2 changes: 2 additions & 0 deletions fastdeploy/backends/lite/lite_backend.h
@@ -37,6 +37,8 @@ struct LiteBackendOption {
   int power_mode = 3;
   // enable fp16
   bool enable_fp16 = false;
+  // enable int8
+  bool enable_int8 = false;
   // optimized model dir for CxxConfig
   std::string optimized_model_dir = "";
   // TODO(qiuyanjun): support more options for lite backend.
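For reference, this is how the extended option struct might be filled in directly (normally Runtime::CreateLiteBackend does this from RuntimeOption, as the runtime.cc hunk below shows); the values are illustrative:

    fastdeploy::LiteBackendOption lite_option;
    lite_option.threads = 4;         // > 0 overrides the backend default
    lite_option.power_mode = 3;      // LITE_POWER_NO_BIND
    lite_option.enable_fp16 = false;
    lite_option.enable_int8 = true;  // the new flag from this commit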
13 changes: 12 additions & 1 deletion fastdeploy/runtime.cc
@@ -321,7 +321,17 @@ void RuntimeOption::EnableLiteFP16() {
   lite_enable_fp16 = true;
 }

-void RuntimeOption::DisableLiteFP16() { lite_enable_fp16 = false; }
+void RuntimeOption::DisableLiteFP16() {
+  lite_enable_fp16 = false;
+}
+
+void RuntimeOption::EnableLiteInt8() {
+  lite_enable_int8 = true;
+}
+
+void RuntimeOption::DisableLiteInt8() {
+  lite_enable_int8 = false;
+}

 void RuntimeOption::SetLitePowerMode(LitePowerMode mode) {
   lite_power_mode = mode;
@@ -650,6 +660,7 @@ void Runtime::CreateLiteBackend() {
 #ifdef ENABLE_LITE_BACKEND
   auto lite_option = LiteBackendOption();
   lite_option.threads = option.cpu_thread_num;
+  lite_option.enable_int8 = option.lite_enable_int8;
   lite_option.enable_fp16 = option.lite_enable_fp16;
   lite_option.power_mode = static_cast<int>(option.lite_power_mode);
   lite_option.optimized_model_dir = option.lite_optimized_model_dir;
12 changes: 12 additions & 0 deletions fastdeploy/runtime.h
@@ -173,6 +173,16 @@ struct FASTDEPLOY_DECL RuntimeOption {
    */
   void DisableLiteFP16();

+  /**
+   * @brief enable int8 precision while use paddle lite backend
+   */
+  void EnableLiteInt8();
+
+  /**
+   * @brief disable int8 precision, change to full precision(float32)
+   */
+  void DisableLiteInt8();
+
   /**
    * @brief Set power mode while using Paddle Lite as inference backend, mode(0: LITE_POWER_HIGH; 1: LITE_POWER_LOW; 2: LITE_POWER_FULL; 3: LITE_POWER_NO_BIND, 4: LITE_POWER_RAND_HIGH; 5: LITE_POWER_RAND_LOW, refer [paddle lite](https://paddle-lite.readthedocs.io/zh/latest/api_reference/cxx_api_doc.html#set-power-mode) for more details)
    */
@@ -260,6 +270,8 @@ struct FASTDEPLOY_DECL RuntimeOption {
   // 3: LITE_POWER_NO_BIND 4: LITE_POWER_RAND_HIGH
   // 5: LITE_POWER_RAND_LOW
   LitePowerMode lite_power_mode = LitePowerMode::LITE_POWER_NO_BIND;
+  // enable int8 or not
+  bool lite_enable_int8 = false;
   // enable fp16 or not
   bool lite_enable_fp16 = false;
   // optimized model dir for CxxConfig
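Taken together with the runtime.cc wiring above, a user-level sketch of running a quantized model on Paddle Lite with the new toggles (UseLiteBackend and SetLitePowerMode already existed; only the int8 calls are new in this commit):

    fastdeploy::RuntimeOption option;
    option.UseCpu();
    option.UseLiteBackend();  // select Paddle Lite as the inference backend
    option.EnableLiteInt8();  // prefer int8 kernels for a quantized model
    option.SetLitePowerMode(fastdeploy::LitePowerMode::LITE_POWER_NO_BIND);
    // option.DisableLiteInt8();  // would restore full precision (float32)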
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/ppdet/mask_rcnn.cc
@@ -24,7 +24,7 @@ MaskRCNN::MaskRCNN(const std::string& model_file,
                    const RuntimeOption& custom_option,
                    const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::PDINFER};
+  valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/ppdet/ppyolo.cc
@@ -23,7 +23,7 @@ PPYOLO::PPYOLO(const std::string& model_file, const std::string& params_file,
                const RuntimeOption& custom_option,
                const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER};
   has_nms_ = true;
   runtime_option = custom_option;
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/ppdet/ppyoloe.cc
@@ -14,7 +14,7 @@ PPYOLOE::PPYOLOE(const std::string& model_file, const std::string& params_file,
                  const RuntimeOption& custom_option,
                  const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
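With Backend::LITE added to valid_cpu_backends, these PaddleDetection models can now be created with Paddle Lite on ARM CPUs instead of falling back to another engine. A hedged usage sketch (the file names are the conventional exported names, assumed here):

    fastdeploy::RuntimeOption option;
    option.UseCpu();
    option.UseLiteBackend();  // now a valid CPU backend for PPYOLOE
    auto model = fastdeploy::vision::detection::PPYOLOE(
        "model.pdmodel", "model.pdiparams", "infer_cfg.yml", option);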
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/ppdet/rcnn.cc
@@ -24,7 +24,7 @@ FasterRCNN::FasterRCNN(const std::string& model_file,
                        const RuntimeOption& custom_option,
                        const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::PDINFER};
+  valid_cpu_backends = {Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER};
   has_nms_ = true;
   runtime_option = custom_option;
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/ppdet/yolov3.cc
@@ -23,7 +23,7 @@ YOLOv3::YOLOv3(const std::string& model_file, const std::string& params_file,
                const RuntimeOption& custom_option,
                const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
2 changes: 1 addition & 1 deletion fastdeploy/vision/detection/ppdet/yolox.cc
@@ -24,7 +24,7 @@ PaddleYOLOX::PaddleYOLOX(const std::string& model_file,
                          const RuntimeOption& custom_option,
                          const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
+  valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
2 changes: 1 addition & 1 deletion fastdeploy/vision/faceid/contrib/insightface_rec.cc
@@ -30,7 +30,7 @@ InsightFaceRecognitionModel::InsightFaceRecognitionModel(
     valid_cpu_backends = {Backend::ORT};
     valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
-    valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
+    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   }
   runtime_option = custom_option;
2 changes: 1 addition & 1 deletion fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc
@@ -16,7 +16,7 @@ PPTinyPose::PPTinyPose(const std::string& model_file,
                        const RuntimeOption& custom_option,
                        const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
+  valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
2 changes: 1 addition & 1 deletion fastdeploy/vision/matting/ppmatting/ppmatting.cc
@@ -25,7 +25,7 @@ PPMatting::PPMatting(const std::string& model_file,
                      const RuntimeOption& custom_option,
                      const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
+  valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
2 changes: 1 addition & 1 deletion fastdeploy/vision/ocr/ppocr/classifier.cc
@@ -30,7 +30,7 @@ Classifier::Classifier(const std::string& model_file,
                           Backend::OPENVINO};
     valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
-    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
+    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   }
   runtime_option = custom_option;
2 changes: 1 addition & 1 deletion fastdeploy/vision/ocr/ppocr/dbdetector.cc
@@ -30,7 +30,7 @@ DBDetector::DBDetector(const std::string& model_file,
                           Backend::OPENVINO};
     valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
-    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
+    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   }

1 change: 0 additions & 1 deletion fastdeploy/vision/ocr/ppocr/ppocr_v2.cc
@@ -110,7 +110,6 @@ bool PPOCRv2::Predict(cv::Mat* img,
     if (nullptr != classifier_ && result->cls_labels[i] % 2 == 1 && result->cls_scores[i] > classifier_->cls_thresh) {
       cv::rotate(image_list[i], image_list[i], 1);
     }
-
     if (nullptr != recognizer_ && !Recognize(&(image_list[i]), result)) {
       FDERROR << "Failed to recgnize croped image of index " << i << "." << std::endl;
       return false;
2 changes: 1 addition & 1 deletion fastdeploy/vision/ocr/ppocr/recognizer.cc
@@ -48,7 +48,7 @@ Recognizer::Recognizer(const std::string& model_file,
                           Backend::OPENVINO};
     valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
-    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO};
+    valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   }

4 changes: 2 additions & 2 deletions fastdeploy/vision/segmentation/ppseg/model.cc
@@ -26,7 +26,7 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
                                const RuntimeOption& custom_option,
                                const ModelFormat& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::ORT};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::ORT, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
@@ -106,7 +106,7 @@ bool PaddleSegModel::BuildPreprocessPipelineFromConfig() {
         << "Please refer to https://github.com/PaddlePaddle/PaddleSeg/blob/develop/docs/model_export.md"
         << " to export model with fixed input shape."
         << std::endl;
-    valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER};
+    valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER};
   }
   if (input_height != -1 && input_width != -1 && !yml_contain_resize_op) {