
Commit

add paddleclas draft doc
jiangjiajun committed Aug 10, 2022
1 parent e91cfc9 commit bf5affb
Showing 12 changed files with 410 additions and 25 deletions.
2 changes: 1 addition & 1 deletion csrc/fastdeploy/vision.h
```diff
@@ -33,7 +33,7 @@
 #include "fastdeploy/vision/faceid/contrib/partial_fc.h"
 #include "fastdeploy/vision/faceid/contrib/vpl.h"
 #include "fastdeploy/vision/matting/contrib/modnet.h"
-#include "fastdeploy/vision/ppcls/model.h"
+#include "fastdeploy/vision/classification/ppcls/model.h"
 #include "fastdeploy/vision/detection/ppdet/model.h"
 #include "fastdeploy/vision/ppseg/model.h"
 #endif
```
```diff
@@ -12,15 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "fastdeploy/vision/ppcls/model.h"
+#include "fastdeploy/vision/classification/ppcls/model.h"
 #include "fastdeploy/vision/utils/utils.h"
 #include "yaml-cpp/yaml.h"
 
 namespace fastdeploy {
 namespace vision {
-namespace ppcls {
+namespace classification {
 
-Model::Model(const std::string& model_file, const std::string& params_file,
+PaddleClasModel::PaddleClasModel(const std::string& model_file, const std::string& params_file,
              const std::string& config_file, const RuntimeOption& custom_option,
              const Frontend& model_format) {
   config_file_ = config_file;
@@ -33,7 +33,7 @@ Model::Model(const std::string& model_file, const std::string& params_file,
   initialized = Initialize();
 }
 
-bool Model::Initialize() {
+bool PaddleClasModel::Initialize() {
   if (!BuildPreprocessPipelineFromConfig()) {
     FDERROR << "Failed to build preprocess pipeline from configuration file."
             << std::endl;
@@ -46,7 +46,7 @@ bool Model::Initialize() {
   return true;
 }
 
-bool Model::BuildPreprocessPipelineFromConfig() {
+bool PaddleClasModel::BuildPreprocessPipelineFromConfig() {
   processors_.clear();
   YAML::Node cfg;
   try {
@@ -91,7 +91,7 @@ bool Model::BuildPreprocessPipelineFromConfig() {
   return true;
 }
 
-bool Model::Preprocess(Mat* mat, FDTensor* output) {
+bool PaddleClasModel::Preprocess(Mat* mat, FDTensor* output) {
   for (size_t i = 0; i < processors_.size(); ++i) {
     if (!(*(processors_[i].get()))(mat)) {
       FDERROR << "Failed to process image data in " << processors_[i]->Name()
@@ -109,7 +109,7 @@ bool Model::Preprocess(Mat* mat, FDTensor* output) {
   return true;
 }
 
-bool Model::Postprocess(const FDTensor& infer_result, ClassifyResult* result,
+bool PaddleClasModel::Postprocess(const FDTensor& infer_result, ClassifyResult* result,
                         int topk) {
   int num_classes = infer_result.shape[1];
   const float* infer_result_buffer =
@@ -124,7 +124,7 @@ bool Model::Postprocess(const FDTensor& infer_result, ClassifyResult* result,
   return true;
 }
 
-bool Model::Predict(cv::Mat* im, ClassifyResult* result, int topk) {
+bool PaddleClasModel::Predict(cv::Mat* im, ClassifyResult* result, int topk) {
   Mat mat(*im);
   std::vector<FDTensor> processed_data(1);
   if (!Preprocess(&mat, &(processed_data[0]))) {
@@ -148,6 +148,6 @@ bool Model::Predict(cv::Mat* im, ClassifyResult* result, int topk) {
   return true;
 }
 
-}  // namespace ppcls
+}  // namespace classification
 }  // namespace vision
 }  // namespace fastdeploy
```
```diff
@@ -19,21 +19,21 @@
 
 namespace fastdeploy {
 namespace vision {
-namespace ppcls {
+namespace classification {
 
-class FASTDEPLOY_DECL Model : public FastDeployModel {
+class FASTDEPLOY_DECL PaddleClasModel : public FastDeployModel {
  public:
-  Model(const std::string& model_file, const std::string& params_file,
+  PaddleClasModel(const std::string& model_file, const std::string& params_file,
         const std::string& config_file,
         const RuntimeOption& custom_option = RuntimeOption(),
         const Frontend& model_format = Frontend::PADDLE);
 
-  std::string ModelName() const { return "ppclas-classify"; }
+  virtual std::string ModelName() const { return "PaddleClas/Model"; }
 
   // TODO(jiangjiajun) Batch is on the way
   virtual bool Predict(cv::Mat* im, ClassifyResult* result, int topk = 1);
 
- private:
+ protected:
   bool Initialize();
 
   bool BuildPreprocessPipelineFromConfig();
@@ -46,6 +46,20 @@ class FASTDEPLOY_DECL Model : public FastDeployModel {
   std::vector<std::shared_ptr<Processor>> processors_;
   std::string config_file_;
 };
-}  // namespace ppcls
+
+typedef PaddleClasModel PPLCNet;
+typedef PaddleClasModel PPLCNetv2;
+typedef PaddleClasModel EfficientNet;
+typedef PaddleClasModel GhostNet;
+typedef PaddleClasModel MobileNetv1;
+typedef PaddleClasModel MobileNetv2;
+typedef PaddleClasModel MobileNetv3;
+typedef PaddleClasModel ShuffleNetv2;
+typedef PaddleClasModel SqueezeNet;
+typedef PaddleClasModel Inceptionv3;
+typedef PaddleClasModel PPHGNet;
+typedef PaddleClasModel ResNet50vd;
+typedef PaddleClasModel SwinTransformer;
+}  // namespace classification
 }  // namespace vision
 }  // namespace fastdeploy
```
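
The typedefs above make every listed architecture an alias of `PaddleClasModel`, so deployment code is identical regardless of which network was exported. Below is a minimal usage sketch, assuming a standard PaddleClas export; the file names are placeholders, not files shipped with this commit:

```c++
#include <iostream>

#include "fastdeploy/vision.h"

int main() {
  namespace cls = fastdeploy::vision::classification;
  // ResNet50vd is a typedef of PaddleClasModel; any other alias
  // (PPLCNet, MobileNetv3, ...) is constructed the same way.
  cls::ResNet50vd model("inference.pdmodel", "inference.pdiparams",
                        "inference_cls.yaml");
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return -1;
  }

  auto im = cv::imread("test.jpg");
  fastdeploy::vision::ClassifyResult res;
  // Keep the five highest-scoring classes (topk defaults to 1).
  if (!model.Predict(&im, &res, 5)) {
    std::cerr << "Failed to predict." << std::endl;
    return -1;
  }
  return 0;
}
```
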
```diff
@@ -15,12 +15,11 @@
 
 namespace fastdeploy {
 void BindPPCls(pybind11::module& m) {
-  auto ppcls_module = m.def_submodule("ppcls", "Module to deploy PaddleClas.");
-  pybind11::class_<vision::ppcls::Model, FastDeployModel>(ppcls_module, "Model")
+  pybind11::class_<vision::classification::PaddleClasModel, FastDeployModel>(m, "PaddleClasModel")
       .def(pybind11::init<std::string, std::string, std::string, RuntimeOption,
                           Frontend>())
       .def("predict",
-           [](vision::ppcls::Model& self, pybind11::array& data, int topk = 1) {
+           [](vision::classification::PaddleClasModel& self, pybind11::array& data, int topk = 1) {
             auto mat = PyArrayToCvMat(data);
             vision::ClassifyResult res;
             self.Predict(&mat, &res, topk);
```
28 changes: 28 additions & 0 deletions examples/vision/classification/paddleclas/README.md
# PaddleClas Model Deployment

## Model Version Notes

- [PaddleClas Release/2.4](https://github.com/PaddlePaddle/PaddleClas)

## Preparing PaddleClas Deployment Models

To export a PaddleClas model, refer to its documentation on [model export](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/zh_CN/inference_deployment/export_model.md#2-%E5%88%86%E7%B1%BB%E6%A8%A1%E5%9E%8B%E5%AF%BC%E5%87%BA).

Note: a model exported by PaddleClas contains only the two files `inference.pdmodel` and `inference.pdiparams`. To deploy it, you also need the [inference_cls.yaml](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/deploy/configs/inference_cls.yaml) file that PaddleClas provides; FastDeploy reads the preprocessing configuration required at inference time from this yaml file. Developers can download this file and use it directly, but should adjust its configuration parameters to their own needs.
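
To make the three-file requirement concrete, the following minimal sketch shows how they would be passed to the C++ API introduced in this commit; the paths are placeholders, assuming the exported files sit in the working directory:

```c++
#include <iostream>

#include "fastdeploy/vision.h"

int main() {
  // inference.pdmodel / inference.pdiparams come from the PaddleClas export;
  // inference_cls.yaml supplies the preprocessing steps FastDeploy applies.
  auto model = fastdeploy::vision::classification::PaddleClasModel(
      "inference.pdmodel", "inference.pdiparams", "inference_cls.yaml");
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize model." << std::endl;
    return -1;
  }
  return 0;
}
```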


## Download Pretrained Models

For developers' convenience, some models exported from PaddleClas (each including the inference_cls.yaml file) are provided below and can be downloaded and used directly.

| Model | Size | Input Shape | Accuracy |
|:---------------------------------------------------------------- |:----- |:----- | :----- |
| [PPLCNet]() | 141MB | 224x224 | 51.4% |
| [PPLCNetv2]() | 10MB | 224x224 | 51.4% |
| [EfficientNet]() | | 224x224 | |


## Detailed Deployment Documentation

- [Python Deployment](python)
- [C++ Deployment](cpp)
14 changes: 14 additions & 0 deletions examples/vision/classification/paddleclas/cpp/CMakeLists.txt
```cmake
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.12)

# Path of the downloaded and extracted FastDeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")

include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

# Add the FastDeploy header dependencies
include_directories(${FASTDEPLOY_INCS})

add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
# Link against the FastDeploy libraries
target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
```
77 changes: 77 additions & 0 deletions examples/vision/classification/paddleclas/cpp/README.md
# YOLOv7 C++ Deployment Example

This directory provides `infer.cc` to quickly complete the deployment of YOLOv7 on CPU/GPU, as well as on GPU with TensorRT acceleration.

Before deployment, confirm the following two steps:

- 1. The hardware and software environment meets the requirements; see [FastDeploy Environment Requirements](../../../../../docs/quick_start/requirements.md)
- 2. Download the prebuilt deployment library and sample code for your development environment; see [FastDeploy Prebuilt Libraries](../../../../../docs/compile/prebuilt_libraries.md)

Taking CPU inference on Linux as an example, run the following commands in this directory to complete the build and test:

```
mkdir build
cd build
wget https://xxx.tgz
tar xvf fastdeploy-linux-x64-0.2.0.tgz
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0
make -j
# Download the officially converted YOLOv7 model file and a test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov7.onnx
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000087038.jpg
# CPU inference
./infer_demo yolov7.onnx 000000087038.jpg 0
# GPU inference
./infer_demo yolov7.onnx 000000087038.jpg 1
# TensorRT inference on GPU
./infer_demo yolov7.onnx 000000087038.jpg 2
```

## YOLOv7 C++ Interface

### YOLOv7 Class

```
fastdeploy::vision::detection::YOLOv7(
        const string& model_file,
        const string& params_file = "",
        const RuntimeOption& runtime_option = RuntimeOption(),
        const Frontend& model_format = Frontend::ONNX)
```

Loads and initializes a YOLOv7 model, where model_file is the exported model in ONNX format.

**Parameters**

> * **model_file**(str): path to the model file
> * **params_file**(str): path to the parameters file; pass an empty string when the model is in ONNX format
> * **runtime_option**(RuntimeOption): backend inference configuration; defaults to None, i.e. the default configuration is used
> * **model_format**(Frontend): model format; defaults to ONNX

#### Predict Function

> ```
> YOLOv7::Predict(cv::Mat* im, DetectionResult* result,
>                 float conf_threshold = 0.25,
>                 float nms_iou_threshold = 0.5)
> ```
>
> Model prediction interface: takes an input image and directly outputs the detection result.
>
> **Parameters**
>
> > * **im**: input image; note it must be in HWC, BGR format
> > * **result**: detection result, including the detection boxes and the confidence of each box; see [Vision Model Prediction Results](../../../../../docs/api/vision_results/) for a description of DetectionResult
> > * **conf_threshold**: confidence threshold for filtering detection boxes
> > * **nms_iou_threshold**: IoU threshold used during NMS

### Class Member Variables

> > * **size**(vector<int>): modifies the resize dimensions used during preprocessing; contains two integers representing [width, height]; defaults to [640, 640] (see the sketch below)
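
Since `size` is documented as a writable member, the preprocessing resolution can be overridden before prediction. A minimal sketch, assuming `size` is publicly accessible as described above and using placeholder file names:

```c++
#include <iostream>

#include "fastdeploy/vision.h"

int main() {
  auto model = fastdeploy::vision::detection::YOLOv7("yolov7.onnx");
  // Resize inputs to 320x320 during preprocessing instead of the
  // default 640x640 ([width, height], per the documentation above).
  model.size = {320, 320};

  auto im = cv::imread("test.jpeg");
  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return -1;
  }
  return 0;
}
```
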
- [Model Description](../../)
- [Python Deployment](../python)
- [Vision Model Prediction Results](../../../../../docs/api/vision_results/)
105 changes: 105 additions & 0 deletions examples/vision/classification/paddleclas/cpp/infer.cc
```c++
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision.h"

void CpuInfer(const std::string& model_file, const std::string& image_file) {
auto model = fastdeploy::vision::detection::YOLOv7(model_file);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}

auto im = cv::imread(image_file);
auto im_bak = im.clone();

fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}

auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void GpuInfer(const std::string& model_file, const std::string& image_file) {
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
auto model = fastdeploy::vision::detection::YOLOv7(model_file, "", option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}

auto im = cv::imread(image_file);
auto im_bak = im.clone();

fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}

auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void TrtInfer(const std::string& model_file, const std::string& image_file) {
auto option = fastdeploy::RuntimeOption();
option.UseGpu();
option.UseTrtBackend();
option.SetTrtInputShape("images", {1, 3, 640, 640});
auto model = fastdeploy::vision::detection::YOLOv7(model_file, "", option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}

auto im = cv::imread(image_file);
auto im_bak = im.clone();

fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}

auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

int main(int argc, char* argv[]) {
if (argc < 4) {
std::cout << "Usage: infer_demo path/to/model path/to/image run_option, "
"e.g ./infer_model ./yolov7.onnx ./test.jpeg 0"
<< std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend."
<< std::endl;
return -1;
}

if (std::atoi(argv[3]) == 0) {
CpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 1) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
}
return 0;
}
```
