From 1684b05b07b104165c2c6916ee45b6289c29740c Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Wed, 13 Jul 2022 13:55:44 +0000
Subject: [PATCH 01/58] first commit for yolov7

---
 fastdeploy/vision.h                           |   1 +
 fastdeploy/vision/wongkinyiu/__init__.py      | 116 +++++++++
 .../vision/wongkinyiu/wongkinyiu_pybind.cc    |  41 ++++
 fastdeploy/vision/wongkinyiu/yolov7.cc        | 230 ++++++++++++++++++
 fastdeploy/vision/wongkinyiu/yolov7.h         |  87 +++++++
 model_zoo/vision/yolov7/cpp/CMakeLists.txt    |  18 ++
 model_zoo/vision/yolov7/cpp/README.md         |  30 +++
 model_zoo/vision/yolov7/cpp/yolov7.cc         |  40 +++
 model_zoo/vision/yolov7/yolov7.py             |  23 ++
 9 files changed, 586 insertions(+)
 create mode 100644 fastdeploy/vision/wongkinyiu/__init__.py
 create mode 100644 fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc
 create mode 100644 fastdeploy/vision/wongkinyiu/yolov7.cc
 create mode 100644 fastdeploy/vision/wongkinyiu/yolov7.h
 create mode 100644 model_zoo/vision/yolov7/cpp/CMakeLists.txt
 create mode 100644 model_zoo/vision/yolov7/cpp/README.md
 create mode 100644 model_zoo/vision/yolov7/cpp/yolov7.cc
 create mode 100644 model_zoo/vision/yolov7/yolov7.py

diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h
index ca2b9a618a..821f3689e5 100644
--- a/fastdeploy/vision.h
+++ b/fastdeploy/vision.h
@@ -17,6 +17,7 @@
 #ifdef ENABLE_VISION
 #include "fastdeploy/vision/ppcls/model.h"
 #include "fastdeploy/vision/ultralytics/yolov5.h"
+#include "fastdeploy/vision/wongkinyiu/yolov7.h"
 #endif
 
 #include "fastdeploy/vision/visualize/visualize.h"
diff --git a/fastdeploy/vision/wongkinyiu/__init__.py b/fastdeploy/vision/wongkinyiu/__init__.py
new file mode 100644
index 0000000000..e3ed7730e6
--- /dev/null
+++ b/fastdeploy/vision/wongkinyiu/__init__.py
@@ -0,0 +1,116 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from ... import FastDeployModel, Frontend
+from ... import fastdeploy_main as C
+
+
+class YOLOv7(FastDeployModel):
+    def __init__(self,
+                 model_file,
+                 params_file="",
+                 runtime_option=None,
+                 model_format=Frontend.ONNX):
+        # Let the base class initialize the backend options;
+        # the initialized options are stored in self._runtime_option
+        super(YOLOv7, self).__init__(runtime_option)
+
+        self._model = C.vision.yongkinyiu.YOLOv7(
+            model_file, params_file, self._runtime_option, model_format)
+        # self.initialized indicates whether the whole model initialized successfully
+        assert self.initialized, "YOLOv7 initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        return self._model.predict(input_image, conf_threshold,
+                                   nms_iou_threshold)
+
+    # Wrappers around attributes of the YOLOv7 model; most of them control
+    # preprocessing, e.g. model.size = [1280, 1280] changes the resize target
+    # used during preprocessing (provided the model supports that size)
+    @property
+    def size(self):
+        return self._model.size
+
+    @property
+    def padding_value(self):
+        return self._model.padding_value
+
+    @property
+    def is_no_pad(self):
+        return self._model.is_no_pad
+
+    @property
+    def is_mini_pad(self):
+        return self._model.is_mini_pad
+
+    @property
+    def is_scale_up(self):
+        return self._model.is_scale_up
+
+    @property
+    def stride(self):
+        return self._model.stride
+
+    @property
+    def max_wh(self):
+        return self._model.max_wh
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(wh, (list, tuple)),\
+            "The value to set `size` must be type of tuple or list."
+        assert len(wh) == 2,\
+            "The value to set `size` must contain 2 elements, i.e. [width, height], but now it contains {} elements.".format(
+                len(wh))
+        self._model.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value,
+            list), "The value to set `padding_value` must be type of list."
+        self._model.padding_value = value
+
+    @is_no_pad.setter
+    def is_no_pad(self, value):
+        assert isinstance(
+            value, bool), "The value to set `is_no_pad` must be type of bool."
+        self._model.is_no_pad = value
+
+    @is_mini_pad.setter
+    def is_mini_pad(self, value):
+        assert isinstance(
+            value,
+            bool), "The value to set `is_mini_pad` must be type of bool."
+        self._model.is_mini_pad = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value,
+            bool), "The value to set `is_scale_up` must be type of bool."
+        self._model.is_scale_up = value
+
+    @stride.setter
+    def stride(self, value):
+        assert isinstance(
+            value, int), "The value to set `stride` must be type of int."
+        self._model.stride = value
+
+    @max_wh.setter
+    def max_wh(self, value):
+        assert isinstance(
+            value, float), "The value to set `max_wh` must be type of float."
+        self._model.max_wh = value
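
The wrapped attributes above forward to the C++ members bound via pybind11, so preprocessing can be tuned per model instance before calling `predict`. A minimal usage sketch (it assumes the `fd.vision.wongkinyiu` module path registered by the pybind patch below, plus a local `yolov7.onnx` and `horses.jpg`; the attribute values are illustrative, not required defaults):

```
import cv2
import fastdeploy as fd

# Load the exported ONNX model (assumed to exist locally)
model = fd.vision.wongkinyiu.YOLOv7("yolov7.onnx")

# Tune preprocessing through the wrapped attributes
model.size = [1280, 1280]  # letterbox target [width, height], if the model supports it
model.is_scale_up = True   # allow scaling small images up, not only down
model.stride = 64          # padding multiple used when is_mini_pad is enabled

im = cv2.imread("horses.jpg")
result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)
print(result)
```
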
diff --git a/fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc b/fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc
new file mode 100644
index 0000000000..99f0aab628
--- /dev/null
+++ b/fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/pybind/main.h"
+
+namespace fastdeploy {
+void BindWongkinyiu(pybind11::module& m) {
+  auto yongkinyiu_module =
+      m.def_submodule("WongKinYiu", "https://github.com/WongKinYiu/yolov7");
+  pybind11::class_<vision::yongkinyiu::YOLOv7, FastDeployModel>(
+      yongkinyiu_module, "YOLOv7")
+      .def(pybind11::init<std::string, std::string, RuntimeOption, Frontend>())
+      .def("predict",
+           [](vision::yongkinyiu::YOLOv7& self, pybind11::array& data,
+              float conf_threshold, float nms_iou_threshold) {
+             auto mat = PyArrayToCvMat(data);
+             vision::DetectionResult res;
+             self.Predict(&mat, &res, conf_threshold, nms_iou_threshold);
+             return res;
+           })
+      .def_readwrite("size", &vision::yongkinyiu::YOLOv7::size)
+      .def_readwrite("padding_value",
+                     &vision::yongkinyiu::YOLOv7::padding_value)
+      .def_readwrite("is_mini_pad", &vision::yongkinyiu::YOLOv7::is_mini_pad)
+      .def_readwrite("is_no_pad", &vision::yongkinyiu::YOLOv7::is_no_pad)
+      .def_readwrite("is_scale_up", &vision::yongkinyiu::YOLOv7::is_scale_up)
+      .def_readwrite("stride", &vision::yongkinyiu::YOLOv7::stride)
+      .def_readwrite("max_wh", &vision::yongkinyiu::YOLOv7::max_wh);
+}
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/wongkinyiu/yolov7.cc b/fastdeploy/vision/wongkinyiu/yolov7.cc
new file mode 100644
index 0000000000..09004b5c3c
--- /dev/null
+++ b/fastdeploy/vision/wongkinyiu/yolov7.cc
@@ -0,0 +1,230 @@
+#include "fastdeploy/vision/WongKinYiu/yolov7.h"
+#include "fastdeploy/utils/perf.h"
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace wongkinyiu {
+
+void LetterBox(Mat* mat, std::vector<int> size, std::vector<float> color,
+               bool _auto, bool scale_fill = false, bool scale_up = true,
+               int stride = 32) {
+  float scale =
+      std::min(size[1] * 1.0 / mat->Height(), size[0] * 1.0 / mat->Width());
+  if (!scale_up) {
+    scale = std::min(scale, 1.0f);
+  }
+
+  int resize_h = int(round(mat->Height() * scale));
+  int resize_w = int(round(mat->Width() * scale));
+
+  int pad_w = size[0] - resize_w;
+  int pad_h = size[1] - resize_h;
+  if (_auto) {
+    pad_h = pad_h % stride;
+    pad_w = pad_w % stride;
+  } else if (scale_fill) {
+    pad_h = 0;
+    pad_w = 0;
+    resize_h = size[1];
+    resize_w = size[0];
+  }
+  Resize::Run(mat, resize_w, resize_h);
+  if (pad_h > 0 || pad_w > 0) {
+    float half_h = pad_h * 1.0 / 2;
+    int top = int(round(half_h - 0.1));
+    int bottom = int(round(half_h + 0.1));
+    float half_w = pad_w * 1.0 / 2;
+    int left = int(round(half_w - 0.1));
+    int right = int(round(half_w + 0.1));
+    Pad::Run(mat, top, bottom, left, right, color);
+  }
+}
+
+YOLOv7::YOLOv7(const std::string& model_file, const std::string& params_file,
+               const RuntimeOption& custom_option,
+               const Frontend& model_format) {
+  if (model_format == Frontend::ONNX) {
+    valid_cpu_backends = {Backend::ORT};  // the usable CPU backends
+    valid_gpu_backends = {Backend::ORT, Backend::TRT};  // the usable GPU backends
+  } else {
+    valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
+    valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+  }
+  runtime_option = custom_option;
+  runtime_option.model_format = model_format;
+  runtime_option.model_file = model_file;
+  runtime_option.params_file = params_file;
+  initialized = Initialize();
+}
+
+bool YOLOv7::Initialize() {
+  // parameters for preprocess
+  size = {640, 640};
+  padding_value = {114.0, 114.0, 114.0};
+  is_mini_pad = false;
+  is_no_pad = false;
+  is_scale_up = false;
+  stride = 32;
+  max_wh = 7680.0;
+
+  if (!InitRuntime()) {
+    FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
+    return false;
+  }
+  return true;
+}
+
+bool YOLOv7::Preprocess(
+    Mat* mat, FDTensor* output,
+    std::map<std::string, std::array<float, 2>>* im_info) {
+  // process after image load
+  double ratio = (size[0] * 1.0) / std::max(static_cast<double>(mat->Height()),
+                                            static_cast<double>(mat->Width()));
+  if (ratio != 1.0) {
+    int interp = cv::INTER_AREA;
+    if (ratio > 1.0) {
+      interp = cv::INTER_LINEAR;
+    }
+    int resize_h = int(mat->Height() * ratio);
+    int resize_w = int(mat->Width() * ratio);
+    Resize::Run(mat, resize_w, resize_h, -1, -1, interp);
+  }
+  // yolov7's preprocess steps
+  // 1. letterbox
+  // 2. BGR->RGB
+  // 3. HWC->CHW
+  LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, is_scale_up,
+            stride);
+  BGR2RGB::Run(mat);
+  Normalize::Run(mat, std::vector<float>(mat->Channels(), 0.0),
+                 std::vector<float>(mat->Channels(), 1.0));
+
+  // Record output shape of preprocessed image
+  (*im_info)["output_shape"] = {static_cast<float>(mat->Height()),
+                                static_cast<float>(mat->Width())};
+
+  HWC2CHW::Run(mat);
+  Cast::Run(mat, "float");
+  mat->ShareWithTensor(output);
+  output->shape.insert(output->shape.begin(), 1);  // reshape to n, c, h, w
+  return true;
+}
+
+bool YOLOv7::Postprocess(
+    FDTensor& infer_result, DetectionResult* result,
+    const std::map<std::string, std::array<float, 2>>& im_info,
+    float conf_threshold, float nms_iou_threshold) {
+  FDASSERT(infer_result.shape[0] == 1, "Only support batch = 1 now.");
+  result->Clear();
+  result->Reserve(infer_result.shape[1]);
+  if (infer_result.dtype != FDDataType::FP32) {
+    FDERROR << "Only support post process with float32 data." << std::endl;
+    return false;
+  }
+  float* data = static_cast<float*>(infer_result.Data());
+  for (size_t i = 0; i < infer_result.shape[1]; ++i) {
+    int s = i * infer_result.shape[2];
+    float confidence = data[s + 4];
+    float* max_class_score =
+        std::max_element(data + s + 5, data + s + infer_result.shape[2]);
+    confidence *= (*max_class_score);
+    // filter boxes by conf_threshold
+    if (confidence <= conf_threshold) {
+      continue;
+    }
+    int32_t label_id = std::distance(data + s + 5, max_class_score);
+    // convert from [x, y, w, h] to [x1, y1, x2, y2]
+    result->boxes.emplace_back(std::array<float, 4>{
+        data[s] - data[s + 2] / 2.0f + label_id * max_wh,
+        data[s + 1] - data[s + 3] / 2.0f + label_id * max_wh,
+        data[s + 0] + data[s + 2] / 2.0f + label_id * max_wh,
+        data[s + 1] + data[s + 3] / 2.0f + label_id * max_wh});
+    result->label_ids.push_back(label_id);
+    result->scores.push_back(confidence);
+  }
+  utils::NMS(result, nms_iou_threshold);
+
+  // scale the boxes to the origin image shape
+  auto iter_out = im_info.find("output_shape");
+  auto iter_ipt = im_info.find("input_shape");
+  FDASSERT(iter_out != im_info.end() && iter_ipt != im_info.end(),
+           "Cannot find input_shape or output_shape from im_info.");
+  float out_h = iter_out->second[0];
+  float out_w = iter_out->second[1];
+  float ipt_h = iter_ipt->second[0];
+  float ipt_w = iter_ipt->second[1];
+  float scale = std::min(out_h / ipt_h, out_w / ipt_w);
+  for (size_t i = 0; i < result->boxes.size(); ++i) {
+    float pad_h = (out_h - ipt_h * scale) / 2;
+    float pad_w = (out_w - ipt_w * scale) / 2;
+    int32_t label_id = (result->label_ids)[i];
+    // remove the per-class offset, then clip box
+    result->boxes[i][0] = result->boxes[i][0] - max_wh * label_id;
+    result->boxes[i][1] = result->boxes[i][1] - max_wh * label_id;
+    result->boxes[i][2] = result->boxes[i][2] - max_wh * label_id;
+    result->boxes[i][3] = result->boxes[i][3] - max_wh * label_id;
+    result->boxes[i][0] = std::max((result->boxes[i][0] - pad_w) / scale, 0.0f);
+    result->boxes[i][1] = std::max((result->boxes[i][1] - pad_h) / scale, 0.0f);
+    result->boxes[i][2] = std::max((result->boxes[i][2] - pad_w) / scale, 0.0f);
+    result->boxes[i][3] = std::max((result->boxes[i][3] - pad_h) / scale, 0.0f);
+    result->boxes[i][0] = std::min(result->boxes[i][0], ipt_w);
+    result->boxes[i][1] = std::min(result->boxes[i][1], ipt_h);
+    result->boxes[i][2] = std::min(result->boxes[i][2], ipt_w);
+    result->boxes[i][3] = std::min(result->boxes[i][3], ipt_h);
+  }
+  return true;
+}
+
+bool YOLOv7::Predict(cv::Mat* im, DetectionResult* result, float conf_threshold,
+                     float nms_iou_threshold) {
+#ifdef FASTDEPLOY_DEBUG
+  TIMERECORD_START(0)
+#endif
+
+  Mat mat(*im);
+  std::vector<FDTensor> input_tensors(1);
+
+  std::map<std::string, std::array<float, 2>> im_info;
+
+  // Record the shape of image and the shape of preprocessed image
+  im_info["input_shape"] = {static_cast<float>(mat.Height()),
+                            static_cast<float>(mat.Width())};
+  im_info["output_shape"] = {static_cast<float>(mat.Height()),
+                             static_cast<float>(mat.Width())};
+
+  if (!Preprocess(&mat, &input_tensors[0], &im_info)) {
+    FDERROR << "Failed to preprocess input image." << std::endl;
+    return false;
+  }
+
+#ifdef FASTDEPLOY_DEBUG
+  TIMERECORD_END(0, "Preprocess")
+  TIMERECORD_START(1)
+#endif
+
+  input_tensors[0].name = InputInfoOfRuntime(0).name;
+  std::vector<FDTensor> output_tensors;
+  if (!Infer(input_tensors, &output_tensors)) {
+    FDERROR << "Failed to inference." << std::endl;
+    return false;
+  }
+#ifdef FASTDEPLOY_DEBUG
+  TIMERECORD_END(1, "Inference")
+  TIMERECORD_START(2)
+#endif
+
+  if (!Postprocess(output_tensors[0], result, im_info, conf_threshold,
+                   nms_iou_threshold)) {
+    FDERROR << "Failed to post process." << std::endl;
+    return false;
+  }
+
+#ifdef FASTDEPLOY_DEBUG
+  TIMERECORD_END(2, "Postprocess")
+#endif
+  return true;
+}
+
+}  // namespace wongkinyiu
+}  // namespace vision
+}  // namespace fastdeploy
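
In `Postprocess` above, every box is offset by `label_id * max_wh` before `utils::NMS` and the offset is subtracted afterwards: since no coordinate can reach `max_wh` (7680 by default), boxes of different classes land in disjoint coordinate ranges, so a single class-agnostic NMS pass behaves like per-class NMS. A minimal NumPy sketch of the same trick (hypothetical helper, not part of this patch):

```
import numpy as np

def nms(boxes, scores, iou_thr=0.5):
    # Plain class-agnostic NMS over [x1, y1, x2, y2] boxes.
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        rest = order[1:]
        xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
        yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
        xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
        yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
        inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        area_r = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])
        iou = inter / (area_i + area_r - inter + 1e-9)
        order = rest[iou <= iou_thr]
    return keep

def per_class_nms(boxes, scores, labels, iou_thr=0.5, max_wh=7680.0):
    # Shift each box by label * max_wh so different classes cannot overlap;
    # one class-agnostic pass then equals per-class NMS, and the shift never
    # needs to be undone for the kept indices.
    shifted = boxes + labels[:, None].astype(boxes.dtype) * max_wh
    return nms(shifted, scores, iou_thr)
```

`max_wh` only has to exceed any coordinate the model can produce, which is why the C++ code defaults it to 7680.
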
diff --git a/fastdeploy/vision/wongkinyiu/yolov7.h b/fastdeploy/vision/wongkinyiu/yolov7.h
new file mode 100644
index 0000000000..b21c04936a
--- /dev/null
+++ b/fastdeploy/vision/wongkinyiu/yolov7.h
@@ -0,0 +1,87 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/fastdeploy_model.h"
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace wongkinyiu {
+
+class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
+ public:
+  // When model_format is ONNX, params_file is not needed;
+  // when model_format is Paddle, both model_file and params_file are required
+  YOLOv7(const std::string& model_file, const std::string& params_file = "",
+         const RuntimeOption& custom_option = RuntimeOption(),
+         const Frontend& model_format = Frontend::ONNX);
+
+  // Name of the model
+  virtual std::string ModelName() const { return "WongKinYiu/yolov7"; }
+
+  // Initialization, including backend setup and every other
+  // operation the model needs before inference
+  virtual bool Initialize();
+
+  // Preprocess the input image
+  // Mat is the data structure defined by FastDeploy
+  // FDTensor receives the preprocessed tensor that is fed to the backend
+  // im_info keeps data recorded during preprocessing that postprocessing needs
+  virtual bool Preprocess(Mat* mat, FDTensor* outputs,
+                          std::map<std::string, std::array<float, 2>>* im_info);
+
+  // Postprocess the backend's inference output for the user
+  // infer_result is the output tensor produced by the backend
+  // result is the model's prediction
+  // im_info is the information recorded during preprocessing, used to restore the boxes
+  // conf_threshold is the confidence threshold for filtering boxes
+  // nms_iou_threshold is the IoU threshold used by NMS
+  virtual bool Postprocess(
+      FDTensor& infer_result, DetectionResult* result,
+      const std::map<std::string, std::array<float, 2>>& im_info,
+      float conf_threshold, float nms_iou_threshold);
+
+  // Prediction interface, i.e. what users call
+  // im is the user's input; for CV it is currently always a cv::Mat
+  // result is the output prediction structure
+  // conf_threshold is a postprocessing parameter
+  // nms_iou_threshold is a postprocessing parameter
+  virtual bool Predict(cv::Mat* im, DetectionResult* result,
+                       float conf_threshold = 0.25,
+                       float nms_iou_threshold = 0.5);
+
+  // The following are parameters used at prediction time, mostly for pre- and
+  // postprocessing; after creating a model, users can adjust them to match
+  // the model's requirements and their own needs
+  // tuple of (width, height)
+  std::vector<int> size;
+  // padding value; its size should match the number of channels
+  std::vector<float> padding_value;
+  // only pad to the minimum rectangle whose height and width are multiples of stride
+  bool is_mini_pad;
+  // when is_mini_pad = false and is_no_pad = true, resize the image to
+  // the target size
+  bool is_no_pad;
+  // if is_scale_up is false, the input image can only be scaled down; the
+  // resize scale cannot exceed 1.0
+  bool is_scale_up;
+  // padding stride, for is_mini_pad
+  int stride;
+  // for offsetting the boxes by class when using NMS
+  float max_wh;
+};
+}  // namespace wongkinyiu
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/model_zoo/vision/yolov7/cpp/CMakeLists.txt b/model_zoo/vision/yolov7/cpp/CMakeLists.txt
new file mode 100644
index 0000000000..b3b790698c
--- /dev/null
+++ b/model_zoo/vision/yolov7/cpp/CMakeLists.txt
@@ -0,0 +1,18 @@
+PROJECT(yolov7_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.16)
+
+# In environments with the old C++ ABI, uncomment the following line for a compatible build
+# add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+# Path to the downloaded and extracted FastDeploy library
+set(FASTDEPLOY_INSTALL_DIR /home/fastdeploy/FastDeploy/build/fastdeploy-0.0.3/)
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Add FastDeploy header dependencies
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(yolov7_demo ${PROJECT_SOURCE_DIR}/yolov7.cc)
+# Link the FastDeploy libraries
+target_link_libraries(yolov7_demo ${FASTDEPLOY_LIBS})
+
diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md
new file mode 100644
index 0000000000..dd740ff58a
--- /dev/null
+++ b/model_zoo/vision/yolov7/cpp/README.md
@@ -0,0 +1,30 @@
+# Compile the YOLOv5 Example
+
+
+```
+# Download and extract the inference library
+wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz
+tar xvf fastdeploy-linux-x64-0.0.3.tgz
+
+# Build the example
+mkdir build && cd build
+cmake ..
+make -j
+
+# Download the model and image
+wget https://github.com/ultralytics/yolov5/releases/download/v6.0/yolov5s.onnx
+wget https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/bus.jpg
+
+# Run
+./yolov5_demo
+```
+
+After it finishes, the visualized result is saved locally as `vis_result.jpg`, and the detected boxes are also printed to the terminal, as shown below
+```
+DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
+223.395142,403.948669, 345.337189, 867.339050, 0.856906, 0
+668.301758,400.781342, 808.441772, 882.534973, 0.829716, 0
+50.210720,398.571411, 243.123367, 905.016602, 0.805375, 0
+23.768242,214.979370, 802.627686, 778.840881, 0.756311, 5
+0.737200,552.281006, 78.617218, 890.945007, 0.363471, 0
+```
diff --git a/model_zoo/vision/yolov7/cpp/yolov7.cc b/model_zoo/vision/yolov7/cpp/yolov7.cc
new file mode 100644
index 0000000000..4b89972859
--- /dev/null
+++ b/model_zoo/vision/yolov7/cpp/yolov7.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+int main() {
+  namespace vis = fastdeploy::vision;
+  auto model = vis::wongkinyiu::YOLOv7("/home/fastdeploy/yolov7/onnxfiles/yolov7.onnx");
+  if (!model.Initialized()) {
+    std::cerr << "Init Failed." << std::endl;
+    return -1;
+  }
+  cv::Mat im = cv::imread("bus.jpg");
+  cv::Mat vis_im = im.clone();
+
+  vis::DetectionResult res;
+  if (!model.Predict(&im, &res)) {
+    std::cerr << "Prediction Failed." << std::endl;
+    return -1;
+  }
+
+  // Print the detection results
+  std::cout << res.Str() << std::endl;
+
+  // Visualize the prediction
+  vis::Visualize::VisDetection(&vis_im, res);
+  cv::imwrite("vis_result.jpg", vis_im);
+  return 0;
+}
diff --git a/model_zoo/vision/yolov7/yolov7.py b/model_zoo/vision/yolov7/yolov7.py
new file mode 100644
index 0000000000..c502c66366
--- /dev/null
+++ b/model_zoo/vision/yolov7/yolov7.py
@@ -0,0 +1,23 @@
+import fastdeploy as fd
+import cv2
+
+# Download the model and test image
+model_url = "https://github.com/ultralytics/yolov5/releases/download/v6.0/yolov5s.onnx"
+test_jpg_url = "https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/bus.jpg"
+fd.download(model_url, ".", show_progress=True)
+fd.download(test_jpg_url, ".", show_progress=True)
+
+# Load the model
+model = fd.vision.ultralytics.YOLOv5("yolov5s.onnx")
+
+# Predict the image
+im = cv2.imread("bus.jpg")
+result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)
+
+# Visualize the result
+fd.vision.visualize.vis_detection(im, result)
+cv2.imwrite("vis_result.jpg", im)
+
+# Print the prediction results
+print(result)
+print(model.runtime_option)
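
`Predict` above records `input_shape` and `output_shape` in `im_info` so that `Postprocess` can map boxes from letterboxed coordinates back to the original image: subtract the padding, divide by the resize scale, then clip. A minimal NumPy sketch of that inverse mapping (hypothetical standalone helper, not part of the patch):

```
import numpy as np

def restore_boxes(boxes, out_hw, ipt_hw):
    # boxes: (N, 4) [x1, y1, x2, y2] in letterboxed (network input) coordinates.
    out_h, out_w = out_hw
    ipt_h, ipt_w = ipt_hw
    scale = min(out_h / ipt_h, out_w / ipt_w)
    pad_w = (out_w - ipt_w * scale) / 2
    pad_h = (out_h - ipt_h * scale) / 2
    boxes = boxes.copy()
    boxes[:, [0, 2]] = (boxes[:, [0, 2]] - pad_w) / scale
    boxes[:, [1, 3]] = (boxes[:, [1, 3]] - pad_h) / scale
    boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, ipt_w)
    boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, ipt_h)
    return boxes

# e.g. a detection in a 640x640 letterboxed input mapped back to a 720x1280 image:
print(restore_boxes(np.array([[100.0, 180.0, 300.0, 460.0]]), (640, 640), (720, 1280)))
# -> [[200.  80. 600. 640.]]
```
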
From 71c00d94e12c6a52afc1342de893bec2f7850ae2 Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Thu, 14 Jul 2022 07:02:55 +0000
Subject: [PATCH 02/58] pybind for yolov7

---
 fastdeploy/vision/__init__.py                  |  1 +
 fastdeploy/vision/vision_pybind.cc             |  2 ++
 fastdeploy/vision/wongkinyiu/__init__.py       |  2 +-
 .../vision/wongkinyiu/wongkinyiu_pybind.cc     | 24 +++++++++----------
 model_zoo/vision/yolov7/yolov7.py              |  8 ++++----
 5 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/fastdeploy/vision/__init__.py b/fastdeploy/vision/__init__.py
index 810b23cd3d..1ea30c35ae 100644
--- a/fastdeploy/vision/__init__.py
+++ b/fastdeploy/vision/__init__.py
@@ -17,3 +17,4 @@
 from . import ppcls
 from . import ultralytics
 from . import visualize
+from . import wongkinyiu
diff --git a/fastdeploy/vision/vision_pybind.cc b/fastdeploy/vision/vision_pybind.cc
index f3c3f0052d..5d79ffb2a6 100644
--- a/fastdeploy/vision/vision_pybind.cc
+++ b/fastdeploy/vision/vision_pybind.cc
@@ -17,6 +17,7 @@
 namespace fastdeploy {
 
 void BindPpClsModel(pybind11::module& m);
+void BindWongkinyiu(pybind11::module& m);
 void BindUltralytics(pybind11::module& m);
 #ifdef ENABLE_VISION_VISUALIZE
 void BindVisualize(pybind11::module& m);
@@ -40,6 +41,7 @@ void BindVision(pybind11::module& m) {
 
   BindPpClsModel(m);
   BindUltralytics(m);
+  BindWongkinyiu(m);
   BindVisualize(m);
 }
 }  // namespace fastdeploy
diff --git a/fastdeploy/vision/wongkinyiu/__init__.py b/fastdeploy/vision/wongkinyiu/__init__.py
index e3ed7730e6..0ce06209fc 100644
--- a/fastdeploy/vision/wongkinyiu/__init__.py
+++ b/fastdeploy/vision/wongkinyiu/__init__.py
@@ -28,7 +28,7 @@ def __init__(self,
         # the initialized options are stored in self._runtime_option
         super(YOLOv7, self).__init__(runtime_option)
 
-        self._model = C.vision.yongkinyiu.YOLOv7(
+        self._model = C.vision.wongkinyiu.YOLOv7(
             model_file, params_file, self._runtime_option, model_format)
         # self.initialized indicates whether the whole model initialized successfully
         assert self.initialized, "YOLOv7 initialize failed."
diff --git a/fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc b/fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc
index 99f0aab628..4a10f47a76 100644
--- a/fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc
+++ b/fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc
@@ -16,26 +16,26 @@
 
 namespace fastdeploy {
 void BindWongkinyiu(pybind11::module& m) {
-  auto yongkinyiu_module =
-      m.def_submodule("WongKinYiu", "https://github.com/WongKinYiu/yolov7");
-  pybind11::class_<vision::yongkinyiu::YOLOv7, FastDeployModel>(
-      yongkinyiu_module, "YOLOv7")
+  auto wongkinyiu_module =
+      m.def_submodule("wongkinyiu", "https://github.com/WongKinYiu/yolov7");
+  pybind11::class_<vision::wongkinyiu::YOLOv7, FastDeployModel>(
+      wongkinyiu_module, "YOLOv7")
       .def(pybind11::init<std::string, std::string, RuntimeOption, Frontend>())
       .def("predict",
-           [](vision::yongkinyiu::YOLOv7& self, pybind11::array& data,
+           [](vision::wongkinyiu::YOLOv7& self, pybind11::array& data,
               float conf_threshold, float nms_iou_threshold) {
              auto mat = PyArrayToCvMat(data);
             vision::DetectionResult res;
             self.Predict(&mat, &res, conf_threshold, nms_iou_threshold);
             return res;
           })
-      .def_readwrite("size", &vision::yongkinyiu::YOLOv7::size)
+      .def_readwrite("size", &vision::wongkinyiu::YOLOv7::size)
       .def_readwrite("padding_value",
-                     &vision::yongkinyiu::YOLOv7::padding_value)
-      .def_readwrite("is_mini_pad", &vision::yongkinyiu::YOLOv7::is_mini_pad)
-      .def_readwrite("is_no_pad", &vision::yongkinyiu::YOLOv7::is_no_pad)
-      .def_readwrite("is_scale_up", &vision::yongkinyiu::YOLOv7::is_scale_up)
-      .def_readwrite("stride", &vision::yongkinyiu::YOLOv7::stride)
-      .def_readwrite("max_wh", &vision::yongkinyiu::YOLOv7::max_wh);
+                     &vision::wongkinyiu::YOLOv7::padding_value)
+      .def_readwrite("is_mini_pad", &vision::wongkinyiu::YOLOv7::is_mini_pad)
+      .def_readwrite("is_no_pad", &vision::wongkinyiu::YOLOv7::is_no_pad)
+      .def_readwrite("is_scale_up", &vision::wongkinyiu::YOLOv7::is_scale_up)
+      .def_readwrite("stride", &vision::wongkinyiu::YOLOv7::stride)
+      .def_readwrite("max_wh", &vision::wongkinyiu::YOLOv7::max_wh);
 }
 }  // namespace fastdeploy
diff --git a/model_zoo/vision/yolov7/yolov7.py b/model_zoo/vision/yolov7/yolov7.py
index c502c66366..81c529b15b 100644
--- a/model_zoo/vision/yolov7/yolov7.py
+++ b/model_zoo/vision/yolov7/yolov7.py
@@ -2,16 +2,16 @@
 import cv2
 
 # Download the model and test image
-model_url = "https://github.com/ultralytics/yolov5/releases/download/v6.0/yolov5s.onnx"
-test_jpg_url = "https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/bus.jpg"
+model_url = "TODO "
+test_jpg_url = "https://github.com/WongKinYiu/yolov7/blob/main/inference/images/horses.jpg"
 fd.download(model_url, ".", show_progress=True)
 fd.download(test_jpg_url, ".", show_progress=True)
 
 # Load the model
-model = fd.vision.ultralytics.YOLOv5("yolov5s.onnx")
+model = fd.vision.wongkinyiu.YOLOv7("yolov7.onnx")
 
 # Predict the image
-im = cv2.imread("bus.jpg")
+im = cv2.imread("horses.jpg")
 result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)
 
 # Visualize the result
From 21ab2f939c8e1469f320826808c5d430234e25fd Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Thu, 14 Jul 2022 07:14:03 +0000
Subject: [PATCH 03/58] CPP README.md

---
 model_zoo/vision/yolov7/cpp/README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md
index dd740ff58a..f19c0625d1 100644
--- a/model_zoo/vision/yolov7/cpp/README.md
+++ b/model_zoo/vision/yolov7/cpp/README.md
@@ -12,11 +12,11 @@ cmake ..
 make -j
 
 # Download the model and image
-wget https://github.com/ultralytics/yolov5/releases/download/v6.0/yolov5s.onnx
-wget https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/bus.jpg
+wget "TODO"
+wget https://github.com/WongKinYiu/yolov7/blob/main/inference/images/horses.jpg
 
 # Run
-./yolov5_demo
+./yolov7_demo
 ```
 
 After it finishes, the visualized result is saved locally as `vis_result.jpg`, and the detected boxes are also printed to the terminal, as shown below
 ```

From d63e862f919d0ce9025f78271a03e9a122d2ccdd Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Thu, 14 Jul 2022 07:14:30 +0000
Subject: [PATCH 04/58] CPP README.md

---
 model_zoo/vision/yolov7/cpp/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md
index f19c0625d1..b43d4381e5 100644
--- a/model_zoo/vision/yolov7/cpp/README.md
+++ b/model_zoo/vision/yolov7/cpp/README.md
@@ -1,4 +1,4 @@
-# Compile the YOLOv5 Example
+# Compile the YOLOv7 Example
 
 
 ```

From 7b3b0e271072987f11fb8ffabdc8d276cf878fa0 Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Thu, 14 Jul 2022 09:54:30 +0000
Subject: [PATCH 05/58] modified yolov7.cc

---
 fastdeploy/vision/wongkinyiu/yolov7.cc     | 2 +-
 model_zoo/vision/yolov7/cpp/CMakeLists.txt | 5 ++---
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/fastdeploy/vision/wongkinyiu/yolov7.cc b/fastdeploy/vision/wongkinyiu/yolov7.cc
index 09004b5c3c..6baf4c336b 100644
--- a/fastdeploy/vision/wongkinyiu/yolov7.cc
+++ b/fastdeploy/vision/wongkinyiu/yolov7.cc
@@ -1,4 +1,4 @@
-#include "fastdeploy/vision/WongKinYiu/yolov7.h"
+#include "fastdeploy/vision/wongkinyiu/yolov7.h"
 #include "fastdeploy/utils/perf.h"
 #include "fastdeploy/vision/utils/utils.h"
 
diff --git a/model_zoo/vision/yolov7/cpp/CMakeLists.txt b/model_zoo/vision/yolov7/cpp/CMakeLists.txt
index b3b790698c..09f07b1748 100644
--- a/model_zoo/vision/yolov7/cpp/CMakeLists.txt
+++ b/model_zoo/vision/yolov7/cpp/CMakeLists.txt
@@ -5,7 +5,7 @@ CMAKE_MINIMUM_REQUIRED (VERSION 3.16)
 # add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
 
 # Path to the downloaded and extracted FastDeploy library
-set(FASTDEPLOY_INSTALL_DIR /home/fastdeploy/FastDeploy/build/fastdeploy-0.0.3/)
+set(FASTDEPLOY_INSTALL_DIR /home/fastdeploy/FastDeploy/build/fastdeploy-0.0.3)
 
 include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
 
@@ -14,5 +14,4 @@ include_directories(${FASTDEPLOY_INCS})
 
 add_executable(yolov7_demo ${PROJECT_SOURCE_DIR}/yolov7.cc)
 # Link the FastDeploy libraries
-target_link_libraries(yolov7_demo ${FASTDEPLOY_LIBS})
-
+target_link_libraries(yolov7_demo ${FASTDEPLOY_LIBS})
\ No newline at end of file

From d039e800190e484c583509c3b0e97eb2222f32e9 Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Fri, 15 Jul 2022 05:11:01 +0000
Subject: [PATCH 06/58] README.md

---
 model_zoo/vision/yolov7/README.md | 80 +++++++++++++++++++++++++++
 model_zoo/vision/yolov7/api.md    | 71 +++++++++++++++++++++++++++
 2 files changed, 151 insertions(+)
 create mode 100644 model_zoo/vision/yolov7/README.md
 create mode 100644 model_zoo/vision/yolov7/api.md

diff --git a/model_zoo/vision/yolov7/README.md b/model_zoo/vision/yolov7/README.md
new file mode 100644
index 0000000000..80f9aa0fac
--- /dev/null
+++ b/model_zoo/vision/yolov7/README.md
@@ -0,0 +1,80 @@
+# Compile the YOLOv7 Example
+
+This document explains how to quickly deploy [YOLOv7](https://github.com/WongKinYiu/yolov7) for inference. The directory structure is as follows
+
+```
+.
+├── cpp
+│   ├── CMakeLists.txt
+│   ├── README.md
+│   └── yolov7.cc
+├── README.md
+└── yolov7.py
+```
+
+## Generate the ONNX File
+
+- Export manually
+
+  Visit the official [YOLOv7](https://github.com/WongKinYiu/yolov7) GitHub repository, follow its instructions to download and install, download the `yolov7.pt` model, and use `models/export.py` to produce the `onnx` file.
+
+
+
+  ```
+  # Download the yolov7 model file
+  wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt
+
+  # Export the onnx file
+  python models/export.py --grid --dynamic --weights PATH/TO/yolov7.pt
+  ```
+
+
+
+- Obtain from PaddlePaddle
+
+## Python Deployment
+
+### Install FastDeploy
+
+Install FastDeploy with the following commands. Note that `vision-cpu` is installed here; `vision-gpu` can be installed instead if needed.
+
+```
+# Install the fastdeploy-python tool
+pip install fastdeploy-python
+
+# Install the vision-cpu module
+fastdeploy install vision-cpu
+```
+
+### Run the demo
+
+```
+python yolov7.py
+```
+
+
+
+## C++ Deployment
+
+### Build the demo
+
+```
+# Change into the ./cpp/ directory
+cd cpp/
+
+# Download and extract the inference library
+wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz
+tar xvf fastdeploy-linux-x64-0.0.3.tgz
+
+# Build the example
+mkdir build && cd build
+cmake ..
+make -j
+```
+
+
+
+
+
+
diff --git a/model_zoo/vision/yolov7/api.md b/model_zoo/vision/yolov7/api.md
new file mode 100644
index 0000000000..898a3f585f
--- /dev/null
+++ b/model_zoo/vision/yolov7/api.md
@@ -0,0 +1,71 @@
+# YOLOv7 API Reference
+
+## Python API
+
+### YOLOv7 Class
+```
+fastdeploy.vision.wongkinyiu.YOLOv7(model_file, params_file=None, runtime_option=None, model_format=fd.Frontend.ONNX)
+```
+Loads and initializes the YOLOv7 model. When model_format is `fd.Frontend.ONNX`, only model_file is needed, e.g. `yolov7s.onnx`; when model_format is `fd.Frontend.PADDLE`, both model_file and params_file are required.
+
+**Parameters**
+
+> * **model_file**(str): path to the model file
+> * **params_file**(str): path to the parameters file
+> * **runtime_option**(RuntimeOption): backend inference configuration; None means the default configuration is used
+> * **model_format**(Frontend): model format
+
+#### predict function
+> ```
+> YOLOv7.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
+> ```
+> Prediction interface of the model: takes an image and returns the detection results directly.
+>
+> **Parameters**
+>
+> > * **image_data**(np.ndarray): input data; note that it must be in HWC, RGB layout
+> > * **conf_threshold**(float): confidence threshold for filtering detection boxes
+> > * **nms_iou_threshold**(float): IoU threshold used during NMS
+
+See [yolov7.py](./yolov7.py) for example code
+
+
+## C++ API
+
+### YOLOv7 Class
+```
+fastdeploy::vision::wongkinyiu::YOLOv7(
+  const string& model_file,
+  const string& params_file = "",
+  const RuntimeOption& runtime_option = RuntimeOption(),
+  const Frontend& model_format = Frontend::ONNX)
+```
+Loads and initializes the YOLOv7 model. When model_format is `Frontend::ONNX`, only model_file is needed, e.g. `yolov7s.onnx`; when model_format is `Frontend::PADDLE`, both model_file and params_file are required.
+
+**Parameters**
+
+> * **model_file**(str): path to the model file
+> * **params_file**(str): path to the parameters file
+> * **runtime_option**(RuntimeOption): backend inference configuration; None means the default configuration is used
+> * **model_format**(Frontend): model format
+
+#### Predict function
+> ```
+> YOLOv7::Predict(cv::Mat* im, DetectionResult* result,
+>                 float conf_threshold = 0.25,
+>                 float nms_iou_threshold = 0.5)
+> ```
+> Prediction interface of the model: takes an image and returns the detection results directly.
+>
+> **Parameters**
+>
+> > * **im**: input image; note that it must be in HWC, RGB layout
+> > * **result**: detection results, including the boxes and each box's confidence
+> > * **conf_threshold**: confidence threshold for filtering detection boxes
+> > * **nms_iou_threshold**: IoU threshold used during NMS
+
+See [cpp/yolov7.cc](cpp/yolov7.cc) for example code
+
+## Other APIs
+
+- [RuntimeOption configuration for model deployment](../../../docs/api/runtime_option.md)

From a34a815de844834bfcacc8154ab206587b9a7b0b Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Mon, 18 Jul 2022 03:14:38 +0000
Subject: [PATCH 07/58] python file modify

---
 fastdeploy/LICENSE                       | 201 +++++
 fastdeploy/ThirdPartyNotices.txt         | 734 +++++++++++++++++++++++
 fastdeploy/vision/wongkinyiu/__init__.py |  28 +-
 model_zoo/vision/yolov7/yolov7.py        |   8 +++---
 4 files changed, 953 insertions(+), 18 deletions(-)
 create mode 100644 fastdeploy/LICENSE
 create mode 100644
fastdeploy/ThirdPartyNotices.txt diff --git a/fastdeploy/LICENSE b/fastdeploy/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/fastdeploy/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/fastdeploy/ThirdPartyNotices.txt b/fastdeploy/ThirdPartyNotices.txt new file mode 100644 index 0000000000..5842b9a717 --- /dev/null +++ b/fastdeploy/ThirdPartyNotices.txt @@ -0,0 +1,734 @@ +This project depends on some open source projects, list as below + +-------- +1. https://github.com/protocolbuffers/protobuf + +Copyright 2008 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. + +-------- +2. https://github.com/onnx/onnx + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------- +3. 
https://github.com/microsoft/onnxruntime + +MIT License + +Copyright (c) Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------- +4. https://github.com/pybind/pybind11 + +Copyright (c) 2016 Wenzel Jakob , All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Please also refer to the file .github/CONTRIBUTING.md, which clarifies licensing of +external contributions to this project including patches, pull requests, etc. + +-------- +4. https://github.com/onnx/onnx-tensorrt + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 NVIDIA Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------- +5. 
https://github.com/opencv/opencv + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+--------
+6. https://github.com/jbeder/yaml-cpp
+
+Copyright (c) 2008-2015 Jesse Beder.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/fastdeploy/vision/wongkinyiu/__init__.py b/fastdeploy/vision/wongkinyiu/__init__.py
index 0ce06209fc..542389e208 100644
--- a/fastdeploy/vision/wongkinyiu/__init__.py
+++ b/fastdeploy/vision/wongkinyiu/__init__.py
@@ -41,31 +41,31 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
    # Mostly preprocessing-related; e.g. setting model.size = [1280, 1280] changes the resize size used during preprocessing (provided the model supports it)
    @property
    def size(self):
-        return self.model.size
+        return self._model.size

    @property
    def padding_value(self):
-        return self.model.padding_value
+        return self._model.padding_value

    @property
    def is_no_pad(self):
-        return self.model.is_no_pad
+        return self._model.is_no_pad

    @property
    def is_mini_pad(self):
-        return self.model.is_mini_pad
+        return self._model.is_mini_pad

    @property
    def is_scale_up(self):
-        return self.model.is_scale_up
+        return self._model.is_scale_up

    @property
    def stride(self):
-        return self.model.stride
+        return self._model.stride

    @property
    def max_wh(self):
-        return self.model.max_wh
+        return self._model.max_wh

    @size.setter
    def size(self, wh):
@@ -74,43 +74,43 @@ def size(self, wh):
        assert isinstance(wh, (list, tuple)),\
            "The value to set `size` must be type of tuple or list."
        assert len(wh) == 2,\
            "The value to set `size` must contain 2 elements [width, height], but now it contains {} elements.".format(
                len(wh))
-        self.model.size = wh
+        self._model.size = wh

    @padding_value.setter
    def padding_value(self, value):
        assert isinstance(
            value,
            list), "The value to set `padding_value` must be type of list."
-        self.model.padding_value = value
+        self._model.padding_value = value

    @is_no_pad.setter
    def is_no_pad(self, value):
        assert isinstance(
            value, bool), "The value to set `is_no_pad` must be type of bool."
-        self.model.is_no_pad = value
+        self._model.is_no_pad = value

    @is_mini_pad.setter
    def is_mini_pad(self, value):
        assert isinstance(
            value,
            bool), "The value to set `is_mini_pad` must be type of bool."
-        self.model.is_mini_pad = value
+        self._model.is_mini_pad = value

    @is_scale_up.setter
    def is_scale_up(self, value):
        assert isinstance(
            value,
            bool), "The value to set `is_scale_up` must be type of bool."
-        self.model.is_scale_up = value
+        self._model.is_scale_up = value

    @stride.setter
    def stride(self, value):
        assert isinstance(
            value, int), "The value to set `stride` must be type of int."
-        self.model.stride = value
+        self._model.stride = value

    @max_wh.setter
    def max_wh(self, value):
        assert isinstance(
            value, float), "The value to set `max_wh` must be type of float."
-        self.model.max_wh = value
+        self._model.max_wh = value
diff --git a/model_zoo/vision/yolov7/yolov7.py b/model_zoo/vision/yolov7/yolov7.py
index 81c529b15b..ca8aeeaf88 100644
--- a/model_zoo/vision/yolov7/yolov7.py
+++ b/model_zoo/vision/yolov7/yolov7.py
@@ -2,13 +2,13 @@
 import cv2

 # Download the model and the test image
-model_url = "TODO "
-test_jpg_url = "https://github.com/WongKinYiu/yolov7/blob/main/inference/images/horses.jpg"
-fd.download(model_url, ".", show_progress=True)
+# model_url = "TODO "
+test_jpg_url = "https://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/horses.jpg"
+# fd.download(model_url, ".", show_progress=True)
 fd.download(test_jpg_url, ".", show_progress=True)

 # Load the model
-model = fd.vision.wongkinyiu.YOLOv7("yolov7.onnx")
+model = fd.vision.wongkinyiu.YOLOv7("/home/fastdeploy/yolov7/onnxfiles/yolov7.onnx")

 # Run prediction on the image
 im = cv2.imread("horses.jpg")

From 39f64f2f5c0c0c479fa7219b1b436f61d625a61f Mon Sep 17 00:00:00 2001
From: ziqi-jin 
Date: Mon, 18 Jul 2022 06:03:08 +0000
Subject: [PATCH 08/58] delete license in fastdeploy/

---
 fastdeploy/LICENSE               | 201 ---------
 fastdeploy/ThirdPartyNotices.txt | 734 ------------------------------
 2 files changed, 935 deletions(-)
 delete mode 100644 fastdeploy/LICENSE
 delete mode 100644 fastdeploy/ThirdPartyNotices.txt

diff --git a/fastdeploy/LICENSE b/fastdeploy/LICENSE
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/fastdeploy/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/fastdeploy/ThirdPartyNotices.txt b/fastdeploy/ThirdPartyNotices.txt deleted file mode 100644 index 5842b9a717..0000000000 --- a/fastdeploy/ThirdPartyNotices.txt +++ /dev/null @@ -1,734 +0,0 @@ -This project depends on some open source projects, list as below - --------- -1. https://github.com/protocolbuffers/protobuf - -Copyright 2008 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Code generated by the Protocol Buffer compiler is owned by the owner -of the input file used when generating it. This code is not -standalone and requires a support library to be linked with it. This -support library is itself covered by the above license. - --------- -2. https://github.com/onnx/onnx - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------- -3. https://github.com/microsoft/onnxruntime - -MIT License - -Copyright (c) Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------- -4. https://github.com/pybind/pybind11 - -Copyright (c) 2016 Wenzel Jakob , All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Please also refer to the file .github/CONTRIBUTING.md, which clarifies licensing of -external contributions to this project including patches, pull requests, etc. - --------- -4. https://github.com/onnx/onnx-tensorrt - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2021 NVIDIA Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------- -5. https://github.com/opencv/opencv - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------- -6. https://github.com/jbeder/yaml-cpp - -Copyright (c) 2008-2015 Jesse Beder. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. From d071b3702c39386dc3cc9a19af7e0ee56b36cdca Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 06:15:17 +0000 Subject: [PATCH 09/58] repush the conflict part --- fastdeploy/vision.h | 3 --- fastdeploy/vision/vision_pybind.cc | 3 --- 2 files changed, 6 deletions(-) diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h index 4e83d2681c..5f948092d7 100644 --- a/fastdeploy/vision.h +++ b/fastdeploy/vision.h @@ -17,11 +17,8 @@ #ifdef ENABLE_VISION #include "fastdeploy/vision/ppcls/model.h" #include "fastdeploy/vision/ultralytics/yolov5.h" -<<<<<<< HEAD #include "fastdeploy/vision/wongkinyiu/yolov7.h" -======= #include "fastdeploy/vision/meituan/yolov6.h" ->>>>>>> PaddlePaddle-develop #endif #include "fastdeploy/vision/visualize/visualize.h" diff --git a/fastdeploy/vision/vision_pybind.cc b/fastdeploy/vision/vision_pybind.cc index 256fb1e114..bc54e0d674 100644 --- a/fastdeploy/vision/vision_pybind.cc +++ b/fastdeploy/vision/vision_pybind.cc @@ -42,12 +42,9 @@ void BindVision(pybind11::module& m) { BindPpClsModel(m); BindUltralytics(m); -<<<<<<< HEAD BindWongkinyiu(m); -======= BindMeituan(m); #ifdef ENABLE_VISION_VISUALIZE ->>>>>>> PaddlePaddle-develop BindVisualize(m); #endif } From d5026ca1e47612b7ab85fb27a2730ea350dfc211 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 06:44:54 +0000 Subject: [PATCH 10/58] README.md modified --- model_zoo/vision/yolov7/README.md | 36 +++++++++------------- model_zoo/vision/yolov7/api.md | 8 ++--- model_zoo/vision/yolov7/cpp/CMakeLists.txt | 2 +- model_zoo/vision/yolov7/cpp/README.md | 3 +- model_zoo/vision/yolov7/cpp/yolov7.cc | 2 +- model_zoo/vision/yolov7/yolov7.py | 4 +-- 6 files changed, 24 insertions(+), 31 deletions(-) diff --git a/model_zoo/vision/yolov7/README.md b/model_zoo/vision/yolov7/README.md index 80f9aa0fac..93a6f81188 100644 --- a/model_zoo/vision/yolov7/README.md +++ b/model_zoo/vision/yolov7/README.md @@ -32,9 +32,9 @@ - 从PaddlePaddle获取 -## Python部署 -### 安装FastDeploy + +## 安装FastDeploy 使用如下命令安装FastDeploy,注意到此处安装的是`vision-cpu`,也可根据需求安装`vision-gpu` @@ -45,33 +45,27 @@ pip install fastdeploy-python # 安装vision-cpu模块 fastdeploy install vision-cpu ``` +## Python部署 -### 运行demo - +执行如下代码即会自动下载测试图片 ``` python yolov7.py ``` - - -## C++部署 - -### 编译demo文件 - +执行完成后会将可视化结果保存在本地`vis_result.jpg`,同时输出检测结果如下 ``` -# 切换到./cpp/ 目录下 -cd cpp/ - -# 下载和解压预测库 -wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz -tar xvf fastdeploy-linux-x64-0.0.3.tgz - -# 编译示例代码 -mkdir build & cd build 
-cmake .. -make -j +DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] +223.395142,403.948669, 345.337189, 867.339050, 0.856906, 0 +668.301758,400.781342, 808.441772, 882.534973, 0.829716, 0 +50.210720,398.571411, 243.123367, 905.016602, 0.805375, 0 +23.768242,214.979370, 802.627686, 778.840881, 0.756311, 5 +0.737200,552.281006, 78.617218, 890.945007, 0.363471, 0 ``` +## 其它文档 + +- [C++部署](./cpp/README.md) +- [YOLOv7 API文档](./api.md) diff --git a/model_zoo/vision/yolov7/api.md b/model_zoo/vision/yolov7/api.md index 898a3f585f..7c5fc30163 100644 --- a/model_zoo/vision/yolov7/api.md +++ b/model_zoo/vision/yolov7/api.md @@ -4,9 +4,9 @@ ### YOLOv7类 ``` -fastdeploy.vision.ultralytics.YOLOv7(model_file, params_file=None, runtime_option=None, model_format=fd.Frontend.ONNX) +fastdeploy.vision.wongkinyiu.YOLOv7(model_file, params_file=None, runtime_option=None, model_format=fd.Frontend.ONNX) ``` -YOLOv7模型加载和初始化,当model_format为`fd.Frontend.ONNX`时,只需提供model_file,如`yolov7s.onnx`;当model_format为`fd.Frontend.PADDLE`时,则需同时提供model_file和params_file。 +YOLOv7模型加载和初始化,当model_format为`fd.Frontend.ONNX`时,只需提供model_file,如`yolov7.onnx`;当model_format为`fd.Frontend.PADDLE`时,则需同时提供model_file和params_file。 **参数** @@ -34,13 +34,13 @@ YOLOv7模型加载和初始化,当model_format为`fd.Frontend.ONNX`时,只 ### YOLOv7类 ``` -fastdeploy::vision::ultralytics::YOLOv7( +fastdeploy::vision::wongkinyiu::YOLOv7( const string& model_file, const string& params_file = "", const RuntimeOption& runtime_option = RuntimeOption(), const Frontend& model_format = Frontend::ONNX) ``` -YOLOv7模型加载和初始化,当model_format为`Frontend::ONNX`时,只需提供model_file,如`yolov7s.onnx`;当model_format为`Frontend::PADDLE`时,则需同时提供model_file和params_file。 +YOLOv7模型加载和初始化,当model_format为`Frontend::ONNX`时,只需提供model_file,如`yolov7.onnx`;当model_format为`Frontend::PADDLE`时,则需同时提供model_file和params_file。 **参数** diff --git a/model_zoo/vision/yolov7/cpp/CMakeLists.txt b/model_zoo/vision/yolov7/cpp/CMakeLists.txt index 09f07b1748..ec7c86d026 100644 --- a/model_zoo/vision/yolov7/cpp/CMakeLists.txt +++ b/model_zoo/vision/yolov7/cpp/CMakeLists.txt @@ -5,7 +5,7 @@ CMAKE_MINIMUM_REQUIRED (VERSION 3.16) # add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) # 指定下载解压后的fastdeploy库路径 -set(FASTDEPLOY_INSTALL_DIR /home/fastdeploy/FastDeploy/build/fastdeploy-0.0.3) +set(FASTDEPLOY_INSTALL_DIR ${PROJECT_SOURCE_DIR}/fastdeploy-linux-x64-0.3.0/) include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md index b43d4381e5..fd46e210f8 100644 --- a/model_zoo/vision/yolov7/cpp/README.md +++ b/model_zoo/vision/yolov7/cpp/README.md @@ -26,5 +26,6 @@ DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] 668.301758,400.781342, 808.441772, 882.534973, 0.829716, 0 50.210720,398.571411, 243.123367, 905.016602, 0.805375, 0 23.768242,214.979370, 802.627686, 778.840881, 0.756311, 5 -0.737200,552.281006, 78.617218, 890.945007, 0.363471, 0 +0.737200,552.281006, 78.617218, 890.945007, 0.36341 ``` + diff --git a/model_zoo/vision/yolov7/cpp/yolov7.cc b/model_zoo/vision/yolov7/cpp/yolov7.cc index 4b89972859..1607b2be09 100644 --- a/model_zoo/vision/yolov7/cpp/yolov7.cc +++ b/model_zoo/vision/yolov7/cpp/yolov7.cc @@ -16,7 +16,7 @@ int main() { namespace vis = fastdeploy::vision; - auto model = vis::wongkinyiu::YOLOv7("/home/fastdeploy/yolov7/onnxfiles/yolov7.onnx"); + auto model = vis::wongkinyiu::YOLOv7("yolov7.onnx"); if (!model.Initialized()) { std::cerr << "Init Failed." 
<< std::endl; return -1; diff --git a/model_zoo/vision/yolov7/yolov7.py b/model_zoo/vision/yolov7/yolov7.py index ca8aeeaf88..cef467622d 100644 --- a/model_zoo/vision/yolov7/yolov7.py +++ b/model_zoo/vision/yolov7/yolov7.py @@ -2,13 +2,11 @@ import cv2 # 下载模型和测试图片 -# model_url = "TODO " test_jpg_url = "https://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/horses.jpg" -# fd.download(model_url, ".", show_progress=True) fd.download(test_jpg_url, ".", show_progress=True) # 加载模型 -model = fd.vision.wongkinyiu.YOLOv7("/home/fastdeploy/yolov7/onnxfiles/yolov7.onnx") +model = fd.vision.wongkinyiu.YOLOv7("yolov7.onnx") # 预测图片 im = cv2.imread("horses.jpg") From fb376adf9616b9b3aa3d515c739655567161722b Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 06:46:19 +0000 Subject: [PATCH 11/58] README.md modified --- model_zoo/vision/yolov7/README.md | 10 +++++----- model_zoo/vision/yolov7/cpp/README.md | 10 +++++----- model_zoo/vision/yolov7/cpp/yolov7.cc | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/model_zoo/vision/yolov7/README.md b/model_zoo/vision/yolov7/README.md index 93a6f81188..77e7a654d1 100644 --- a/model_zoo/vision/yolov7/README.md +++ b/model_zoo/vision/yolov7/README.md @@ -55,11 +55,11 @@ python yolov7.py 执行完成后会将可视化结果保存在本地`vis_result.jpg`,同时输出检测结果如下 ``` DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] -223.395142,403.948669, 345.337189, 867.339050, 0.856906, 0 -668.301758,400.781342, 808.441772, 882.534973, 0.829716, 0 -50.210720,398.571411, 243.123367, 905.016602, 0.805375, 0 -23.768242,214.979370, 802.627686, 778.840881, 0.756311, 5 -0.737200,552.281006, 78.617218, 890.945007, 0.363471, 0 +0.056616,191.221619, 314.871063, 409.948914, 0.955449, 17 +432.547852,211.914841, 594.904297, 346.708618, 0.942706, 17 +0.000000,185.456207, 153.967789, 286.157562, 0.860487, 17 +224.049210,195.147003, 419.658234, 364.004852, 0.798262, 17 +369.316986,209.055725, 456.373840, 321.627625, 0.687066, 17 ``` ## 其它文档 diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md index fd46e210f8..012d4c765b 100644 --- a/model_zoo/vision/yolov7/cpp/README.md +++ b/model_zoo/vision/yolov7/cpp/README.md @@ -22,10 +22,10 @@ wget https://github.com/WongKinYiu/yolov7/blob/main/inference/images/horses.jpg 执行完后可视化的结果保存在本地`vis_result.jpg`,同时会将检测框输出在终端,如下所示 ``` DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] -223.395142,403.948669, 345.337189, 867.339050, 0.856906, 0 -668.301758,400.781342, 808.441772, 882.534973, 0.829716, 0 -50.210720,398.571411, 243.123367, 905.016602, 0.805375, 0 -23.768242,214.979370, 802.627686, 778.840881, 0.756311, 5 -0.737200,552.281006, 78.617218, 890.945007, 0.36341 +0.056616,191.221619, 314.871063, 409.948914, 0.955449, 17 +432.547852,211.914841, 594.904297, 346.708618, 0.942706, 17 +0.000000,185.456207, 153.967789, 286.157562, 0.860487, 17 +224.049210,195.147003, 419.658234, 364.004852, 0.798262, 17 +369.316986,209.055725, 456.373840, 321.627625, 0.687066, 17 ``` diff --git a/model_zoo/vision/yolov7/cpp/yolov7.cc b/model_zoo/vision/yolov7/cpp/yolov7.cc index 1607b2be09..8b41c0288b 100644 --- a/model_zoo/vision/yolov7/cpp/yolov7.cc +++ b/model_zoo/vision/yolov7/cpp/yolov7.cc @@ -21,7 +21,7 @@ int main() { std::cerr << "Init Failed." 
<< std::endl; return -1; } - cv::Mat im = cv::imread("bus.jpg"); + cv::Mat im = cv::imread("horses.jpg"); cv::Mat vis_im = im.clone(); vis::DetectionResult res; From 4b8737c9c0577c1a6ba0132ad76b6e72aa9e8e20 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 06:54:11 +0000 Subject: [PATCH 12/58] file path modified --- model_zoo/vision/yolov7/README.md | 3 +++ model_zoo/vision/yolov7/cpp/yolov7.cc | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/model_zoo/vision/yolov7/README.md b/model_zoo/vision/yolov7/README.md index 77e7a654d1..70841fa61e 100644 --- a/model_zoo/vision/yolov7/README.md +++ b/model_zoo/vision/yolov7/README.md @@ -26,6 +26,9 @@ # 导出onnx格式文件 python models/export.py --grid --dynamic --weights PATH/TO/yolo7.pt + + # 移动onnx文件到demo目录 + cp PATH/TO/yolo7.onnx PATH/TO/model_zoo/vision/yolov7/ ``` diff --git a/model_zoo/vision/yolov7/cpp/yolov7.cc b/model_zoo/vision/yolov7/cpp/yolov7.cc index 8b41c0288b..6d2a80a85c 100644 --- a/model_zoo/vision/yolov7/cpp/yolov7.cc +++ b/model_zoo/vision/yolov7/cpp/yolov7.cc @@ -16,12 +16,12 @@ int main() { namespace vis = fastdeploy::vision; - auto model = vis::wongkinyiu::YOLOv7("yolov7.onnx"); + auto model = vis::wongkinyiu::YOLOv7("../yolov7.onnx"); if (!model.Initialized()) { std::cerr << "Init Failed." << std::endl; return -1; } - cv::Mat im = cv::imread("horses.jpg"); + cv::Mat im = cv::imread("../horses.jpg"); cv::Mat vis_im = im.clone(); vis::DetectionResult res; From ce922a0326c8dc14964476be7501a896d9e39302 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 06:57:49 +0000 Subject: [PATCH 13/58] file path modified --- model_zoo/vision/yolov7/cpp/README.md | 27 +++++++++++++++++++++++++-- model_zoo/vision/yolov7/cpp/yolov7.cc | 4 ++-- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md index 012d4c765b..bef869f881 100644 --- a/model_zoo/vision/yolov7/cpp/README.md +++ b/model_zoo/vision/yolov7/cpp/README.md @@ -1,5 +1,29 @@ # 编译YOLOv7示例 +## 生成ONNX文件 + +- 手动获取 + + 访问[YOLOv7](https://github.com/WongKinYiu/yolov7)官方github库,按照指引下载安装,下载`yolov7.pt` 模型,利用 `models/export.py` 得到`onnx`格式文件。 + + + + ``` + #下载yolov7模型文件 + wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt + + # 导出onnx格式文件 + python models/export.py --grid --dynamic --weights PATH/TO/yolo7.pt + + # 移动onnx文件到demo目录 + cp PATH/TO/yolo7.onnx PATH/TO/model_zoo/vision/yolov7/ + ``` + + + +- 从PaddlePaddle获取 + + ``` # 下载和解压预测库 @@ -11,8 +35,7 @@ mkdir build & cd build cmake .. make -j -# 下载模型和图片 -wget "TODO" +# 下载图片 wget https://github.com/WongKinYiu/yolov7/blob/main/inference/images/horses.jpg # 执行 diff --git a/model_zoo/vision/yolov7/cpp/yolov7.cc b/model_zoo/vision/yolov7/cpp/yolov7.cc index 6d2a80a85c..8b41c0288b 100644 --- a/model_zoo/vision/yolov7/cpp/yolov7.cc +++ b/model_zoo/vision/yolov7/cpp/yolov7.cc @@ -16,12 +16,12 @@ int main() { namespace vis = fastdeploy::vision; - auto model = vis::wongkinyiu::YOLOv7("../yolov7.onnx"); + auto model = vis::wongkinyiu::YOLOv7("yolov7.onnx"); if (!model.Initialized()) { std::cerr << "Init Failed." 
<< std::endl; return -1; } - cv::Mat im = cv::imread("../horses.jpg"); + cv::Mat im = cv::imread("horses.jpg"); cv::Mat vis_im = im.clone(); vis::DetectionResult res; From 6e00b82b40e3e8d19944408379ed11fb77a90073 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 06:59:58 +0000 Subject: [PATCH 14/58] file path modified --- model_zoo/vision/yolov7/cpp/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md index bef869f881..1b577a7a3a 100644 --- a/model_zoo/vision/yolov7/cpp/README.md +++ b/model_zoo/vision/yolov7/cpp/README.md @@ -23,7 +23,7 @@ - 从PaddlePaddle获取 - +## 运行demo ``` # 下载和解压预测库 From 8c359fb9defa42ccd404890d26bc55b8f063c176 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 07:02:31 +0000 Subject: [PATCH 15/58] file path modified --- model_zoo/vision/yolov7/cpp/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md index 1b577a7a3a..918625eea7 100644 --- a/model_zoo/vision/yolov7/cpp/README.md +++ b/model_zoo/vision/yolov7/cpp/README.md @@ -15,8 +15,6 @@ # 导出onnx格式文件 python models/export.py --grid --dynamic --weights PATH/TO/yolo7.pt - # 移动onnx文件到demo目录 - cp PATH/TO/yolo7.onnx PATH/TO/model_zoo/vision/yolov7/ ``` @@ -35,6 +33,9 @@ mkdir build & cd build cmake .. make -j +# 移动onnx文件到demo目录 +cp PATH/TO/yolo7.onnx PATH/TO/model_zoo/vision/yolov7/cpp/build/ + # 下载图片 wget https://github.com/WongKinYiu/yolov7/blob/main/inference/images/horses.jpg From 906c730255d7e4e1198784f45918984dcfe9820f Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 07:03:37 +0000 Subject: [PATCH 16/58] file path modified --- model_zoo/vision/yolov7/README.md | 2 +- model_zoo/vision/yolov7/cpp/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/model_zoo/vision/yolov7/README.md b/model_zoo/vision/yolov7/README.md index 70841fa61e..7246a4a7b7 100644 --- a/model_zoo/vision/yolov7/README.md +++ b/model_zoo/vision/yolov7/README.md @@ -12,7 +12,7 @@ └── yolov7.py ``` -## 生成ONNX文件 +## 获取ONNX文件 - 手动获取 diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md index 918625eea7..ce6337962d 100644 --- a/model_zoo/vision/yolov7/cpp/README.md +++ b/model_zoo/vision/yolov7/cpp/README.md @@ -1,6 +1,6 @@ # 编译YOLOv7示例 -## 生成ONNX文件 +## 获取ONNX文件 - 手动获取 From 80c12230f5447966d363f34f57a15abeda1951ae Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 07:36:14 +0000 Subject: [PATCH 17/58] README modified --- model_zoo/vision/yolov7/cpp/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md index ce6337962d..0fcaf8ae11 100644 --- a/model_zoo/vision/yolov7/cpp/README.md +++ b/model_zoo/vision/yolov7/cpp/README.md @@ -37,7 +37,7 @@ make -j cp PATH/TO/yolo7.onnx PATH/TO/model_zoo/vision/yolov7/cpp/build/ # 下载图片 -wget https://github.com/WongKinYiu/yolov7/blob/main/inference/images/horses.jpg +wget hhttps://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/horses.jpg # 执行 ./yolov7_demo From 6072757fe8af3a7f2a666b638a379865d26e9e59 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 07:36:46 +0000 Subject: [PATCH 18/58] README modified --- model_zoo/vision/yolov7/cpp/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model_zoo/vision/yolov7/cpp/README.md 
b/model_zoo/vision/yolov7/cpp/README.md
index 0fcaf8ae11..a1d146053a 100644
--- a/model_zoo/vision/yolov7/cpp/README.md
+++ b/model_zoo/vision/yolov7/cpp/README.md
@@ -37,7 +37,7 @@ make -j
 cp PATH/TO/yolo7.onnx PATH/TO/model_zoo/vision/yolov7/cpp/build/
 
 # 下载图片
-wget hhttps://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/horses.jpg
+wget https://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/horses.jpg
 
 # 执行
 ./yolov7_demo

From 2c6e6a4836b6c20c4a3ebc562d9cf3722c414423 Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Mon, 18 Jul 2022 08:25:58 +0000
Subject: [PATCH 19/58] move some helpers to private

---
 fastdeploy/vision/wongkinyiu/yolov7.h | 43 ++++++++++++++-------------
 model_zoo/vision/yolov7/api.md        |  2 +-
 2 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/fastdeploy/vision/wongkinyiu/yolov7.h b/fastdeploy/vision/wongkinyiu/yolov7.h
index b21c04936a..29dffaf2f4 100644
--- a/fastdeploy/vision/wongkinyiu/yolov7.h
+++ b/fastdeploy/vision/wongkinyiu/yolov7.h
@@ -32,27 +32,6 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
   // 定义模型的名称
   virtual std::string ModelName() const { return "WongKinYiu/yolov7"; }
 
-  // 初始化函数,包括初始化后端,以及其它模型推理需要涉及的操作
-  virtual bool Initialize();
-
-  // 输入图像预处理操作
-  // Mat为FastDeploy定义的数据结构
-  // FDTensor为预处理后的Tensor数据,传给后端进行推理
-  // im_info为预处理过程保存的数据,在后处理中需要用到
-  virtual bool Preprocess(Mat* mat, FDTensor* outputs,
-                          std::map<std::string, std::array<float, 2>>* im_info);
-
-  // 后端推理结果后处理,输出给用户
-  // infer_result 为后端推理后的输出Tensor
-  // result 为模型预测的结果
-  // im_info 为预处理记录的信息,后处理用于还原box
-  // conf_threshold 后处理时过滤box的置信度阈值
-  // nms_iou_threshold 后处理时NMS设定的iou阈值
-  virtual bool Postprocess(
-      FDTensor& infer_result, DetectionResult* result,
-      const std::map<std::string, std::array<float, 2>>& im_info,
-      float conf_threshold, float nms_iou_threshold);
-
   // 模型预测接口,即用户调用的接口
   // im 为用户的输入数据,目前对于CV均定义为cv::Mat
   // result 为模型预测的输出结构体
@@ -81,6 +60,28 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
   int stride;
   // for offseting the boxes by classes when using NMS
   float max_wh;
+
+ private:
+  // 初始化函数,包括初始化后端,以及其它模型推理需要涉及的操作
+  virtual bool Initialize();
+
+  // 输入图像预处理操作
+  // Mat为FastDeploy定义的数据结构
+  // FDTensor为预处理后的Tensor数据,传给后端进行推理
+  // im_info为预处理过程保存的数据,在后处理中需要用到
+  virtual bool Preprocess(Mat* mat, FDTensor* outputs,
+                          std::map<std::string, std::array<float, 2>>* im_info);
+
+  // 后端推理结果后处理,输出给用户
+  // infer_result 为后端推理后的输出Tensor
+  // result 为模型预测的结果
+  // im_info 为预处理记录的信息,后处理用于还原box
+  // conf_threshold 后处理时过滤box的置信度阈值
+  // nms_iou_threshold 后处理时NMS设定的iou阈值
+  virtual bool Postprocess(
+      FDTensor& infer_result, DetectionResult* result,
+      const std::map<std::string, std::array<float, 2>>& im_info,
+      float conf_threshold, float nms_iou_threshold);
 };
 }  // namespace wongkinyiu
 }  // namespace vision
diff --git a/model_zoo/vision/yolov7/api.md b/model_zoo/vision/yolov7/api.md
index 898a3f585f..7c5fc30163 100644
--- a/model_zoo/vision/yolov7/api.md
+++ b/model_zoo/vision/yolov7/api.md
@@ -23,7 +23,7 @@ YOLOv7模型加载和初始化,当model_format为`fd.Frontend.ONNX`时,只
 >
 > **参数**
 >
-> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,RGB格式
+> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式
 > > * **conf_threshold**(float): 检测框置信度过滤阈值
 > > * **nms_iou_threshold**(float): NMS处理过程中iou阈值

From 48136f0d152af4a1a658af71ddaacfe4498b9f2e Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Mon, 18 Jul 2022 08:46:49 +0000
Subject: [PATCH 20/58] add examples for yolov7

---
 examples/CMakeLists.txt              |  1 +
 examples/vision/wongkinyiu_yolov7.cc | 52 ++++++++++++++++++++++++++++
 2 files changed, 53 insertions(+)
 create mode 100644 examples/vision/wongkinyiu_yolov7.cc

diff --git a/examples/CMakeLists.txt
b/examples/CMakeLists.txt index 4228a3e01f..31cd1723b1 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -17,6 +17,7 @@ endfunction() if (WTIH_VISION_EXAMPLES) add_fastdeploy_executable(vision ultralytics yolov5) add_fastdeploy_executable(vision meituan yolov6) + add_fastdeploy_executable(vision wongkinyiu yolov7) endif() # other examples ... \ No newline at end of file diff --git a/examples/vision/wongkinyiu_yolov7.cc b/examples/vision/wongkinyiu_yolov7.cc new file mode 100644 index 0000000000..7de033cae8 --- /dev/null +++ b/examples/vision/wongkinyiu_yolov7.cc @@ -0,0 +1,52 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +int main() { + namespace vis = fastdeploy::vision; + + std::string model_file = "../resources/models/yolov7.onnx"; + std::string img_path = "../resources/images/horses.jpg"; + std::string vis_path = "../resources/outputs/wongkinyiu_yolov7_vis_result.jpg"; + + auto model = vis::wongkinyiu::YOLOv7(model_file); + if (!model.Initialized()) { + std::cerr << "Init Failed! Model: " << model_file << std::endl; + return -1; + } else { + std::cout << "Init Done! Model:" << model_file << std::endl; + } + model.EnableDebug(); + + cv::Mat im = cv::imread(img_path); + cv::Mat vis_im = im.clone(); + + vis::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Prediction Failed." << std::endl; + return -1; + } else { + std::cout << "Prediction Done!" << std::endl; + } + + // 输出预测框结果 + std::cout << res.Str() << std::endl; + + // 可视化预测结果 + vis::Visualize::VisDetection(&vis_im, res); + cv::imwrite(vis_path, vis_im); + std::cout << "Detect Done! 
Saved: " << vis_path << std::endl; + return 0; +} From 6feca9233a0503c3e2644b9fa2d1dd76ce5bdbb5 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 09:07:47 +0000 Subject: [PATCH 21/58] api.md modified --- model_zoo/vision/yolov7/api.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/model_zoo/vision/yolov7/api.md b/model_zoo/vision/yolov7/api.md index 1f40ba645a..92b16c4755 100644 --- a/model_zoo/vision/yolov7/api.md +++ b/model_zoo/vision/yolov7/api.md @@ -51,7 +51,7 @@ YOLOv7模型加载和初始化,当model_format为`Frontend::ONNX`时,只需 #### predict函数 > ``` -> YOLOv7::predict(cv::Mat* im, DetectionResult* result, +> YOLOv7::Predict(cv::Mat* im, DetectionResult* result, > float conf_threshold = 0.25, > float nms_iou_threshold = 0.5) > ``` @@ -59,7 +59,7 @@ YOLOv7模型加载和初始化,当model_format为`Frontend::ONNX`时,只需 > > **参数** > -> > * **im**: 输入图像,注意需为HWC,RGB格式 +> > * **im**: 输入图像,注意需为HWC,BGR格式 > > * **result**: 检测结果,包括检测框,各个框的置信度 > > * **conf_threshold**: 检测框置信度过滤阈值 > > * **nms_iou_threshold**: NMS处理过程中iou阈值 From ae70d4f50ec9981e97dd7b79f3e3265c2105ed0c Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 09:11:01 +0000 Subject: [PATCH 22/58] api.md modified --- model_zoo/vision/yolov7/api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model_zoo/vision/yolov7/api.md b/model_zoo/vision/yolov7/api.md index 92b16c4755..abd2abdcec 100644 --- a/model_zoo/vision/yolov7/api.md +++ b/model_zoo/vision/yolov7/api.md @@ -49,7 +49,7 @@ YOLOv7模型加载和初始化,当model_format为`Frontend::ONNX`时,只需 > * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 > * **model_format**(Frontend): 模型格式 -#### predict函数 +#### Predict函数 > ``` > YOLOv7::Predict(cv::Mat* im, DetectionResult* result, > float conf_threshold = 0.25, From f591b8567b08afbd1e3894100becaa2ce511424b Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 09:31:25 +0000 Subject: [PATCH 23/58] api.md modified --- model_zoo/vision/yolov7/api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model_zoo/vision/yolov7/api.md b/model_zoo/vision/yolov7/api.md index abd2abdcec..02cf78121c 100644 --- a/model_zoo/vision/yolov7/api.md +++ b/model_zoo/vision/yolov7/api.md @@ -49,7 +49,7 @@ YOLOv7模型加载和初始化,当model_format为`Frontend::ONNX`时,只需 > * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 > * **model_format**(Frontend): 模型格式 -#### Predict函数 +#### redict函数 > ``` > YOLOv7::Predict(cv::Mat* im, DetectionResult* result, > float conf_threshold = 0.25, From f0def41c8b5e5e2b1d627ada84b2c4b17c84aeac Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 09:41:47 +0000 Subject: [PATCH 24/58] YOLOv7 --- model_zoo/vision/yolov7/api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model_zoo/vision/yolov7/api.md b/model_zoo/vision/yolov7/api.md index 02cf78121c..abd2abdcec 100644 --- a/model_zoo/vision/yolov7/api.md +++ b/model_zoo/vision/yolov7/api.md @@ -49,7 +49,7 @@ YOLOv7模型加载和初始化,当model_format为`Frontend::ONNX`时,只需 > * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 > * **model_format**(Frontend): 模型格式 -#### redict函数 +#### Predict函数 > ``` > YOLOv7::Predict(cv::Mat* im, DetectionResult* result, > float conf_threshold = 0.25, From 15b91609aae1f81e3d5789d40c18f0aa16e37e86 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 10:50:08 +0000 Subject: [PATCH 25/58] yolov7 release link --- model_zoo/vision/yolov7/README.md | 2 +- model_zoo/vision/yolov7/cpp/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/model_zoo/vision/yolov7/README.md b/model_zoo/vision/yolov7/README.md index 7246a4a7b7..c81c75d8d2 100644 --- a/model_zoo/vision/yolov7/README.md +++ b/model_zoo/vision/yolov7/README.md @@ -16,7 +16,7 @@ - 手动获取 - 访问[YOLOv7](https://github.com/WongKinYiu/yolov7)官方github库,按照指引下载安装,下载`yolov7.pt` 模型,利用 `models/export.py` 得到`onnx`格式文件。 + 访问[YOLOv7](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1)官方github库,按照指引下载安装,下载`yolov7.pt` 模型,利用 `models/export.py` 得到`onnx`格式文件。 diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md index a1d146053a..c3d4e8bcb2 100644 --- a/model_zoo/vision/yolov7/cpp/README.md +++ b/model_zoo/vision/yolov7/cpp/README.md @@ -4,7 +4,7 @@ - 手动获取 - 访问[YOLOv7](https://github.com/WongKinYiu/yolov7)官方github库,按照指引下载安装,下载`yolov7.pt` 模型,利用 `models/export.py` 得到`onnx`格式文件。 + 访问[YOLOv7](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1)官方github库,按照指引下载安装,下载`yolov7.pt` 模型,利用 `models/export.py` 得到`onnx`格式文件。 From 4706e8ca754735d318650c3f7a90b3e00f6ef16a Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 11:01:53 +0000 Subject: [PATCH 26/58] yolov7 release link --- model_zoo/vision/yolov7/README.md | 2 ++ model_zoo/vision/yolov7/cpp/README.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/model_zoo/vision/yolov7/README.md b/model_zoo/vision/yolov7/README.md index c81c75d8d2..e330a3055b 100644 --- a/model_zoo/vision/yolov7/README.md +++ b/model_zoo/vision/yolov7/README.md @@ -1,5 +1,7 @@ # 编译YOLOv7示例 +当前支持模型版本为:[YOLOv7](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1) + 本文档说明如何进行[YOLOv7](https://github.com/WongKinYiu/yolov7)的快速部署推理。本目录结构如下 ``` diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md index c3d4e8bcb2..2e9570f224 100644 --- a/model_zoo/vision/yolov7/cpp/README.md +++ b/model_zoo/vision/yolov7/cpp/README.md @@ -1,5 +1,7 @@ # 编译YOLOv7示例 +当前支持模型版本为:[YOLOv7](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1) + ## 获取ONNX文件 - 手动获取 From dc8358461f384cc7ee0fcc592a68e5a917925bf6 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 11:05:01 +0000 Subject: [PATCH 27/58] yolov7 release link --- model_zoo/vision/yolov7/README.md | 2 +- model_zoo/vision/yolov7/cpp/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/model_zoo/vision/yolov7/README.md b/model_zoo/vision/yolov7/README.md index e330a3055b..7eed2c0c43 100644 --- a/model_zoo/vision/yolov7/README.md +++ b/model_zoo/vision/yolov7/README.md @@ -1,6 +1,6 @@ # 编译YOLOv7示例 -当前支持模型版本为:[YOLOv7](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1) +当前支持模型版本为:[YOLOv7 v0.1](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1) 本文档说明如何进行[YOLOv7](https://github.com/WongKinYiu/yolov7)的快速部署推理。本目录结构如下 diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md index 2e9570f224..13a5e8343e 100644 --- a/model_zoo/vision/yolov7/cpp/README.md +++ b/model_zoo/vision/yolov7/cpp/README.md @@ -1,6 +1,6 @@ # 编译YOLOv7示例 -当前支持模型版本为:[YOLOv7](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1) +当前支持模型版本为:[YOLOv7 v0.1](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1) ## 获取ONNX文件 From 086debd8d3e040d37b0b8cbc006277d91e246baa Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Mon, 18 Jul 2022 11:10:43 +0000 Subject: [PATCH 28/58] copyright --- fastdeploy/vision/wongkinyiu/yolov7.cc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/fastdeploy/vision/wongkinyiu/yolov7.cc b/fastdeploy/vision/wongkinyiu/yolov7.cc index 6baf4c336b..db470d327e 100644 --- 
a/fastdeploy/vision/wongkinyiu/yolov7.cc
+++ b/fastdeploy/vision/wongkinyiu/yolov7.cc
@@ -1,3 +1,17 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #include "fastdeploy/vision/wongkinyiu/yolov7.h"
 #include "fastdeploy/utils/perf.h"
 #include "fastdeploy/vision/utils/utils.h"

From 4f980b9ce8e2573d76385ca4f0b98febf66f57a4 Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Mon, 18 Jul 2022 12:09:04 +0000
Subject: [PATCH 29/58] change some helpers to private

---
 fastdeploy/vision/wongkinyiu/yolov7.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/fastdeploy/vision/wongkinyiu/yolov7.h b/fastdeploy/vision/wongkinyiu/yolov7.h
index 29dffaf2f4..75cab34ded 100644
--- a/fastdeploy/vision/wongkinyiu/yolov7.h
+++ b/fastdeploy/vision/wongkinyiu/yolov7.h
@@ -63,13 +63,13 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
 
  private:
   // 初始化函数,包括初始化后端,以及其它模型推理需要涉及的操作
-  virtual bool Initialize();
+  bool Initialize();
 
   // 输入图像预处理操作
   // Mat为FastDeploy定义的数据结构
   // FDTensor为预处理后的Tensor数据,传给后端进行推理
   // im_info为预处理过程保存的数据,在后处理中需要用到
-  virtual bool Preprocess(Mat* mat, FDTensor* outputs,
+  bool Preprocess(Mat* mat, FDTensor* outputs,
                  std::map<std::string, std::array<float, 2>>* im_info);
 
   // 后端推理结果后处理,输出给用户
@@ -78,7 +78,7 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
   // im_info 为预处理记录的信息,后处理用于还原box
   // conf_threshold 后处理时过滤box的置信度阈值
   // nms_iou_threshold 后处理时NMS设定的iou阈值
-  virtual bool Postprocess(
+  bool Postprocess(
       FDTensor& infer_result, DetectionResult* result,
       const std::map<std::string, std::array<float, 2>>& im_info,
       float conf_threshold, float nms_iou_threshold);

From 80beadfa3ce7ebb7cc2d345d4154cd42f6dec785 Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Tue, 19 Jul 2022 02:57:08 +0000
Subject: [PATCH 30/58] change variables to const and fix documents.
---
 fastdeploy/vision/wongkinyiu/yolov7.cc |  6 +++---
 model_zoo/vision/yolov7/README.md      | 16 ++--------------
 model_zoo/vision/yolov7/cpp/README.md  |  8 +-------
 3 files changed, 6 insertions(+), 24 deletions(-)

diff --git a/fastdeploy/vision/wongkinyiu/yolov7.cc b/fastdeploy/vision/wongkinyiu/yolov7.cc
index db470d327e..248718a69a 100644
--- a/fastdeploy/vision/wongkinyiu/yolov7.cc
+++ b/fastdeploy/vision/wongkinyiu/yolov7.cc
@@ -20,9 +20,9 @@ namespace fastdeploy {
 namespace vision {
 namespace wongkinyiu {
 
-void LetterBox(Mat* mat, std::vector<int> size, std::vector<float> color,
-               bool _auto, bool scale_fill = false, bool scale_up = true,
-               int stride = 32) {
+void LetterBox(Mat* mat, const std::vector<int>& size,
+               const std::vector<float>& color, bool _auto,
+               bool scale_fill = false, bool scale_up = true, int stride = 32) {
   float scale =
       std::min(size[1] * 1.0 / mat->Height(), size[0] * 1.0 / mat->Width());
   if (!scale_up) {
diff --git a/model_zoo/vision/yolov7/README.md b/model_zoo/vision/yolov7/README.md
index 7eed2c0c43..2bb13ce459 100644
--- a/model_zoo/vision/yolov7/README.md
+++ b/model_zoo/vision/yolov7/README.md
@@ -20,12 +20,12 @@
 
   访问[YOLOv7](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1)官方github库,按照指引下载安装,下载`yolov7.pt` 模型,利用 `models/export.py` 得到`onnx`格式文件。
 
-  
+
   ```
   #下载yolov7模型文件
   wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt
-  
+
   # 导出onnx格式文件
   python models/export.py --grid --dynamic --weights PATH/TO/yolo7.pt
 
@@ -33,12 +33,6 @@
   cp PATH/TO/yolo7.onnx PATH/TO/model_zoo/vision/yolov7/
   ```
 
-
-
-- 从PaddlePaddle获取
-
-
-
 ## 安装FastDeploy
 
 使用如下命令安装FastDeploy,注意到此处安装的是`vision-cpu`,也可根据需求安装`vision-gpu`
@@ -71,9 +65,3 @@ DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
 
 - [C++部署](./cpp/README.md)
 - [YOLOv7 API文档](./api.md)
-
-
-
-
-
-
diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md
index 13a5e8343e..f216c1aecf 100644
--- a/model_zoo/vision/yolov7/cpp/README.md
+++ b/model_zoo/vision/yolov7/cpp/README.md
@@ -8,20 +8,15 @@
 
   访问[YOLOv7](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1)官方github库,按照指引下载安装,下载`yolov7.pt` 模型,利用 `models/export.py` 得到`onnx`格式文件。
 
-
-
   ```
   #下载yolov7模型文件
   wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt
-  
+
   # 导出onnx格式文件
   python models/export.py --grid --dynamic --weights PATH/TO/yolo7.pt
 
   ```
-
-
-- 从PaddlePaddle获取
 
 ## 运行demo
 
 ```
 # 下载和解压预测库
@@ -54,4 +49,3 @@ DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
 224.049210,195.147003, 419.658234, 364.004852, 0.798262, 17
 369.316986,209.055725, 456.373840, 321.627625, 0.687066, 17
 ```
-

From f5f7a863e09490213c5ea51fd83c584ff10752df Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Tue, 19 Jul 2022 05:16:07 +0000
Subject: [PATCH 31/58] gitignore

---
 .gitignore | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 39783b8839..51f2f2ed80 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,4 +11,4 @@ fastdeploy.egg-info
 .setuptools-cmake-build
 fastdeploy/version.py
 fastdeploy/LICENSE*
-fastdeploy/ThirdPartyNotices* \ No newline at end of file
+fastdeploy/ThirdPartyNotices*

From e6cec25cace95e029adc08412aa359486446ec6d Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Tue, 19 Jul 2022 08:05:01 +0000
Subject: [PATCH 32/58] Transfer some functions to private member of class

---
 fastdeploy/vision/wongkinyiu/yolov7.cc | 10 +++++-----
 fastdeploy/vision/wongkinyiu/yolov7.h  | 17 ++++++++++++-----
 2 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/fastdeploy/vision/wongkinyiu/yolov7.cc b/fastdeploy/vision/wongkinyiu/yolov7.cc
index 248718a69a..532f552947 100644
--- a/fastdeploy/vision/wongkinyiu/yolov7.cc
+++ b/fastdeploy/vision/wongkinyiu/yolov7.cc
@@ -20,9 +20,9 @@ namespace fastdeploy {
 namespace vision {
 namespace wongkinyiu {
 
-void LetterBox(Mat* mat, const std::vector<int>& size,
-               const std::vector<float>& color, bool _auto,
-               bool scale_fill = false, bool scale_up = true, int stride = 32) {
+void YOLOv7::LetterBox(Mat* mat, const std::vector<int>& size,
+                       const std::vector<float>& color, bool _auto,
+                       bool scale_fill, bool scale_up, int stride) {
   float scale =
       std::min(size[1] * 1.0 / mat->Height(), size[0] * 1.0 / mat->Width());
   if (!scale_up) {
@@ -107,8 +107,8 @@ bool YOLOv7::Preprocess(Mat* mat, FDTensor* output,
   // 1. letterbox
   // 2. BGR->RGB
   // 3. HWC->CHW
-  LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, is_scale_up,
-            stride);
+  YOLOv7::LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad,
+                    is_scale_up, stride);
   BGR2RGB::Run(mat);
   Normalize::Run(mat, std::vector<float>(mat->Channels(), 0.0),
                  std::vector<float>(mat->Channels(), 1.0));
diff --git a/fastdeploy/vision/wongkinyiu/yolov7.h b/fastdeploy/vision/wongkinyiu/yolov7.h
index 75cab34ded..90be9ea463 100644
--- a/fastdeploy/vision/wongkinyiu/yolov7.h
+++ b/fastdeploy/vision/wongkinyiu/yolov7.h
@@ -70,7 +70,7 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
   // FDTensor为预处理后的Tensor数据,传给后端进行推理
   // im_info为预处理过程保存的数据,在后处理中需要用到
   bool Preprocess(Mat* mat, FDTensor* outputs,
-          std::map<std::string, std::array<float, 2>>* im_info);
+                  std::map<std::string, std::array<float, 2>>* im_info);
 
   // 后端推理结果后处理,输出给用户
@@ -78,10 +78,17 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
   // im_info 为预处理记录的信息,后处理用于还原box
   // conf_threshold 后处理时过滤box的置信度阈值
   // nms_iou_threshold 后处理时NMS设定的iou阈值
-  bool Postprocess(
-      FDTensor& infer_result, DetectionResult* result,
-      const std::map<std::string, std::array<float, 2>>& im_info,
-      float conf_threshold, float nms_iou_threshold);
+  bool Postprocess(FDTensor& infer_result, DetectionResult* result,
+                   const std::map<std::string, std::array<float, 2>>& im_info,
+                   float conf_threshold, float nms_iou_threshold);
+
+  // 对图片进行LetterBox处理
+  // mat 为输入图片
+  // size 为输入图片的size
+  void LetterBox(Mat* mat, const std::vector<int>& size,
+                 const std::vector<float>& color, bool _auto,
+                 bool scale_fill = false, bool scale_up = true,
+                 int stride = 32);
 };
 }  // namespace wongkinyiu
 }  // namespace vision

From e25e4f2a5c18ffe45bd3b8574dbe7c612a528e72 Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Tue, 19 Jul 2022 08:07:49 +0000
Subject: [PATCH 33/58] Transfer some functions to private member of class

---
 fastdeploy/vision/wongkinyiu/yolov7.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fastdeploy/vision/wongkinyiu/yolov7.h b/fastdeploy/vision/wongkinyiu/yolov7.h
index 90be9ea463..c494754f0e 100644
--- a/fastdeploy/vision/wongkinyiu/yolov7.h
+++ b/fastdeploy/vision/wongkinyiu/yolov7.h
@@ -83,8 +83,8 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
                    float conf_threshold, float nms_iou_threshold);
 
   // 对图片进行LetterBox处理
-  // mat 为输入图片
-  // size 为输入图片的size
+  // mat 为读取到的原图
+  // size 为输入模型的图像尺寸
   void LetterBox(Mat* mat, const std::vector<int>& size,
                  const std::vector<float>& color, bool _auto,
                  bool scale_fill = false, bool scale_up = true,
                  int stride = 32);
 };
 }  // namespace wongkinyiu
 }  // namespace vision

From e8a8439dd97e0a6d52f299bff2958290637687c8 Mon Sep 17 00:00:00 2001
From: ziqi-jin <67993288+ziqi-jin@users.noreply.github.com>
Date: Wed, 20 Jul 2022 15:25:57 +0800
Subject: [PATCH 34/58] Merge from develop (#9)

* Fix compile problem in different python version (#26)

* fix some usage problem in linux

* Fix compile problem

Co-authored-by: root

* Add PaddleDetection/PPYOLOE
model support (#22) * add ppdet/ppyoloe * Add demo code and documents * add convert processor to vision (#27) * update .gitignore * Added checking for cmake include dir * fixed missing trt_backend option bug when init from trt * remove un-need data layout and add pre-check for dtype * changed RGB2BRG to BGR2RGB in ppcls model * add model_zoo yolov6 c++/python demo * fixed CMakeLists.txt typos * update yolov6 cpp/README.md * add yolox c++/pybind and model_zoo demo * move some helpers to private * fixed CMakeLists.txt typos * add normalize with alpha and beta * add version notes for yolov5/yolov6/yolox * add copyright to yolov5.cc * revert normalize * fixed some bugs in yolox * fixed examples/CMakeLists.txt to avoid conflicts * add convert processor to vision * format examples/CMakeLists summary * Fix bug while the inference result is empty with YOLOv5 (#29) * Add multi-label function for yolov5 * Update README.md Update doc * Update fastdeploy_runtime.cc fix variable option.trt_max_shape wrong name * Update runtime_option.md Update resnet model dynamic shape setting name from images to x * Fix bug when inference result boxes are empty * Delete detection.py Co-authored-by: Jason Co-authored-by: root Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> --- examples/CMakeLists.txt | 26 +-- examples/vision/ppdet_ppyoloe.cc | 51 ++++++ fastdeploy/__init__.py | 2 +- fastdeploy/download.py | 2 +- fastdeploy/utils/utils.h | 4 + fastdeploy/vision.h | 1 + fastdeploy/vision/__init__.py | 1 + .../vision/common/processors/convert.cc | 62 +++++++ fastdeploy/vision/common/processors/convert.h | 42 +++++ .../vision/common/processors/transform.h | 1 + fastdeploy/vision/meituan/yolov6.cc | 28 +-- fastdeploy/vision/ppcls/model.cc | 19 +- fastdeploy/vision/ppcls/model.h | 16 +- fastdeploy/vision/ppcls/ppcls_pybind.cc | 2 +- fastdeploy/vision/ppdet/__init__.py | 39 ++++ fastdeploy/vision/ppdet/ppdet_pybind.cc | 32 ++++ fastdeploy/vision/ppdet/ppyoloe.cc | 170 ++++++++++++++++++ fastdeploy/vision/ppdet/ppyoloe.h | 44 +++++ fastdeploy/vision/ultralytics/yolov5.cc | 19 +- fastdeploy/vision/utils/sort_det_res.cc | 6 +- fastdeploy/vision/vision_pybind.cc | 10 +- fastdeploy/vision/visualize/detection.cc | 4 +- model_zoo/vision/ppyoloe/README.md | 52 ++++++ model_zoo/vision/ppyoloe/api.md | 74 ++++++++ model_zoo/vision/ppyoloe/cpp/CMakeLists.txt | 17 ++ model_zoo/vision/ppyoloe/cpp/README.md | 39 ++++ model_zoo/vision/ppyoloe/cpp/ppyoloe.cc | 51 ++++++ model_zoo/vision/ppyoloe/ppyoloe.py | 24 +++ setup.py | 30 +++- 29 files changed, 818 insertions(+), 50 deletions(-) create mode 100644 examples/vision/ppdet_ppyoloe.cc create mode 100644 fastdeploy/vision/common/processors/convert.cc create mode 100644 fastdeploy/vision/common/processors/convert.h create mode 100644 fastdeploy/vision/ppdet/__init__.py create mode 100644 fastdeploy/vision/ppdet/ppdet_pybind.cc create mode 100644 fastdeploy/vision/ppdet/ppyoloe.cc create mode 100644 fastdeploy/vision/ppdet/ppyoloe.h create mode 100644 model_zoo/vision/ppyoloe/README.md create mode 100644 model_zoo/vision/ppyoloe/api.md create mode 100644 model_zoo/vision/ppyoloe/cpp/CMakeLists.txt create mode 100644 model_zoo/vision/ppyoloe/cpp/README.md create mode 100644 model_zoo/vision/ppyoloe/cpp/ppyoloe.cc create mode 100644 model_zoo/vision/ppyoloe/ppyoloe.py diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 1e2dc43bd4..112193c86a 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt 
@@ -1,24 +1,26 @@ -function(add_fastdeploy_executable field url model) +function(add_fastdeploy_executable FIELD CC_FILE) # temp target name/file var in function scope - set(TEMP_TARGET_FILE ${PROJECT_SOURCE_DIR}/examples/${field}/${url}_${model}.cc) - set(TEMP_TARGET_NAME ${field}_${url}_${model}) + set(TEMP_TARGET_FILE ${CC_FILE}) + string(REGEX MATCHALL "[0-9A-Za-z_]*.cc" FILE_NAME ${CC_FILE}) + string(REGEX REPLACE ".cc" "" FILE_PREFIX ${FILE_NAME}) + set(TEMP_TARGET_NAME ${FIELD}_${FILE_PREFIX}) if (EXISTS ${TEMP_TARGET_FILE} AND TARGET fastdeploy) add_executable(${TEMP_TARGET_NAME} ${TEMP_TARGET_FILE}) target_link_libraries(${TEMP_TARGET_NAME} PUBLIC fastdeploy) - message(STATUS "Found source file: [${field}/${url}_${model}.cc], ADD!!! fastdeploy executable: [${TEMP_TARGET_NAME}] !") - else () - message(WARNING "Can not found source file: [${field}/${url}_${model}.cc], SKIP!!! fastdeploy executable: [${TEMP_TARGET_NAME}] !") + message(STATUS " Added FastDeploy Executable : ${TEMP_TARGET_NAME}") endif() unset(TEMP_TARGET_FILE) unset(TEMP_TARGET_NAME) endfunction() # vision examples -if (WITH_VISION_EXAMPLES) - add_fastdeploy_executable(vision ultralytics yolov5) - add_fastdeploy_executable(vision meituan yolov6) - add_fastdeploy_executable(vision wongkinyiu yolov7) - add_fastdeploy_executable(vision megvii yolox) +if(WITH_VISION_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples/vision) + message(STATUS "") + message(STATUS "*************FastDeploy Examples Summary**********") + file(GLOB ALL_VISION_EXAMPLE_SRCS ${PROJECT_SOURCE_DIR}/examples/vision/*.cc) + foreach(_CC_FILE ${ALL_VISION_EXAMPLE_SRCS}) + add_fastdeploy_executable(vision ${_CC_FILE}) + endforeach() endif() -# other examples ... \ No newline at end of file +# other examples ... diff --git a/examples/vision/ppdet_ppyoloe.cc b/examples/vision/ppdet_ppyoloe.cc new file mode 100644 index 0000000000..b234021c92 --- /dev/null +++ b/examples/vision/ppdet_ppyoloe.cc @@ -0,0 +1,51 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +int main() { + namespace vis = fastdeploy::vision; + + std::string model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel"; + std::string params_file = "ppyoloe_crn_l_300e_coco/model.pdiparams"; + std::string config_file = "ppyoloe_crn_l_300e_coco/infer_cfg.yml"; + std::string img_path = "test.jpeg"; + std::string vis_path = "vis.jpeg"; + + auto model = vis::ppdet::PPYOLOE(model_file, params_file, config_file); + if (!model.Initialized()) { + std::cerr << "Init Failed." << std::endl; + return -1; + } + + cv::Mat im = cv::imread(img_path); + cv::Mat vis_im = im.clone(); + + vis::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Prediction Failed." << std::endl; + return -1; + } else { + std::cout << "Prediction Done!" 
<< std::endl; + } + + // 输出预测框结果 + std::cout << res.Str() << std::endl; + + // 可视化预测结果 + vis::Visualize::VisDetection(&vis_im, res); + cv::imwrite(vis_path, vis_im); + std::cout << "Detect Done! Saved: " << vis_path << std::endl; + return 0; +} diff --git a/fastdeploy/__init__.py b/fastdeploy/__init__.py index 500e7cc42a..68006c1bed 100644 --- a/fastdeploy/__init__.py +++ b/fastdeploy/__init__.py @@ -17,7 +17,7 @@ from .fastdeploy_runtime import * from . import fastdeploy_main as C from . import vision -from .download import download +from .download import download, download_and_decompress def TensorInfoStr(tensor_info): diff --git a/fastdeploy/download.py b/fastdeploy/download.py index e00af098df..67f21d8e76 100644 --- a/fastdeploy/download.py +++ b/fastdeploy/download.py @@ -156,7 +156,7 @@ def decompress(fname): def url2dir(url, path, rename=None): full_name = download(url, path, rename, show_progress=True) - print("SDK is donwloaded, now extracting...") + print("File is donwloaded, now extracting...") if url.count(".tgz") > 0 or url.count(".tar") > 0 or url.count("zip") > 0: return decompress(full_name) diff --git a/fastdeploy/utils/utils.h b/fastdeploy/utils/utils.h index 1b9f625b5e..9312084265 100644 --- a/fastdeploy/utils/utils.h +++ b/fastdeploy/utils/utils.h @@ -64,6 +64,10 @@ class FASTDEPLOY_DECL FDLogger { bool verbose_ = true; }; +#ifndef __REL_FILE__ +#define __REL_FILE__ __FILE__ +#endif + #define FDERROR \ FDLogger(true, "[ERROR]") \ << __REL_FILE__ << "(" << __LINE__ << ")::" << __FUNCTION__ << "\t" diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h index ac3f006c0a..cafe310c70 100644 --- a/fastdeploy/vision.h +++ b/fastdeploy/vision.h @@ -16,6 +16,7 @@ #include "fastdeploy/core/config.h" #ifdef ENABLE_VISION #include "fastdeploy/vision/ppcls/model.h" +#include "fastdeploy/vision/ppdet/ppyoloe.h" #include "fastdeploy/vision/ultralytics/yolov5.h" #include "fastdeploy/vision/wongkinyiu/yolov7.h" #include "fastdeploy/vision/meituan/yolov6.h" diff --git a/fastdeploy/vision/__init__.py b/fastdeploy/vision/__init__.py index 7122bede0b..6acbf0c376 100644 --- a/fastdeploy/vision/__init__.py +++ b/fastdeploy/vision/__init__.py @@ -15,6 +15,7 @@ from . import evaluation from . import ppcls +from . import ppdet from . import ultralytics from . import meituan from . import megvii diff --git a/fastdeploy/vision/common/processors/convert.cc b/fastdeploy/vision/common/processors/convert.cc new file mode 100644 index 0000000000..a7ca6de07a --- /dev/null +++ b/fastdeploy/vision/common/processors/convert.cc @@ -0,0 +1,62 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "fastdeploy/vision/common/processors/convert.h" + +namespace fastdeploy { + +namespace vision { + +Convert::Convert(const std::vector& alpha, + const std::vector& beta) { + FDASSERT(alpha.size() == beta.size(), + "Convert: requires the size of alpha equal to the size of beta."); + FDASSERT(alpha.size() != 0, + "Convert: requires the size of alpha and beta > 0."); + alpha_.assign(alpha.begin(), alpha.end()); + beta_.assign(beta.begin(), beta.end()); +} + +bool Convert::CpuRun(Mat* mat) { + cv::Mat* im = mat->GetCpuMat(); + std::vector split_im; + cv::split(*im, split_im); + for (int c = 0; c < im->channels(); c++) { + split_im[c].convertTo(split_im[c], CV_32FC1, alpha_[c], beta_[c]); + } + cv::merge(split_im, *im); + return true; +} + +#ifdef ENABLE_OPENCV_CUDA +bool Convert::GpuRun(Mat* mat) { + cv::cuda::GpuMat* im = mat->GetGpuMat(); + std::vector split_im; + cv::cuda::split(*im, split_im); + for (int c = 0; c < im->channels(); c++) { + split_im[c].convertTo(split_im[c], CV_32FC1, alpha_[c], beta_[c]); + } + cv::cuda::merge(split_im, *im); + return true; +} +#endif + +bool Convert::Run(Mat* mat, const std::vector& alpha, + const std::vector& beta, ProcLib lib) { + auto c = Convert(alpha, beta); + return c(mat, lib); +} + +} // namespace vision +} // namespace fastdeploy \ No newline at end of file diff --git a/fastdeploy/vision/common/processors/convert.h b/fastdeploy/vision/common/processors/convert.h new file mode 100644 index 0000000000..5d5a5276f5 --- /dev/null +++ b/fastdeploy/vision/common/processors/convert.h @@ -0,0 +1,42 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "fastdeploy/vision/common/processors/base.h" + +namespace fastdeploy { +namespace vision { +class Convert : public Processor { + public: + Convert(const std::vector& alpha, const std::vector& beta); + + bool CpuRun(Mat* mat); +#ifdef ENABLE_OPENCV_CUDA + bool GpuRun(Mat* mat); +#endif + std::string Name() { return "Convert"; } + + // Compute `result = mat * alpha + beta` directly by channel. + // The default behavior is the same as OpenCV's convertTo method. 
+ static bool Run(Mat* mat, const std::vector& alpha, + const std::vector& beta, + ProcLib lib = ProcLib::OPENCV_CPU); + + private: + std::vector alpha_; + std::vector beta_; +}; +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/common/processors/transform.h b/fastdeploy/vision/common/processors/transform.h index 12eec8d72d..08073b4e42 100644 --- a/fastdeploy/vision/common/processors/transform.h +++ b/fastdeploy/vision/common/processors/transform.h @@ -17,6 +17,7 @@ #include "fastdeploy/vision/common/processors/cast.h" #include "fastdeploy/vision/common/processors/center_crop.h" #include "fastdeploy/vision/common/processors/color_space_convert.h" +#include "fastdeploy/vision/common/processors/convert.h" #include "fastdeploy/vision/common/processors/hwc2chw.h" #include "fastdeploy/vision/common/processors/normalize.h" #include "fastdeploy/vision/common/processors/pad.h" diff --git a/fastdeploy/vision/meituan/yolov6.cc b/fastdeploy/vision/meituan/yolov6.cc index 8f37bf89c6..8ac7377194 100644 --- a/fastdeploy/vision/meituan/yolov6.cc +++ b/fastdeploy/vision/meituan/yolov6.cc @@ -25,14 +25,14 @@ namespace meituan { void LetterBox(Mat* mat, std::vector size, std::vector color, bool _auto, bool scale_fill = false, bool scale_up = true, int stride = 32) { - float scale = std::min(size[1] * 1.0f / static_cast(mat->Height()), - size[0] * 1.0f / static_cast(mat->Width())); + float scale = std::min(size[1] * 1.0f / static_cast(mat->Height()), + size[0] * 1.0f / static_cast(mat->Width())); if (!scale_up) { scale = std::min(scale, 1.0f); } int resize_h = int(round(static_cast(mat->Height()) * scale)); - int resize_w = int(round(static_cast(mat->Width()) * scale)); + int resize_w = int(round(static_cast(mat->Width()) * scale)); int pad_w = size[0] - resize_w; int pad_h = size[1] - resize_h; @@ -85,13 +85,13 @@ bool YOLOv6::Initialize() { is_scale_up = false; stride = 32; max_wh = 4096.0f; - + if (!InitRuntime()) { FDERROR << "Failed to initialize fastdeploy backend." << std::endl; return false; } - // Check if the input shape is dynamic after Runtime already initialized, - // Note that, We need to force is_mini_pad 'false' to keep static + // Check if the input shape is dynamic after Runtime already initialized, + // Note that, We need to force is_mini_pad 'false' to keep static // shape after padding (LetterBox) when the is_dynamic_shape is 'false'. 
is_dynamic_input_ = false; auto shape = InputInfoOfRuntime(0).shape; @@ -102,7 +102,7 @@ bool YOLOv6::Initialize() { break; } } - if (!is_dynamic_input_) { + if (!is_dynamic_input_) { is_mini_pad = false; } return true; @@ -111,15 +111,15 @@ bool YOLOv6::Initialize() { bool YOLOv6::Preprocess(Mat* mat, FDTensor* output, std::map>* im_info) { // process after image load - float ratio = std::min(size[1] * 1.0f / static_cast(mat->Height()), - size[0] * 1.0f / static_cast(mat->Width())); + float ratio = std::min(size[1] * 1.0f / static_cast(mat->Height()), + size[0] * 1.0f / static_cast(mat->Width())); if (ratio != 1.0) { int interp = cv::INTER_AREA; if (ratio > 1.0) { interp = cv::INTER_LINEAR; } int resize_h = int(round(static_cast(mat->Height()) * ratio)); - int resize_w = int(round(static_cast(mat->Width()) * ratio)); + int resize_w = int(round(static_cast(mat->Width()) * ratio)); Resize::Run(mat, resize_w, resize_h, -1, -1, interp); } // yolov6's preprocess steps @@ -129,8 +129,12 @@ bool YOLOv6::Preprocess(Mat* mat, FDTensor* output, LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, is_scale_up, stride); BGR2RGB::Run(mat); - Normalize::Run(mat, std::vector(mat->Channels(), 0.0), - std::vector(mat->Channels(), 1.0)); + // Normalize::Run(mat, std::vector(mat->Channels(), 0.0), + // std::vector(mat->Channels(), 1.0)); + // Compute `result = mat * alpha + beta` directly by channel + std::vector alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f}; + std::vector beta = {0.0f, 0.0f, 0.0f}; + Convert::Run(mat, alpha, beta); // Record output shape of preprocessed image (*im_info)["output_shape"] = {static_cast(mat->Height()), diff --git a/fastdeploy/vision/ppcls/model.cc b/fastdeploy/vision/ppcls/model.cc index 915cb97512..c4e5b767c7 100644 --- a/fastdeploy/vision/ppcls/model.cc +++ b/fastdeploy/vision/ppcls/model.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include "fastdeploy/vision/ppcls/model.h" #include "fastdeploy/vision/utils/utils.h" @@ -135,6 +148,6 @@ bool Model::Predict(cv::Mat* im, ClassifyResult* result, int topk) { return true; } -} // namespace ppcls -} // namespace vision -} // namespace fastdeploy +} // namespace ppcls +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/ppcls/model.h b/fastdeploy/vision/ppcls/model.h index 36841d74c6..265f92d32b 100644 --- a/fastdeploy/vision/ppcls/model.h +++ b/fastdeploy/vision/ppcls/model.h @@ -1,7 +1,21 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #pragma once #include "fastdeploy/fastdeploy_model.h" -#include "fastdeploy/vision/common/result.h" #include "fastdeploy/vision/common/processors/transform.h" +#include "fastdeploy/vision/common/result.h" namespace fastdeploy { namespace vision { diff --git a/fastdeploy/vision/ppcls/ppcls_pybind.cc b/fastdeploy/vision/ppcls/ppcls_pybind.cc index ef3fffee8e..1abc0b2b7c 100644 --- a/fastdeploy/vision/ppcls/ppcls_pybind.cc +++ b/fastdeploy/vision/ppcls/ppcls_pybind.cc @@ -14,7 +14,7 @@ #include "fastdeploy/pybind/main.h" namespace fastdeploy { -void BindPpClsModel(pybind11::module& m) { +void BindPPCls(pybind11::module& m) { auto ppcls_module = m.def_submodule("ppcls", "Module to deploy PaddleClas."); pybind11::class_(ppcls_module, "Model") .def(pybind11::init(ppdet_module, + "PPYOLOE") + .def(pybind11::init()) + .def("predict", [](vision::ppdet::PPYOLOE& self, pybind11::array& data, + float conf_threshold, float nms_iou_threshold) { + auto mat = PyArrayToCvMat(data); + vision::DetectionResult res; + self.Predict(&mat, &res, conf_threshold, nms_iou_threshold); + return res; + }); +} +} // namespace fastdeploy diff --git a/fastdeploy/vision/ppdet/ppyoloe.cc b/fastdeploy/vision/ppdet/ppyoloe.cc new file mode 100644 index 0000000000..c215ecb0ca --- /dev/null +++ b/fastdeploy/vision/ppdet/ppyoloe.cc @@ -0,0 +1,170 @@ +#include "fastdeploy/vision/ppdet/ppyoloe.h" +#include "fastdeploy/vision/utils/utils.h" +#include "yaml-cpp/yaml.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +PPYOLOE::PPYOLOE(const std::string& model_file, const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option, + const Frontend& model_format) { + config_file_ = config_file; + valid_cpu_backends = {Backend::ORT, Backend::PDINFER}; + valid_gpu_backends = {Backend::ORT, Backend::PDINFER}; + runtime_option = custom_option; + runtime_option.model_format = model_format; + runtime_option.model_file = model_file; + runtime_option.params_file = params_file; + initialized = Initialize(); +} + +bool PPYOLOE::Initialize() { + if (!BuildPreprocessPipelineFromConfig()) { + std::cout << "Failed to build preprocess pipeline from configuration file." + << std::endl; + return false; + } + if (!InitRuntime()) { + std::cout << "Failed to initialize fastdeploy backend." << std::endl; + return false; + } + return true; +} + +bool PPYOLOE::BuildPreprocessPipelineFromConfig() { + processors_.clear(); + YAML::Node cfg; + try { + cfg = YAML::LoadFile(config_file_); + } catch (YAML::BadFile& e) { + std::cout << "Failed to load yaml file " << config_file_ + << ", maybe you should check this file." << std::endl; + return false; + } + + if (cfg["arch"].as() != "YOLO") { + std::cout << "Require the arch of model is YOLO, but arch defined in " + "config file is " + << cfg["arch"].as() << "." 
<< std::endl; + return false; + } + processors_.push_back(std::make_shared<BGR2RGB>()); + + for (const auto& op : cfg["Preprocess"]) { + std::string op_name = op["type"].as<std::string>(); + if (op_name == "NormalizeImage") { + auto mean = op["mean"].as<std::vector<float>>(); + auto std = op["std"].as<std::vector<float>>(); + bool is_scale = op["is_scale"].as<bool>(); + processors_.push_back(std::make_shared<Normalize>(mean, std, is_scale)); + } else if (op_name == "Resize") { + bool keep_ratio = op["keep_ratio"].as<bool>(); + auto target_size = op["target_size"].as<std::vector<int>>(); + int interp = op["interp"].as<int>(); + FDASSERT(target_size.size() == 2, + "Require size of target_size be 2, but now it's " + + std::to_string(target_size.size()) + "."); + FDASSERT(!keep_ratio, + "Only support keep_ratio is false while deploy " + "PaddleDetection model."); + int width = target_size[1]; + int height = target_size[0]; + processors_.push_back( + std::make_shared<Resize>(width, height, -1.0, -1.0, interp, false)); + } else if (op_name == "Permute") { + processors_.push_back(std::make_shared<HWC2CHW>()); + } else { + std::cout << "Unexpected preprocess operator: " << op_name << "." + << std::endl; + return false; + } + } + return true; +} + +bool PPYOLOE::Preprocess(Mat* mat, std::vector<FDTensor>* outputs) { + int origin_w = mat->Width(); + int origin_h = mat->Height(); + for (size_t i = 0; i < processors_.size(); ++i) { + if (!(*(processors_[i].get()))(mat)) { + std::cout << "Failed to process image data in " << processors_[i]->Name() + << "." << std::endl; + return false; + } + } + + outputs->resize(2); + (*outputs)[0].name = InputInfoOfRuntime(0).name; + mat->ShareWithTensor(&((*outputs)[0])); + + // reshape to [1, c, h, w] + (*outputs)[0].shape.insert((*outputs)[0].shape.begin(), 1); + + (*outputs)[1].Allocate({1, 2}, FDDataType::FP32, InputInfoOfRuntime(1).name); + float* ptr = static_cast<float*>((*outputs)[1].MutableData()); + ptr[0] = mat->Height() * 1.0 / mat->Height(); + ptr[1] = mat->Width() * 1.0 / mat->Width(); + return true; +} + +bool PPYOLOE::Postprocess(std::vector<FDTensor>& infer_result, + DetectionResult* result, float conf_threshold, + float nms_threshold) { + FDASSERT(infer_result[1].shape[0] == 1, + "Only support batch = 1 in FastDeploy now."); + int box_num = 0; + if (infer_result[1].dtype == FDDataType::INT32) { + box_num = *(static_cast<int32_t*>(infer_result[1].Data())); + } else if (infer_result[1].dtype == FDDataType::INT64) { + box_num = *(static_cast<int64_t*>(infer_result[1].Data())); + } else { + FDASSERT( + false, + "The output box_num of PPYOLOE model should be type of int32/int64."); + } + result->Reserve(box_num); + float* box_data = static_cast<float*>(infer_result[0].Data()); + for (size_t i = 0; i < box_num; ++i) { + if (box_data[i * 6 + 1] < conf_threshold) { + continue; + } + result->label_ids.push_back(box_data[i * 6]); + result->scores.push_back(box_data[i * 6 + 1]); + result->boxes.emplace_back( + std::array<float, 4>{box_data[i * 6 + 2], box_data[i * 6 + 3], + box_data[i * 6 + 4] - box_data[i * 6 + 2], + box_data[i * 6 + 5] - box_data[i * 6 + 3]}); + } + return true; +} + +bool PPYOLOE::Predict(cv::Mat* im, DetectionResult* result, + float conf_threshold, float iou_threshold) { + Mat mat(*im); + std::vector<FDTensor> processed_data; + if (!Preprocess(&mat, &processed_data)) { + FDERROR << "Failed to preprocess input data while using model:" + << ModelName() << "." << std::endl; + return false; + } + + std::vector<FDTensor> infer_result; + if (!Infer(processed_data, &infer_result)) { + FDERROR << "Failed to inference while using model:" << ModelName() << "."
+ << std::endl; + return false; + } + + if (!Postprocess(infer_result, result, conf_threshold, iou_threshold)) { + FDERROR << "Failed to postprocess while using model:" << ModelName() << "." + << std::endl; + return false; + } + return true; +} + +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/ppdet/ppyoloe.h b/fastdeploy/vision/ppdet/ppyoloe.h new file mode 100644 index 0000000000..a3db268ca4 --- /dev/null +++ b/fastdeploy/vision/ppdet/ppyoloe.h @@ -0,0 +1,44 @@ +#pragma once +#include "fastdeploy/fastdeploy_model.h" +#include "fastdeploy/vision/common/processors/transform.h" +#include "fastdeploy/vision/common/result.h" + +#include "fastdeploy/vision/utils/utils.h" + +namespace fastdeploy { +namespace vision { +namespace ppdet { + +class FASTDEPLOY_DECL PPYOLOE : public FastDeployModel { + public: + PPYOLOE(const std::string& model_file, const std::string& params_file, + const std::string& config_file, + const RuntimeOption& custom_option = RuntimeOption(), + const Frontend& model_format = Frontend::PADDLE); + + std::string ModelName() const { return "PaddleDetection/PPYOLOE"; } + + virtual bool Initialize(); + + virtual bool BuildPreprocessPipelineFromConfig(); + + virtual bool Preprocess(Mat* mat, std::vector* outputs); + + virtual bool Postprocess(std::vector& infer_result, + DetectionResult* result, float conf_threshold, + float nms_threshold); + + virtual bool Predict(cv::Mat* im, DetectionResult* result, + float conf_threshold = 0.5, float nms_threshold = 0.7); + + private: + std::vector> processors_; + std::string config_file_; + // PaddleDetection can export model without nms + // This flag will help us to handle the different + // situation + bool has_nms_; +}; +} // namespace ppdet +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/ultralytics/yolov5.cc b/fastdeploy/vision/ultralytics/yolov5.cc index 193cfe9794..0b7e50e735 100644 --- a/fastdeploy/vision/ultralytics/yolov5.cc +++ b/fastdeploy/vision/ultralytics/yolov5.cc @@ -87,8 +87,8 @@ bool YOLOv5::Initialize() { FDERROR << "Failed to initialize fastdeploy backend." << std::endl; return false; } - // Check if the input shape is dynamic after Runtime already initialized, - // Note that, We need to force is_mini_pad 'false' to keep static + // Check if the input shape is dynamic after Runtime already initialized, + // Note that, We need to force is_mini_pad 'false' to keep static // shape after padding (LetterBox) when the is_dynamic_shape is 'false'. 
is_dynamic_input_ = false; auto shape = InputInfoOfRuntime(0).shape; @@ -99,7 +99,7 @@ bool YOLOv5::Initialize() { break; } } - if (!is_dynamic_input_) { + if (!is_dynamic_input_) { is_mini_pad = false; } return true; @@ -126,8 +126,12 @@ bool YOLOv5::Preprocess(Mat* mat, FDTensor* output, LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, is_scale_up, stride); BGR2RGB::Run(mat); - Normalize::Run(mat, std::vector(mat->Channels(), 0.0), - std::vector(mat->Channels(), 1.0)); + // Normalize::Run(mat, std::vector(mat->Channels(), 0.0), + // std::vector(mat->Channels(), 1.0)); + // Compute `result = mat * alpha + beta` directly by channel + std::vector alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f}; + std::vector beta = {0.0f, 0.0f, 0.0f}; + Convert::Run(mat, alpha, beta); // Record output shape of preprocessed image (*im_info)["output_shape"] = {static_cast(mat->Height()), @@ -198,6 +202,11 @@ bool YOLOv5::Postprocess( result->scores.push_back(confidence); } } + + if (result->boxes.size() == 0) { + return true; + } + utils::NMS(result, nms_iou_threshold); // scale the boxes to the origin image shape diff --git a/fastdeploy/vision/utils/sort_det_res.cc b/fastdeploy/vision/utils/sort_det_res.cc index e4a0db9761..93dbb69694 100644 --- a/fastdeploy/vision/utils/sort_det_res.cc +++ b/fastdeploy/vision/utils/sort_det_res.cc @@ -68,7 +68,11 @@ void MergeSort(DetectionResult* result, size_t low, size_t high) { void SortDetectionResult(DetectionResult* result) { size_t low = 0; - size_t high = result->scores.size() - 1; + size_t high = result->scores.size(); + if (high == 0) { + return; + } + high = high - 1; MergeSort(result, low, high); } diff --git a/fastdeploy/vision/vision_pybind.cc b/fastdeploy/vision/vision_pybind.cc index 41ada5541a..0334303ce6 100644 --- a/fastdeploy/vision/vision_pybind.cc +++ b/fastdeploy/vision/vision_pybind.cc @@ -16,7 +16,8 @@ namespace fastdeploy { -void BindPpClsModel(pybind11::module& m); +void BindPPCls(pybind11::module& m); +void BindPPDet(pybind11::module& m); void BindWongkinyiu(pybind11::module& m); void BindUltralytics(pybind11::module& m); void BindMeituan(pybind11::module& m); @@ -41,13 +42,14 @@ void BindVision(pybind11::module& m) { .def("__repr__", &vision::DetectionResult::Str) .def("__str__", &vision::DetectionResult::Str); - BindPpClsModel(m); + BindPPCls(m); + BindPPDet(m); BindUltralytics(m); BindWongkinyiu(m); BindMeituan(m); BindMegvii(m); #ifdef ENABLE_VISION_VISUALIZE BindVisualize(m); -#endif +#endif } -} // namespace fastdeploy +} // namespace fastdeploy diff --git a/fastdeploy/vision/visualize/detection.cc b/fastdeploy/vision/visualize/detection.cc index d0c4116148..5b5538bff7 100644 --- a/fastdeploy/vision/visualize/detection.cc +++ b/fastdeploy/vision/visualize/detection.cc @@ -43,7 +43,7 @@ void Visualize::VisDetection(cv::Mat* im, const DetectionResult& result, } std::string text = id + "," + score; int font = cv::FONT_HERSHEY_SIMPLEX; - cv::Size text_size = cv::getTextSize(text, font, font_size, 0.5, nullptr); + cv::Size text_size = cv::getTextSize(text, font, font_size, 1, nullptr); cv::Point origin; origin.x = rect.x; origin.y = rect.y; @@ -52,7 +52,7 @@ void Visualize::VisDetection(cv::Mat* im, const DetectionResult& result, text_size.width, text_size.height); cv::rectangle(*im, rect, rect_color, line_size); cv::putText(*im, text, origin, font, font_size, cv::Scalar(255, 255, 255), - 0.5); + 1); } } diff --git a/model_zoo/vision/ppyoloe/README.md b/model_zoo/vision/ppyoloe/README.md new file mode 100644 index 
0000000000..42d18104ad --- /dev/null +++ b/model_zoo/vision/ppyoloe/README.md @@ -0,0 +1,52 @@ +# PaddleDetection/PPYOLOE部署示例 + +- 当前支持PaddleDetection版本为[release/2.4](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4) + +本文档说明如何进行[PPYOLOE](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/ppyoloe)的快速部署推理。本目录结构如下 +``` +. +├── cpp # C++ 代码目录 +│   ├── CMakeLists.txt # C++ 代码编译CMakeLists文件 +│   ├── README.md # C++ 代码编译部署文档 +│   └── ppyoloe.cc # C++ 示例代码 +├── README.md # PPYOLOE 部署文档 +└── ppyoloe.py # Python示例代码 +``` + +## 安装FastDeploy + +使用如下命令安装FastDeploy,注意到此处安装的是`vision-cpu`,也可根据需求安装`vision-gpu` +``` +# 安装fastdeploy-python工具 +pip install fastdeploy-python +``` + +## Python部署 + +执行如下代码即会自动下载PPYOLOE模型和测试图片 +``` +python ppyoloe.py +``` + +执行完成后会将可视化结果保存在本地`vis_result.jpg`,同时输出检测结果如下 +``` +DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] +162.380249,132.057449, 463.178345, 413.167114, 0.962918, 33 +414.914642,141.148666, 91.275269, 308.688293, 0.951003, 0 +163.449234,129.669067, 35.253891, 135.111786, 0.900734, 0 +267.232239,142.290436, 31.578918, 126.329773, 0.848709, 0 +581.790833,179.027115, 30.893127, 135.484940, 0.837986, 0 +104.407021,72.602615, 22.900627, 75.469055, 0.796468, 0 +348.795380,70.122147, 18.806061, 85.829330, 0.785557, 0 +364.118683,92.457428, 17.437622, 89.212891, 0.774282, 0 +75.180283,192.470490, 41.898407, 55.552414, 0.712569, 56 +328.133759,61.894299, 19.100616, 65.633575, 0.710519, 0 +504.797760,181.732574, 107.740814, 248.115082, 0.708902, 0 +379.063080,64.762360, 15.956146, 68.312546, 0.680725, 0 +25.858747,186.564178, 34.958130, 56.007080, 0.580415, 0 +``` + +## 其它文档 + +- [C++部署](./cpp/README.md) +- [PPYOLOE API文档](./api.md) diff --git a/model_zoo/vision/ppyoloe/api.md b/model_zoo/vision/ppyoloe/api.md new file mode 100644 index 0000000000..1c5cbcaadb --- /dev/null +++ b/model_zoo/vision/ppyoloe/api.md @@ -0,0 +1,74 @@ +# PPYOLOE API说明 + +## Python API + +### PPYOLOE类 +``` +fastdeploy.vision.ppdet.PPYOLOE(model_file, params_file, config_file, runtime_option=None, model_format=fd.Frontend.PADDLE) +``` +PPYOLOE模型加载和初始化,需同时提供model_file和params_file, 当前仅支持model_format为Paddle格式 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径 +> * **config_file**(str): 模型推理配置文件 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式 + +#### predict函数 +> ``` +> PPYOLOE.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) +> ``` +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 +> > * **conf_threshold**(float): 检测框置信度过滤阈值 +> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值(当模型中包含nms处理时,此参数自动无效) + +示例代码参考[ppyoloe.py](./ppyoloe.py) + + +## C++ API + +### PPYOLOE类 +``` +fastdeploy::vision::ppdet::PPYOLOE( + const string& model_file, + const string& params_file, + const string& config_file, + const RuntimeOption& runtime_option = RuntimeOption(), + const Frontend& model_format = Frontend::PADDLE) +``` +PPYOLOE模型加载和初始化,需同时提供model_file和params_file, 当前仅支持model_format为Paddle格式 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径 +> * **config_file**(str): 模型推理配置文件 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式 + +#### Predict函数 +> ``` +> PPYOLOE::Predict(cv::Mat* im, DetectionResult* result, +> float conf_threshold = 0.5, +> float nms_iou_threshold = 0.7) +> ``` +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式
+> > * **result**: 检测结果,包括检测框,各个框的置信度 +> > * **conf_threshold**: 检测框置信度过滤阈值 +> > * **nms_iou_threshold**: NMS处理过程中iou阈值(当模型中包含nms处理时,此参数自动无效) + +示例代码参考[cpp/ppyoloe.cc](cpp/ppyoloe.cc) + +## 其它API使用 + +- [模型部署RuntimeOption配置](../../../docs/api/runtime_option.md) diff --git a/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt b/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt new file mode 100644 index 0000000000..e681566517 --- /dev/null +++ b/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt @@ -0,0 +1,17 @@ +PROJECT(ppyoloe_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.16) + +# 在低版本ABI环境中,通过如下代码进行兼容性编译 +# add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) + +# 指定下载解压后的fastdeploy库路径 +set(FASTDEPLOY_INSTALL_DIR ${PROJECT_SOURCE_DIR}/fastdeploy-linux-x64-0.0.3/) + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(ppyoloe_demo ${PROJECT_SOURCE_DIR}/ppyoloe.cc) +# 添加FastDeploy库依赖 +target_link_libraries(ppyoloe_demo ${FASTDEPLOY_LIBS}) diff --git a/model_zoo/vision/ppyoloe/cpp/README.md b/model_zoo/vision/ppyoloe/cpp/README.md new file mode 100644 index 0000000000..1027c2eeb2 --- /dev/null +++ b/model_zoo/vision/ppyoloe/cpp/README.md @@ -0,0 +1,39 @@ +# 编译PPYOLOE示例 + + +``` +# 下载和解压预测库 +wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz +tar xvf fastdeploy-linux-x64-0.0.3.tgz + +# 编译示例代码 +mkdir build && cd build +cmake .. +make -j + +# 下载模型和图片 +wget https://bj.bcebos.com/paddle2onnx/fastdeploy/models/ppdet/ppyoloe_crn_l_300e_coco.tgz +tar xvf ppyoloe_crn_l_300e_coco.tgz +wget https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/demo/000000014439_640x640.jpg + +# 执行 +./ppyoloe_demo +``` + +执行完后可视化的结果保存在本地`vis_result.jpg`,同时会将检测框输出在终端,如下所示 +``` +DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] +162.380249,132.057449, 463.178345, 413.167114, 0.962918, 33 +414.914642,141.148666, 91.275269, 308.688293, 0.951003, 0 +163.449234,129.669067, 35.253891, 135.111786, 0.900734, 0 +267.232239,142.290436, 31.578918, 126.329773, 0.848709, 0 +581.790833,179.027115, 30.893127, 135.484940, 0.837986, 0 +104.407021,72.602615, 22.900627, 75.469055, 0.796468, 0 +348.795380,70.122147, 18.806061, 85.829330, 0.785557, 0 +364.118683,92.457428, 17.437622, 89.212891, 0.774282, 0 +75.180283,192.470490, 41.898407, 55.552414, 0.712569, 56 +328.133759,61.894299, 19.100616, 65.633575, 0.710519, 0 +504.797760,181.732574, 107.740814, 248.115082, 0.708902, 0 +379.063080,64.762360, 15.956146, 68.312546, 0.680725, 0 +25.858747,186.564178, 34.958130, 56.007080, 0.580415, 0 +``` diff --git a/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc b/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc new file mode 100644 index 0000000000..e63f29e62a --- /dev/null +++ b/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc @@ -0,0 +1,51 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +#include "fastdeploy/vision.h" + +int main() { + namespace vis = fastdeploy::vision; + + std::string model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel"; + std::string params_file = "ppyoloe_crn_l_300e_coco/model.pdiparams"; + std::string config_file = "ppyoloe_crn_l_300e_coco/infer_cfg.yml"; + std::string img_path = "000000014439_640x640.jpg"; + std::string vis_path = "vis.jpeg"; + + auto model = vis::ppdet::PPYOLOE(model_file, params_file, config_file); + if (!model.Initialized()) { + std::cerr << "Init Failed." << std::endl; + return -1; + } + + cv::Mat im = cv::imread(img_path); + cv::Mat vis_im = im.clone(); + + vis::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Prediction Failed." << std::endl; + return -1; + } else { + std::cout << "Prediction Done!" << std::endl; + } + + // 输出预测框结果 + std::cout << res.Str() << std::endl; + + // 可视化预测结果 + vis::Visualize::VisDetection(&vis_im, res); + cv::imwrite(vis_path, vis_im); + std::cout << "Detect Done! Saved: " << vis_path << std::endl; + return 0; +} diff --git a/model_zoo/vision/ppyoloe/ppyoloe.py b/model_zoo/vision/ppyoloe/ppyoloe.py new file mode 100644 index 0000000000..7d79dfd8cf --- /dev/null +++ b/model_zoo/vision/ppyoloe/ppyoloe.py @@ -0,0 +1,24 @@ +import fastdeploy as fd +import cv2 + +# 下载模型和测试图片 +model_url = "https://bj.bcebos.com/paddle2onnx/fastdeploy/models/ppdet/ppyoloe_crn_l_300e_coco.tgz" +test_jpg_url = "https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/demo/000000014439_640x640.jpg" +fd.download_and_decompress(model_url, ".") +fd.download(test_jpg_url, ".", show_progress=True) + +# 加载模型 +model = fd.vision.ppdet.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel", + "ppyoloe_crn_l_300e_coco/model.pdiparams", + "ppyoloe_crn_l_300e_coco/infer_cfg.yml") + +# 预测图片 +im = cv2.imread("000000014439_640x640.jpg") +result = model.predict(im, conf_threshold=0.5) + +# 可视化结果 +fd.vision.visualize.vis_detection(im, result) +cv2.imwrite("vis_result.jpg", im) + +# 输出预测结果 +print(result) diff --git a/setup.py b/setup.py index f0ff3f16de..e76f057b1c 100644 --- a/setup.py +++ b/setup.py @@ -49,7 +49,8 @@ setup_configs["ENABLE_TRT_BACKEND"] = os.getenv("ENABLE_TRT_BACKEND", "OFF") setup_configs["WITH_GPU"] = os.getenv("WITH_GPU", "OFF") setup_configs["TRT_DIRECTORY"] = os.getenv("TRT_DIRECTORY", "UNDEFINED") -setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY", "/usr/local/cuda") +setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY", + "/usr/local/cuda") TOP_DIR = os.path.realpath(os.path.dirname(__file__)) SRC_DIR = os.path.join(TOP_DIR, "fastdeploy") @@ -325,17 +326,32 @@ def run(self): shutil.copy("LICENSE", "fastdeploy") depend_libs = list() - # modify the search path of libraries - command = "patchelf --set-rpath '$ORIGIN/libs/' .setuptools-cmake-build/fastdeploy_main.cpython-36m-x86_64-linux-gnu.so" - # The sw_64 not suppot patchelf, so we just disable that. 
- if platform.machine() != 'sw_64' and platform.machine() != 'mips64': - assert os.system(command) == 0, "patch fastdeploy_main.cpython-36m-x86_64-linux-gnu.so failed, the command: {}".format(command) + if platform.system().lower() == "linux": + for f in os.listdir(".setuptools-cmake-build"): + full_name = os.path.join(".setuptools-cmake-build", f) + if not os.path.isfile(full_name): + continue + if not full_name.count("fastdeploy_main.cpython-"): + continue + if not full_name.endswith(".so"): + continue + # modify the search path of libraries + command = "patchelf --set-rpath '$ORIGIN/libs/' {}".format( + full_name) + # The sw_64 not suppot patchelf, so we just disable that. + if platform.machine() != 'sw_64' and platform.machine( + ) != 'mips64': + assert os.system( + command + ) == 0, "patch fastdeploy_main.cpython-36m-x86_64-linux-gnu.so failed, the command: {}".format( + command) for f in os.listdir(".setuptools-cmake-build"): if not os.path.isfile(os.path.join(".setuptools-cmake-build", f)): continue if f.count("libfastdeploy") > 0: - shutil.copy(os.path.join(".setuptools-cmake-build", f), "fastdeploy/libs") + shutil.copy( + os.path.join(".setuptools-cmake-build", f), "fastdeploy/libs") for dirname in os.listdir(".setuptools-cmake-build/third_libs/install"): for lib in os.listdir( os.path.join(".setuptools-cmake-build/third_libs/install", From a182893d9232c3ff0ecda5d07ec6517ddca8f449 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 20 Jul 2022 07:38:15 +0000 Subject: [PATCH 35/58] first commit for yolor --- examples/CMakeLists.txt | 25 +- .../{ppdet_ppyoloe.cc => wongkinyiu_yolor.cc} | 15 +- fastdeploy/__init__.py | 2 +- fastdeploy/download.py | 2 +- fastdeploy/utils/utils.h | 27 +- fastdeploy/vision.h | 6 +- fastdeploy/vision/__init__.py | 1 - .../vision/common/processors/convert.cc | 62 ----- fastdeploy/vision/common/processors/convert.h | 42 --- .../vision/common/processors/transform.h | 1 - fastdeploy/vision/meituan/yolov6.cc | 8 +- fastdeploy/vision/ppcls/model.cc | 19 +- fastdeploy/vision/ppcls/model.h | 20 +- fastdeploy/vision/ppcls/ppcls_pybind.cc | 4 +- fastdeploy/vision/ppdet/__init__.py | 39 --- fastdeploy/vision/ppdet/ppdet_pybind.cc | 32 --- fastdeploy/vision/ppdet/ppyoloe.cc | 170 ------------ fastdeploy/vision/ppdet/ppyoloe.h | 44 ---- fastdeploy/vision/ultralytics/yolov5.cc | 13 +- fastdeploy/vision/utils/sort_det_res.cc | 6 +- fastdeploy/vision/vision_pybind.cc | 6 +- fastdeploy/vision/visualize/detection.cc | 8 +- fastdeploy/vision/wongkinyiu/__init__.py | 98 +++++++ .../vision/wongkinyiu/wongkinyiu_pybind.cc | 21 +- fastdeploy/vision/wongkinyiu/yolor.cc | 243 ++++++++++++++++++ fastdeploy/vision/wongkinyiu/yolor.h | 95 +++++++ model_zoo/vision/ppyoloe/README.md | 52 ---- model_zoo/vision/ppyoloe/cpp/README.md | 39 --- model_zoo/vision/ppyoloe/ppyoloe.py | 24 -- model_zoo/vision/yolor/README.md | 67 +++++ model_zoo/vision/{ppyoloe => yolor}/api.md | 31 +-- .../{ppyoloe => yolor}/cpp/CMakeLists.txt | 6 +- model_zoo/vision/yolor/cpp/README.md | 51 ++++ .../cpp/ppyoloe.cc => yolor/cpp/yolor.cc} | 17 +- model_zoo/vision/yolor/yolor.py | 21 ++ setup.py | 27 +- 36 files changed, 679 insertions(+), 665 deletions(-) rename examples/vision/{ppdet_ppyoloe.cc => wongkinyiu_yolor.cc} (75%) delete mode 100644 fastdeploy/vision/common/processors/convert.cc delete mode 100644 fastdeploy/vision/common/processors/convert.h delete mode 100644 fastdeploy/vision/ppdet/__init__.py delete mode 100644 fastdeploy/vision/ppdet/ppdet_pybind.cc delete mode 100644 
fastdeploy/vision/ppdet/ppyoloe.cc delete mode 100644 fastdeploy/vision/ppdet/ppyoloe.h create mode 100644 fastdeploy/vision/wongkinyiu/yolor.cc create mode 100644 fastdeploy/vision/wongkinyiu/yolor.h delete mode 100644 model_zoo/vision/ppyoloe/README.md delete mode 100644 model_zoo/vision/ppyoloe/cpp/README.md delete mode 100644 model_zoo/vision/ppyoloe/ppyoloe.py create mode 100644 model_zoo/vision/yolor/README.md rename model_zoo/vision/{ppyoloe => yolor}/api.md (56%) rename model_zoo/vision/{ppyoloe => yolor}/cpp/CMakeLists.txt (75%) create mode 100644 model_zoo/vision/yolor/cpp/README.md rename model_zoo/vision/{ppyoloe/cpp/ppyoloe.cc => yolor/cpp/yolor.cc} (66%) create mode 100644 model_zoo/vision/yolor/yolor.py diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 112193c86a..67361223c6 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,26 +1,25 @@ -function(add_fastdeploy_executable FIELD CC_FILE) +function(add_fastdeploy_executable field url model) # temp target name/file var in function scope - set(TEMP_TARGET_FILE ${CC_FILE}) - string(REGEX MATCHALL "[0-9A-Za-z_]*.cc" FILE_NAME ${CC_FILE}) - string(REGEX REPLACE ".cc" "" FILE_PREFIX ${FILE_NAME}) - set(TEMP_TARGET_NAME ${FIELD}_${FILE_PREFIX}) + set(TEMP_TARGET_FILE ${PROJECT_SOURCE_DIR}/examples/${field}/${url}_${model}.cc) + set(TEMP_TARGET_NAME ${field}_${url}_${model}) if (EXISTS ${TEMP_TARGET_FILE} AND TARGET fastdeploy) add_executable(${TEMP_TARGET_NAME} ${TEMP_TARGET_FILE}) target_link_libraries(${TEMP_TARGET_NAME} PUBLIC fastdeploy) - message(STATUS " Added FastDeploy Executable : ${TEMP_TARGET_NAME}") + message(STATUS "Found source file: [${field}/${url}_${model}.cc], ADD!!! fastdeploy executable: [${TEMP_TARGET_NAME}] !") + else () + message(WARNING "Can not found source file: [${field}/${url}_${model}.cc], SKIP!!! fastdeploy executable: [${TEMP_TARGET_NAME}] !") endif() unset(TEMP_TARGET_FILE) unset(TEMP_TARGET_NAME) endfunction() # vision examples -if(WITH_VISION_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples/vision) - message(STATUS "") - message(STATUS "*************FastDeploy Examples Summary**********") - file(GLOB ALL_VISION_EXAMPLE_SRCS ${PROJECT_SOURCE_DIR}/examples/vision/*.cc) - foreach(_CC_FILE ${ALL_VISION_EXAMPLE_SRCS}) - add_fastdeploy_executable(vision ${_CC_FILE}) - endforeach() +if (WITH_VISION_EXAMPLES) + add_fastdeploy_executable(vision ultralytics yolov5) + add_fastdeploy_executable(vision meituan yolov6) + add_fastdeploy_executable(vision wongkinyiu yolov7) + add_fastdeploy_executable(vision megvii yolox) + add_fastdeploy_executable(vision wongkinyiu yolor) endif() # other examples ... 
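The hunk above re-registers each example explicitly and adds a `vision_wongkinyiu_yolor` target for the new model. For readers following along, below is a minimal Python sketch of the matching `model_zoo/vision/yolor/yolor.py` flow; it assumes the `fastdeploy.vision.wongkinyiu.YOLOR` Python wrapper mirrors the other detector wrappers in this series, and the model and image paths are placeholders rather than files shipped with this patch:

```python
import cv2
import fastdeploy as fd

# Placeholder paths: export yolor.onnx from the WongKinYiu/yolor repo first.
model = fd.vision.wongkinyiu.YOLOR("yolor.onnx")

im = cv2.imread("horses.jpg")
result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)

# Draw the predicted boxes in place and save the visualization.
fd.vision.visualize.vis_detection(im, result)
cv2.imwrite("vis_result.jpg", im)
print(result)
```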
diff --git a/examples/vision/ppdet_ppyoloe.cc b/examples/vision/wongkinyiu_yolor.cc similarity index 75% rename from examples/vision/ppdet_ppyoloe.cc rename to examples/vision/wongkinyiu_yolor.cc index b234021c92..abdca2b7ff 100644 --- a/examples/vision/ppdet_ppyoloe.cc +++ b/examples/vision/wongkinyiu_yolor.cc @@ -17,17 +17,18 @@ int main() { namespace vis = fastdeploy::vision; - std::string model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel"; - std::string params_file = "ppyoloe_crn_l_300e_coco/model.pdiparams"; - std::string config_file = "ppyoloe_crn_l_300e_coco/infer_cfg.yml"; - std::string img_path = "test.jpeg"; - std::string vis_path = "vis.jpeg"; + std::string model_file = "../resources/models/yolor.onnx"; + std::string img_path = "../resources/images/horses.jpg"; + std::string vis_path = "../resources/outputs/wongkinyiu_yolor_vis_result.jpg"; - auto model = vis::ppdet::PPYOLOE(model_file, params_file, config_file); + auto model = vis::wongkinyiu::YOLOR(model_file); if (!model.Initialized()) { - std::cerr << "Init Failed." << std::endl; + std::cerr << "Init Failed! Model: " << model_file << std::endl; return -1; + } else { + std::cout << "Init Done! Model:" << model_file << std::endl; } + model.EnableDebug(); cv::Mat im = cv::imread(img_path); cv::Mat vis_im = im.clone(); diff --git a/fastdeploy/__init__.py b/fastdeploy/__init__.py index 68006c1bed..500e7cc42a 100644 --- a/fastdeploy/__init__.py +++ b/fastdeploy/__init__.py @@ -17,7 +17,7 @@ from .fastdeploy_runtime import * from . import fastdeploy_main as C from . import vision -from .download import download, download_and_decompress +from .download import download def TensorInfoStr(tensor_info): diff --git a/fastdeploy/download.py b/fastdeploy/download.py index 67f21d8e76..e00af098df 100644 --- a/fastdeploy/download.py +++ b/fastdeploy/download.py @@ -156,7 +156,7 @@ def decompress(fname): def url2dir(url, path, rename=None): full_name = download(url, path, rename, show_progress=True) - print("File is donwloaded, now extracting...") + print("SDK is donwloaded, now extracting...") if url.count(".tgz") > 0 or url.count(".tar") > 0 or url.count("zip") > 0: return decompress(full_name) diff --git a/fastdeploy/utils/utils.h b/fastdeploy/utils/utils.h index 9312084265..23ca6ee51a 100644 --- a/fastdeploy/utils/utils.h +++ b/fastdeploy/utils/utils.h @@ -26,10 +26,10 @@ #define FASTDEPLOY_DECL __declspec(dllexport) #else #define FASTDEPLOY_DECL __declspec(dllimport) -#endif // FASTDEPLOY_LIB +#endif // FASTDEPLOY_LIB #else #define FASTDEPLOY_DECL __attribute__((visibility("default"))) -#endif // _WIN32 +#endif // _WIN32 namespace fastdeploy { @@ -42,7 +42,8 @@ class FASTDEPLOY_DECL FDLogger { } explicit FDLogger(bool verbose, const std::string& prefix = "[FastDeploy]"); - template FDLogger& operator<<(const T& val) { + template + FDLogger& operator<<(const T& val) { if (!verbose_) { return *this; } @@ -64,18 +65,14 @@ class FASTDEPLOY_DECL FDLogger { bool verbose_ = true; }; -#ifndef __REL_FILE__ -#define __REL_FILE__ __FILE__ -#endif +#define FDERROR \ + FDLogger(true, "[ERROR]") << __REL_FILE__ << "(" << __LINE__ \ + << ")::" << __FUNCTION__ << "\t" -#define FDERROR \ - FDLogger(true, "[ERROR]") \ - << __REL_FILE__ << "(" << __LINE__ << ")::" << __FUNCTION__ << "\t" - -#define FDASSERT(condition, message) \ - if (!(condition)) { \ - FDERROR << message << std::endl; \ - std::abort(); \ +#define FDASSERT(condition, message) \ + if (!(condition)) { \ + FDERROR << message << std::endl; \ + std::abort(); \ } -} // namespace fastdeploy +} 
// namespace fastdeploy diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h index cafe310c70..4398463251 100644 --- a/fastdeploy/vision.h +++ b/fastdeploy/vision.h @@ -15,12 +15,12 @@ #include "fastdeploy/core/config.h" #ifdef ENABLE_VISION +#include "fastdeploy/vision/megvii/yolox.h" +#include "fastdeploy/vision/meituan/yolov6.h" #include "fastdeploy/vision/ppcls/model.h" -#include "fastdeploy/vision/ppdet/ppyoloe.h" #include "fastdeploy/vision/ultralytics/yolov5.h" +#include "fastdeploy/vision/wongkinyiu/yolor.h" #include "fastdeploy/vision/wongkinyiu/yolov7.h" -#include "fastdeploy/vision/meituan/yolov6.h" -#include "fastdeploy/vision/megvii/yolox.h" #endif #include "fastdeploy/vision/visualize/visualize.h" diff --git a/fastdeploy/vision/__init__.py b/fastdeploy/vision/__init__.py index 6acbf0c376..7122bede0b 100644 --- a/fastdeploy/vision/__init__.py +++ b/fastdeploy/vision/__init__.py @@ -15,7 +15,6 @@ from . import evaluation from . import ppcls -from . import ppdet from . import ultralytics from . import meituan from . import megvii diff --git a/fastdeploy/vision/common/processors/convert.cc b/fastdeploy/vision/common/processors/convert.cc deleted file mode 100644 index a7ca6de07a..0000000000 --- a/fastdeploy/vision/common/processors/convert.cc +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "fastdeploy/vision/common/processors/convert.h" - -namespace fastdeploy { - -namespace vision { - -Convert::Convert(const std::vector& alpha, - const std::vector& beta) { - FDASSERT(alpha.size() == beta.size(), - "Convert: requires the size of alpha equal to the size of beta."); - FDASSERT(alpha.size() != 0, - "Convert: requires the size of alpha and beta > 0."); - alpha_.assign(alpha.begin(), alpha.end()); - beta_.assign(beta.begin(), beta.end()); -} - -bool Convert::CpuRun(Mat* mat) { - cv::Mat* im = mat->GetCpuMat(); - std::vector split_im; - cv::split(*im, split_im); - for (int c = 0; c < im->channels(); c++) { - split_im[c].convertTo(split_im[c], CV_32FC1, alpha_[c], beta_[c]); - } - cv::merge(split_im, *im); - return true; -} - -#ifdef ENABLE_OPENCV_CUDA -bool Convert::GpuRun(Mat* mat) { - cv::cuda::GpuMat* im = mat->GetGpuMat(); - std::vector split_im; - cv::cuda::split(*im, split_im); - for (int c = 0; c < im->channels(); c++) { - split_im[c].convertTo(split_im[c], CV_32FC1, alpha_[c], beta_[c]); - } - cv::cuda::merge(split_im, *im); - return true; -} -#endif - -bool Convert::Run(Mat* mat, const std::vector& alpha, - const std::vector& beta, ProcLib lib) { - auto c = Convert(alpha, beta); - return c(mat, lib); -} - -} // namespace vision -} // namespace fastdeploy \ No newline at end of file diff --git a/fastdeploy/vision/common/processors/convert.h b/fastdeploy/vision/common/processors/convert.h deleted file mode 100644 index 5d5a5276f5..0000000000 --- a/fastdeploy/vision/common/processors/convert.h +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include "fastdeploy/vision/common/processors/base.h" - -namespace fastdeploy { -namespace vision { -class Convert : public Processor { - public: - Convert(const std::vector& alpha, const std::vector& beta); - - bool CpuRun(Mat* mat); -#ifdef ENABLE_OPENCV_CUDA - bool GpuRun(Mat* mat); -#endif - std::string Name() { return "Convert"; } - - // Compute `result = mat * alpha + beta` directly by channel. - // The default behavior is the same as OpenCV's convertTo method. 
- static bool Run(Mat* mat, const std::vector& alpha, - const std::vector& beta, - ProcLib lib = ProcLib::OPENCV_CPU); - - private: - std::vector alpha_; - std::vector beta_; -}; -} // namespace vision -} // namespace fastdeploy diff --git a/fastdeploy/vision/common/processors/transform.h b/fastdeploy/vision/common/processors/transform.h index 08073b4e42..12eec8d72d 100644 --- a/fastdeploy/vision/common/processors/transform.h +++ b/fastdeploy/vision/common/processors/transform.h @@ -17,7 +17,6 @@ #include "fastdeploy/vision/common/processors/cast.h" #include "fastdeploy/vision/common/processors/center_crop.h" #include "fastdeploy/vision/common/processors/color_space_convert.h" -#include "fastdeploy/vision/common/processors/convert.h" #include "fastdeploy/vision/common/processors/hwc2chw.h" #include "fastdeploy/vision/common/processors/normalize.h" #include "fastdeploy/vision/common/processors/pad.h" diff --git a/fastdeploy/vision/meituan/yolov6.cc b/fastdeploy/vision/meituan/yolov6.cc index 8ac7377194..b75f2016ee 100644 --- a/fastdeploy/vision/meituan/yolov6.cc +++ b/fastdeploy/vision/meituan/yolov6.cc @@ -129,12 +129,8 @@ bool YOLOv6::Preprocess(Mat* mat, FDTensor* output, LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, is_scale_up, stride); BGR2RGB::Run(mat); - // Normalize::Run(mat, std::vector(mat->Channels(), 0.0), - // std::vector(mat->Channels(), 1.0)); - // Compute `result = mat * alpha + beta` directly by channel - std::vector alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f}; - std::vector beta = {0.0f, 0.0f, 0.0f}; - Convert::Run(mat, alpha, beta); + Normalize::Run(mat, std::vector(mat->Channels(), 0.0), + std::vector(mat->Channels(), 1.0)); // Record output shape of preprocessed image (*im_info)["output_shape"] = {static_cast(mat->Height()), diff --git a/fastdeploy/vision/ppcls/model.cc b/fastdeploy/vision/ppcls/model.cc index c4e5b767c7..915cb97512 100644 --- a/fastdeploy/vision/ppcls/model.cc +++ b/fastdeploy/vision/ppcls/model.cc @@ -1,16 +1,3 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. #include "fastdeploy/vision/ppcls/model.h" #include "fastdeploy/vision/utils/utils.h" @@ -148,6 +135,6 @@ bool Model::Predict(cv::Mat* im, ClassifyResult* result, int topk) { return true; } -} // namespace ppcls -} // namespace vision -} // namespace fastdeploy +} // namespace ppcls +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/ppcls/model.h b/fastdeploy/vision/ppcls/model.h index 265f92d32b..fae99d4f3c 100644 --- a/fastdeploy/vision/ppcls/model.h +++ b/fastdeploy/vision/ppcls/model.h @@ -1,17 +1,3 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - #pragma once #include "fastdeploy/fastdeploy_model.h" #include "fastdeploy/vision/common/processors/transform.h" @@ -46,6 +32,6 @@ class FASTDEPLOY_DECL Model : public FastDeployModel { std::vector> processors_; std::string config_file_; }; -} // namespace ppcls -} // namespace vision -} // namespace fastdeploy +} // namespace ppcls +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/ppcls/ppcls_pybind.cc b/fastdeploy/vision/ppcls/ppcls_pybind.cc index 1abc0b2b7c..828bef3c7a 100644 --- a/fastdeploy/vision/ppcls/ppcls_pybind.cc +++ b/fastdeploy/vision/ppcls/ppcls_pybind.cc @@ -14,7 +14,7 @@ #include "fastdeploy/pybind/main.h" namespace fastdeploy { -void BindPPCls(pybind11::module& m) { +void BindPpClsModel(pybind11::module& m) { auto ppcls_module = m.def_submodule("ppcls", "Module to deploy PaddleClas."); pybind11::class_(ppcls_module, "Model") .def(pybind11::init(ppdet_module, - "PPYOLOE") - .def(pybind11::init()) - .def("predict", [](vision::ppdet::PPYOLOE& self, pybind11::array& data, - float conf_threshold, float nms_iou_threshold) { - auto mat = PyArrayToCvMat(data); - vision::DetectionResult res; - self.Predict(&mat, &res, conf_threshold, nms_iou_threshold); - return res; - }); -} -} // namespace fastdeploy diff --git a/fastdeploy/vision/ppdet/ppyoloe.cc b/fastdeploy/vision/ppdet/ppyoloe.cc deleted file mode 100644 index c215ecb0ca..0000000000 --- a/fastdeploy/vision/ppdet/ppyoloe.cc +++ /dev/null @@ -1,170 +0,0 @@ -#include "fastdeploy/vision/ppdet/ppyoloe.h" -#include "fastdeploy/vision/utils/utils.h" -#include "yaml-cpp/yaml.h" - -namespace fastdeploy { -namespace vision { -namespace ppdet { - -PPYOLOE::PPYOLOE(const std::string& model_file, const std::string& params_file, - const std::string& config_file, - const RuntimeOption& custom_option, - const Frontend& model_format) { - config_file_ = config_file; - valid_cpu_backends = {Backend::ORT, Backend::PDINFER}; - valid_gpu_backends = {Backend::ORT, Backend::PDINFER}; - runtime_option = custom_option; - runtime_option.model_format = model_format; - runtime_option.model_file = model_file; - runtime_option.params_file = params_file; - initialized = Initialize(); -} - -bool PPYOLOE::Initialize() { - if (!BuildPreprocessPipelineFromConfig()) { - std::cout << "Failed to build preprocess pipeline from configuration file." - << std::endl; - return false; - } - if (!InitRuntime()) { - std::cout << "Failed to initialize fastdeploy backend." << std::endl; - return false; - } - return true; -} - -bool PPYOLOE::BuildPreprocessPipelineFromConfig() { - processors_.clear(); - YAML::Node cfg; - try { - cfg = YAML::LoadFile(config_file_); - } catch (YAML::BadFile& e) { - std::cout << "Failed to load yaml file " << config_file_ - << ", maybe you should check this file." << std::endl; - return false; - } - - if (cfg["arch"].as() != "YOLO") { - std::cout << "Require the arch of model is YOLO, but arch defined in " - "config file is " - << cfg["arch"].as() << "." 
<< std::endl; - return false; - } - processors_.push_back(std::make_shared()); - - for (const auto& op : cfg["Preprocess"]) { - std::string op_name = op["type"].as(); - if (op_name == "NormalizeImage") { - auto mean = op["mean"].as>(); - auto std = op["std"].as>(); - bool is_scale = op["is_scale"].as(); - processors_.push_back(std::make_shared(mean, std, is_scale)); - } else if (op_name == "Resize") { - bool keep_ratio = op["keep_ratio"].as(); - auto target_size = op["target_size"].as>(); - int interp = op["interp"].as(); - FDASSERT(target_size.size(), - "Require size of target_size be 2, but now it's " + - std::to_string(target_size.size()) + "."); - FDASSERT(!keep_ratio, - "Only support keep_ratio is false while deploy " - "PaddleDetection model."); - int width = target_size[1]; - int height = target_size[0]; - processors_.push_back( - std::make_shared(width, height, -1.0, -1.0, interp, false)); - } else if (op_name == "Permute") { - processors_.push_back(std::make_shared()); - } else { - std::cout << "Unexcepted preprocess operator: " << op_name << "." - << std::endl; - return false; - } - } - return true; -} - -bool PPYOLOE::Preprocess(Mat* mat, std::vector* outputs) { - int origin_w = mat->Width(); - int origin_h = mat->Height(); - for (size_t i = 0; i < processors_.size(); ++i) { - if (!(*(processors_[i].get()))(mat)) { - std::cout << "Failed to process image data in " << processors_[i]->Name() - << "." << std::endl; - return false; - } - } - - outputs->resize(2); - (*outputs)[0].name = InputInfoOfRuntime(0).name; - mat->ShareWithTensor(&((*outputs)[0])); - - // reshape to [1, c, h, w] - (*outputs)[0].shape.insert((*outputs)[0].shape.begin(), 1); - - (*outputs)[1].Allocate({1, 2}, FDDataType::FP32, InputInfoOfRuntime(1).name); - float* ptr = static_cast((*outputs)[1].MutableData()); - ptr[0] = mat->Height() * 1.0 / mat->Height(); - ptr[1] = mat->Width() * 1.0 / mat->Width(); - return true; -} - -bool PPYOLOE::Postprocess(std::vector& infer_result, - DetectionResult* result, float conf_threshold, - float nms_threshold) { - FDASSERT(infer_result[1].shape[0] == 1, - "Only support batch = 1 in FastDeploy now."); - int box_num = 0; - if (infer_result[1].dtype == FDDataType::INT32) { - box_num = *(static_cast(infer_result[1].Data())); - } else if (infer_result[1].dtype == FDDataType::INT64) { - box_num = *(static_cast(infer_result[1].Data())); - } else { - FDASSERT( - false, - "The output box_num of PPYOLOE model should be type of int32/int64."); - } - result->Reserve(box_num); - float* box_data = static_cast(infer_result[0].Data()); - for (size_t i = 0; i < box_num; ++i) { - if (box_data[i * 6 + 1] < conf_threshold) { - continue; - } - result->label_ids.push_back(box_data[i * 6]); - result->scores.push_back(box_data[i * 6 + 1]); - result->boxes.emplace_back( - std::array{box_data[i * 6 + 2], box_data[i * 6 + 3], - box_data[i * 6 + 4] - box_data[i * 6 + 2], - box_data[i * 6 + 5] - box_data[i * 6 + 3]}); - } - return true; -} - -bool PPYOLOE::Predict(cv::Mat* im, DetectionResult* result, - float conf_threshold, float iou_threshold) { - Mat mat(*im); - std::vector processed_data; - if (!Preprocess(&mat, &processed_data)) { - FDERROR << "Failed to preprocess input data while using model:" - << ModelName() << "." << std::endl; - return false; - } - - std::vector infer_result; - if (!Infer(processed_data, &infer_result)) { - FDERROR << "Failed to inference while using model:" << ModelName() << "." 
- << std::endl; - return false; - } - - if (!Postprocess(infer_result, result, conf_threshold, iou_threshold)) { - FDERROR << "Failed to postprocess while using model:" << ModelName() << "." - << std::endl; - return false; - } - return true; -} - -} // namespace ppdet -} // namespace vision -} // namespace fastdeploy diff --git a/fastdeploy/vision/ppdet/ppyoloe.h b/fastdeploy/vision/ppdet/ppyoloe.h deleted file mode 100644 index a3db268ca4..0000000000 --- a/fastdeploy/vision/ppdet/ppyoloe.h +++ /dev/null @@ -1,44 +0,0 @@ -#pragma once -#include "fastdeploy/fastdeploy_model.h" -#include "fastdeploy/vision/common/processors/transform.h" -#include "fastdeploy/vision/common/result.h" - -#include "fastdeploy/vision/utils/utils.h" - -namespace fastdeploy { -namespace vision { -namespace ppdet { - -class FASTDEPLOY_DECL PPYOLOE : public FastDeployModel { - public: - PPYOLOE(const std::string& model_file, const std::string& params_file, - const std::string& config_file, - const RuntimeOption& custom_option = RuntimeOption(), - const Frontend& model_format = Frontend::PADDLE); - - std::string ModelName() const { return "PaddleDetection/PPYOLOE"; } - - virtual bool Initialize(); - - virtual bool BuildPreprocessPipelineFromConfig(); - - virtual bool Preprocess(Mat* mat, std::vector* outputs); - - virtual bool Postprocess(std::vector& infer_result, - DetectionResult* result, float conf_threshold, - float nms_threshold); - - virtual bool Predict(cv::Mat* im, DetectionResult* result, - float conf_threshold = 0.5, float nms_threshold = 0.7); - - private: - std::vector> processors_; - std::string config_file_; - // PaddleDetection can export model without nms - // This flag will help us to handle the different - // situation - bool has_nms_; -}; -} // namespace ppdet -} // namespace vision -} // namespace fastdeploy diff --git a/fastdeploy/vision/ultralytics/yolov5.cc b/fastdeploy/vision/ultralytics/yolov5.cc index 0b7e50e735..c8c6e06a94 100644 --- a/fastdeploy/vision/ultralytics/yolov5.cc +++ b/fastdeploy/vision/ultralytics/yolov5.cc @@ -126,12 +126,8 @@ bool YOLOv5::Preprocess(Mat* mat, FDTensor* output, LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, is_scale_up, stride); BGR2RGB::Run(mat); - // Normalize::Run(mat, std::vector(mat->Channels(), 0.0), - // std::vector(mat->Channels(), 1.0)); - // Compute `result = mat * alpha + beta` directly by channel - std::vector alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f}; - std::vector beta = {0.0f, 0.0f, 0.0f}; - Convert::Run(mat, alpha, beta); + Normalize::Run(mat, std::vector(mat->Channels(), 0.0), + std::vector(mat->Channels(), 1.0)); // Record output shape of preprocessed image (*im_info)["output_shape"] = {static_cast(mat->Height()), @@ -202,11 +198,6 @@ bool YOLOv5::Postprocess( result->scores.push_back(confidence); } } - - if (result->boxes.size() == 0) { - return true; - } - utils::NMS(result, nms_iou_threshold); // scale the boxes to the origin image shape diff --git a/fastdeploy/vision/utils/sort_det_res.cc b/fastdeploy/vision/utils/sort_det_res.cc index 93dbb69694..e4a0db9761 100644 --- a/fastdeploy/vision/utils/sort_det_res.cc +++ b/fastdeploy/vision/utils/sort_det_res.cc @@ -68,11 +68,7 @@ void MergeSort(DetectionResult* result, size_t low, size_t high) { void SortDetectionResult(DetectionResult* result) { size_t low = 0; - size_t high = result->scores.size(); - if (high == 0) { - return; - } - high = high - 1; + size_t high = result->scores.size() - 1; MergeSort(result, low, high); } diff --git 
a/fastdeploy/vision/vision_pybind.cc b/fastdeploy/vision/vision_pybind.cc
index 0334303ce6..e4ba05b893 100644
--- a/fastdeploy/vision/vision_pybind.cc
+++ b/fastdeploy/vision/vision_pybind.cc
@@ -16,8 +16,7 @@
 
 namespace fastdeploy {
 
-void BindPPCls(pybind11::module& m);
-void BindPPDet(pybind11::module& m);
+void BindPpClsModel(pybind11::module& m);
 void BindWongkinyiu(pybind11::module& m);
 void BindUltralytics(pybind11::module& m);
 void BindMeituan(pybind11::module& m);
@@ -42,8 +41,7 @@ void BindVision(pybind11::module& m) {
       .def("__repr__", &vision::DetectionResult::Str)
       .def("__str__", &vision::DetectionResult::Str);
 
-  BindPPCls(m);
-  BindPPDet(m);
+  BindPpClsModel(m);
   BindUltralytics(m);
   BindWongkinyiu(m);
   BindMeituan(m);
diff --git a/fastdeploy/vision/visualize/detection.cc b/fastdeploy/vision/visualize/detection.cc
index 5b5538bff7..e5f01bdd35 100644
--- a/fastdeploy/vision/visualize/detection.cc
+++ b/fastdeploy/vision/visualize/detection.cc
@@ -43,7 +43,7 @@ void Visualize::VisDetection(cv::Mat* im, const DetectionResult& result,
     }
     std::string text = id + "," + score;
     int font = cv::FONT_HERSHEY_SIMPLEX;
-    cv::Size text_size = cv::getTextSize(text, font, font_size, 1, nullptr);
+    cv::Size text_size = cv::getTextSize(text, font, font_size, 0.5, nullptr);
     cv::Point origin;
     origin.x = rect.x;
     origin.y = rect.y;
@@ -52,10 +52,10 @@ void Visualize::VisDetection(cv::Mat* im, const DetectionResult& result,
                   text_size.width, text_size.height);
     cv::rectangle(*im, rect, rect_color, line_size);
     cv::putText(*im, text, origin, font, font_size, cv::Scalar(255, 255, 255),
-                1);
+                0.5);
   }
 }
-} // namespace vision
-} // namespace fastdeploy
+}  // namespace vision
+}  // namespace fastdeploy
 #endif
diff --git a/fastdeploy/vision/wongkinyiu/__init__.py b/fastdeploy/vision/wongkinyiu/__init__.py
index 542389e208..026d10062f 100644
--- a/fastdeploy/vision/wongkinyiu/__init__.py
+++ b/fastdeploy/vision/wongkinyiu/__init__.py
@@ -114,3 +114,101 @@ def max_wh(self, value):
         assert isinstance(
             value, float), "The value to set `max_wh` must be type of float."
         self._model.max_wh = value
+
+
+class YOLOR(FastDeployModel):
+    def __init__(self,
+                 model_file,
+                 params_file="",
+                 runtime_option=None,
+                 model_format=Frontend.ONNX):
+        # Call the base class to initialize backend_option;
+        # the initialized option is stored in self._runtime_option
+        super(YOLOR, self).__init__(runtime_option)
+
+        self._model = C.vision.wongkinyiu.YOLOR(
+            model_file, params_file, self._runtime_option, model_format)
+        # self.initialized tells whether the whole model initialized successfully
+        assert self.initialized, "YOLOR initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        return self._model.predict(input_image, conf_threshold,
+                                   nms_iou_threshold)
+
+    # Wrappers around some attributes of the YOLOR model.
+    # Most relate to preprocessing; e.g. setting model.size = [1280, 1280]
+    # changes the resize shape used in preprocessing (if the model supports it)
+    @property
+    def size(self):
+        return self._model.size
+
+    @property
+    def padding_value(self):
+        return self._model.padding_value
+
+    @property
+    def is_no_pad(self):
+        return self._model.is_no_pad
+
+    @property
+    def is_mini_pad(self):
+        return self._model.is_mini_pad
+
+    @property
+    def is_scale_up(self):
+        return self._model.is_scale_up
+
+    @property
+    def stride(self):
+        return self._model.stride
+
+    @property
+    def max_wh(self):
+        return self._model.max_wh
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(wh, (list, tuple)),\
+            "The value to set `size` must be type of tuple or list."
+        assert len(wh) == 2,\
+            "The value to set `size` must contain 2 elements, i.e. [width, height], but it now contains {} elements.".format(
+                len(wh))
+        self._model.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value,
+            list), "The value to set `padding_value` must be type of list."
+        self._model.padding_value = value
+
+    @is_no_pad.setter
+    def is_no_pad(self, value):
+        assert isinstance(
+            value, bool), "The value to set `is_no_pad` must be type of bool."
+        self._model.is_no_pad = value
+
+    @is_mini_pad.setter
+    def is_mini_pad(self, value):
+        assert isinstance(
+            value,
+            bool), "The value to set `is_mini_pad` must be type of bool."
+        self._model.is_mini_pad = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value,
+            bool), "The value to set `is_scale_up` must be type of bool."
+        self._model.is_scale_up = value
+
+    @stride.setter
+    def stride(self, value):
+        assert isinstance(
+            value, int), "The value to set `stride` must be type of int."
+        self._model.stride = value
+
+    @max_wh.setter
+    def max_wh(self, value):
+        assert isinstance(
+            value, float), "The value to set `max_wh` must be type of float."
+        self._model.max_wh = value
diff --git a/fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc b/fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc
index 4a10f47a76..6bde2a1841 100644
--- a/fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc
+++ b/fastdeploy/vision/wongkinyiu/wongkinyiu_pybind.cc
@@ -17,7 +17,7 @@
 namespace fastdeploy {
 void BindWongkinyiu(pybind11::module& m) {
   auto wongkinyiu_module =
-      m.def_submodule("wongkinyiu", "https://github.com/WongKinYiu/yolov7");
+      m.def_submodule("wongkinyiu", "https://github.com/WongKinYiu");
   pybind11::class_<vision::wongkinyiu::YOLOv7, FastDeployModel>(
       wongkinyiu_module, "YOLOv7")
       .def(pybind11::init<std::string, std::string, RuntimeOption, Frontend>())
@@ -37,5 +37,24 @@ void BindWongkinyiu(pybind11::module& m) {
       .def_readwrite("is_scale_up", &vision::wongkinyiu::YOLOv7::is_scale_up)
       .def_readwrite("stride", &vision::wongkinyiu::YOLOv7::stride)
       .def_readwrite("max_wh", &vision::wongkinyiu::YOLOv7::max_wh);
+
+  pybind11::class_<vision::wongkinyiu::YOLOR, FastDeployModel>(
+      wongkinyiu_module, "YOLOR")
+      .def(pybind11::init<std::string, std::string, RuntimeOption, Frontend>())
+      .def("predict",
+           [](vision::wongkinyiu::YOLOR& self, pybind11::array& data,
+              float conf_threshold, float nms_iou_threshold) {
+             auto mat = PyArrayToCvMat(data);
+             vision::DetectionResult res;
+             self.Predict(&mat, &res, conf_threshold, nms_iou_threshold);
+             return res;
+           })
+      .def_readwrite("size", &vision::wongkinyiu::YOLOR::size)
+      .def_readwrite("padding_value", &vision::wongkinyiu::YOLOR::padding_value)
+      .def_readwrite("is_mini_pad", &vision::wongkinyiu::YOLOR::is_mini_pad)
+      .def_readwrite("is_no_pad", &vision::wongkinyiu::YOLOR::is_no_pad)
+      .def_readwrite("is_scale_up", &vision::wongkinyiu::YOLOR::is_scale_up)
+      .def_readwrite("stride", &vision::wongkinyiu::YOLOR::stride)
+      .def_readwrite("max_wh", &vision::wongkinyiu::YOLOR::max_wh);
 }
 } // namespace fastdeploy
diff --git a/fastdeploy/vision/wongkinyiu/yolor.cc b/fastdeploy/vision/wongkinyiu/yolor.cc
new file mode 100644
index 0000000000..5cf9d6cb83
--- /dev/null
+++ b/fastdeploy/vision/wongkinyiu/yolor.cc
@@ -0,0 +1,243 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision/wongkinyiu/yolor.h" +#include "fastdeploy/utils/perf.h" +#include "fastdeploy/vision/utils/utils.h" + +namespace fastdeploy { +namespace vision { +namespace wongkinyiu { + +void YOLOR::LetterBox(Mat* mat, const std::vector& size, + const std::vector& color, bool _auto, + bool scale_fill, bool scale_up, int stride) { + float scale = + std::min(size[1] * 1.0 / mat->Height(), size[0] * 1.0 / mat->Width()); + if (!scale_up) { + scale = std::min(scale, 1.0f); + } + + int resize_h = int(round(mat->Height() * scale)); + int resize_w = int(round(mat->Width() * scale)); + + int pad_w = size[0] - resize_w; + int pad_h = size[1] - resize_h; + if (_auto) { + pad_h = pad_h % stride; + pad_w = pad_w % stride; + } else if (scale_fill) { + pad_h = 0; + pad_w = 0; + resize_h = size[1]; + resize_w = size[0]; + } + Resize::Run(mat, resize_w, resize_h); + if (pad_h > 0 || pad_w > 0) { + float half_h = pad_h * 1.0 / 2; + int top = int(round(half_h - 0.1)); + int bottom = int(round(half_h + 0.1)); + float half_w = pad_w * 1.0 / 2; + int left = int(round(half_w - 0.1)); + int right = int(round(half_w + 0.1)); + Pad::Run(mat, top, bottom, left, right, color); + } +} + +YOLOR::YOLOR(const std::string& model_file, const std::string& params_file, + const RuntimeOption& custom_option, const Frontend& model_format) { + if (model_format == Frontend::ONNX) { + valid_cpu_backends = {Backend::ORT}; // 指定可用的CPU后端 + valid_gpu_backends = {Backend::ORT, Backend::TRT}; // 指定可用的GPU后端 + } else { + valid_cpu_backends = {Backend::PDINFER, Backend::ORT}; + valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT}; + } + runtime_option = custom_option; + runtime_option.model_format = model_format; + runtime_option.model_file = model_file; + runtime_option.params_file = params_file; + initialized = Initialize(); +} + +bool YOLOR::Initialize() { + // parameters for preprocess + size = {640, 640}; + padding_value = {114.0, 114.0, 114.0}; + is_mini_pad = false; + is_no_pad = false; + is_scale_up = false; + stride = 32; + max_wh = 7680.0; + + if (!InitRuntime()) { + FDERROR << "Failed to initialize fastdeploy backend." << std::endl; + return false; + } + return true; +} + +bool YOLOR::Preprocess(Mat* mat, FDTensor* output, + std::map>* im_info) { + // process after image load + double ratio = (size[0] * 1.0) / std::max(static_cast(mat->Height()), + static_cast(mat->Width())); + if (ratio != 1.0) { + int interp = cv::INTER_AREA; + if (ratio > 1.0) { + interp = cv::INTER_LINEAR; + } + int resize_h = int(mat->Height() * ratio); + int resize_w = int(mat->Width() * ratio); + Resize::Run(mat, resize_w, resize_h, -1, -1, interp); + } + // yolor's preprocess steps + // 1. letterbox + // 2. BGR->RGB + // 3. 
HWC->CHW + YOLOR::LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, + is_scale_up, stride); + BGR2RGB::Run(mat); + Normalize::Run(mat, std::vector(mat->Channels(), 0.0), + std::vector(mat->Channels(), 1.0)); + + // Record output shape of preprocessed image + (*im_info)["output_shape"] = {static_cast(mat->Height()), + static_cast(mat->Width())}; + + HWC2CHW::Run(mat); + Cast::Run(mat, "float"); + mat->ShareWithTensor(output); + output->shape.insert(output->shape.begin(), 1); // reshape to n, h, w, c + return true; +} + +bool YOLOR::Postprocess( + FDTensor& infer_result, DetectionResult* result, + const std::map>& im_info, + float conf_threshold, float nms_iou_threshold) { + FDASSERT(infer_result.shape[0] == 1, "Only support batch =1 now."); + result->Clear(); + result->Reserve(infer_result.shape[1]); + if (infer_result.dtype != FDDataType::FP32) { + FDERROR << "Only support post process with float32 data." << std::endl; + return false; + } + float* data = static_cast(infer_result.Data()); + for (size_t i = 0; i < infer_result.shape[1]; ++i) { + int s = i * infer_result.shape[2]; + float confidence = data[s + 4]; + float* max_class_score = + std::max_element(data + s + 5, data + s + infer_result.shape[2]); + confidence *= (*max_class_score); + // filter boxes by conf_threshold + if (confidence <= conf_threshold) { + continue; + } + int32_t label_id = std::distance(data + s + 5, max_class_score); + // convert from [x, y, w, h] to [x1, y1, x2, y2] + result->boxes.emplace_back(std::array{ + data[s] - data[s + 2] / 2.0f + label_id * max_wh, + data[s + 1] - data[s + 3] / 2.0f + label_id * max_wh, + data[s + 0] + data[s + 2] / 2.0f + label_id * max_wh, + data[s + 1] + data[s + 3] / 2.0f + label_id * max_wh}); + result->label_ids.push_back(label_id); + result->scores.push_back(confidence); + } + utils::NMS(result, nms_iou_threshold); + + // scale the boxes to the origin image shape + auto iter_out = im_info.find("output_shape"); + auto iter_ipt = im_info.find("input_shape"); + FDASSERT(iter_out != im_info.end() && iter_ipt != im_info.end(), + "Cannot find input_shape or output_shape from im_info."); + float out_h = iter_out->second[0]; + float out_w = iter_out->second[1]; + float ipt_h = iter_ipt->second[0]; + float ipt_w = iter_ipt->second[1]; + float scale = std::min(out_h / ipt_h, out_w / ipt_w); + for (size_t i = 0; i < result->boxes.size(); ++i) { + float pad_h = (out_h - ipt_h * scale) / 2; + float pad_w = (out_w - ipt_w * scale) / 2; + int32_t label_id = (result->label_ids)[i]; + // clip box + result->boxes[i][0] = result->boxes[i][0] - max_wh * label_id; + result->boxes[i][1] = result->boxes[i][1] - max_wh * label_id; + result->boxes[i][2] = result->boxes[i][2] - max_wh * label_id; + result->boxes[i][3] = result->boxes[i][3] - max_wh * label_id; + result->boxes[i][0] = std::max((result->boxes[i][0] - pad_w) / scale, 0.0f); + result->boxes[i][1] = std::max((result->boxes[i][1] - pad_h) / scale, 0.0f); + result->boxes[i][2] = std::max((result->boxes[i][2] - pad_w) / scale, 0.0f); + result->boxes[i][3] = std::max((result->boxes[i][3] - pad_h) / scale, 0.0f); + result->boxes[i][0] = std::min(result->boxes[i][0], ipt_w); + result->boxes[i][1] = std::min(result->boxes[i][1], ipt_h); + result->boxes[i][2] = std::min(result->boxes[i][2], ipt_w); + result->boxes[i][3] = std::min(result->boxes[i][3], ipt_h); + } + return true; +} + +bool YOLOR::Predict(cv::Mat* im, DetectionResult* result, float conf_threshold, + float nms_iou_threshold) { +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_START(0) 
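// A worked example of the mapping in Postprocess above, with illustrative
// numbers (not from this patch): for a 1280x960 (w x h) input and
// size = {640, 640}, the recorded shapes give
//   scale = min(640 / 960, 640 / 1280) = 0.5
//   pad_h = (640 - 960 * 0.5) / 2 = 80,  pad_w = (640 - 1280 * 0.5) / 2 = 0
// so after the label_id * max_wh offset is removed, a detected y1 maps back
// to the original image as y1_orig = (y1 - 80) / 0.5, clipped to [0, 960].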
+#endif + + Mat mat(*im); + std::vector input_tensors(1); + + std::map> im_info; + + // Record the shape of image and the shape of preprocessed image + im_info["input_shape"] = {static_cast(mat.Height()), + static_cast(mat.Width())}; + im_info["output_shape"] = {static_cast(mat.Height()), + static_cast(mat.Width())}; + + if (!Preprocess(&mat, &input_tensors[0], &im_info)) { + FDERROR << "Failed to preprocess input image." << std::endl; + return false; + } + +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_END(0, "Preprocess") + TIMERECORD_START(1) +#endif + + input_tensors[0].name = InputInfoOfRuntime(0).name; + std::vector output_tensors; + if (!Infer(input_tensors, &output_tensors)) { + FDERROR << "Failed to inference." << std::endl; + return false; + } +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_END(1, "Inference") + TIMERECORD_START(2) +#endif + + if (!Postprocess(output_tensors[0], result, im_info, conf_threshold, + nms_iou_threshold)) { + FDERROR << "Failed to post process." << std::endl; + return false; + } + +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_END(2, "Postprocess") +#endif + return true; +} + +} // namespace wongkinyiu +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/wongkinyiu/yolor.h b/fastdeploy/vision/wongkinyiu/yolor.h new file mode 100644 index 0000000000..69f5ea8760 --- /dev/null +++ b/fastdeploy/vision/wongkinyiu/yolor.h @@ -0,0 +1,95 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
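// The max_wh member used in Postprocess above implements the usual
// single-pass multi-class NMS trick: offsetting every box by
// label_id * max_wh pushes boxes of different classes so far apart that
// they can never overlap, so one class-agnostic NMS call behaves like
// per-class NMS. A minimal sketch of the idea (illustrative, not the
// utils::NMS implementation this patch calls):
//
//   // boxes: [x1, y1, x2, y2] per detection; label_ids: class per detection
//   for (size_t i = 0; i < boxes.size(); ++i) {
//     float offset = label_ids[i] * max_wh;  // e.g. max_wh = 7680.0
//     boxes[i][0] += offset;
//     boxes[i][1] += offset;
//     boxes[i][2] += offset;
//     boxes[i][3] += offset;
//   }
//   // run class-agnostic IoU NMS, then subtract the offset back, as the
//   // clip step above does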
+ +#pragma once +#include "fastdeploy/fastdeploy_model.h" +#include "fastdeploy/vision/common/processors/transform.h" +#include "fastdeploy/vision/common/result.h" + +namespace fastdeploy { +namespace vision { +namespace wongkinyiu { + +class FASTDEPLOY_DECL YOLOR : public FastDeployModel { + public: + // 当model_format为ONNX时,无需指定params_file + // 当model_format为Paddle时,则需同时指定model_file & params_file + YOLOR(const std::string& model_file, const std::string& params_file = "", + const RuntimeOption& custom_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX); + + // 定义模型的名称 + virtual std::string ModelName() const { return "WongKinYiu/yolor"; } + + // 模型预测接口,即用户调用的接口 + // im 为用户的输入数据,目前对于CV均定义为cv::Mat + // result 为模型预测的输出结构体 + // conf_threshold 为后处理的参数 + // nms_iou_threshold 为后处理的参数 + virtual bool Predict(cv::Mat* im, DetectionResult* result, + float conf_threshold = 0.25, + float nms_iou_threshold = 0.5); + + // 以下为模型在预测时的一些参数,基本是前后处理所需 + // 用户在创建模型后,可根据模型的要求,以及自己的需求 + // 对参数进行修改 + // tuple of (width, height) + std::vector size; + // padding value, size should be same with Channels + std::vector padding_value; + // only pad to the minimum rectange which height and width is times of stride + bool is_mini_pad; + // while is_mini_pad = false and is_no_pad = true, will resize the image to + // the set size + bool is_no_pad; + // if is_scale_up is false, the input image only can be zoom out, the maximum + // resize scale cannot exceed 1.0 + bool is_scale_up; + // padding stride, for is_mini_pad + int stride; + // for offseting the boxes by classes when using NMS + float max_wh; + + private: + // 初始化函数,包括初始化后端,以及其它模型推理需要涉及的操作 + bool Initialize(); + + // 输入图像预处理操作 + // Mat为FastDeploy定义的数据结构 + // FDTensor为预处理后的Tensor数据,传给后端进行推理 + // im_info为预处理过程保存的数据,在后处理中需要用到 + bool Preprocess(Mat* mat, FDTensor* outputs, + std::map>* im_info); + + // 后端推理结果后处理,输出给用户 + // infer_result 为后端推理后的输出Tensor + // result 为模型预测的结果 + // im_info 为预处理记录的信息,后处理用于还原box + // conf_threshold 后处理时过滤box的置信度阈值 + // nms_iou_threshold 后处理时NMS设定的iou阈值 + bool Postprocess(FDTensor& infer_result, DetectionResult* result, + const std::map>& im_info, + float conf_threshold, float nms_iou_threshold); + + // 对图片进行LetterBox处理 + // mat 为读取到的原图 + // size 为输入模型的图像尺寸 + void LetterBox(Mat* mat, const std::vector& size, + const std::vector& color, bool _auto, + bool scale_fill = false, bool scale_up = true, + int stride = 32); +}; +} // namespace wongkinyiu +} // namespace vision +} // namespace fastdeploy diff --git a/model_zoo/vision/ppyoloe/README.md b/model_zoo/vision/ppyoloe/README.md deleted file mode 100644 index 42d18104ad..0000000000 --- a/model_zoo/vision/ppyoloe/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# PaddleDetection/PPYOLOE部署示例 - -- 当前支持PaddleDetection版本为[release/2.4](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4) - -本文档说明如何进行[PPYOLOE](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/ppyoloe)的快速部署推理。本目录结构如下 -``` -. 
-├── cpp # C++ 代码目录 -│   ├── CMakeLists.txt # C++ 代码编译CMakeLists文件 -│   ├── README.md # C++ 代码编译部署文档 -│   └── ppyoloe.cc # C++ 示例代码 -├── README.md # PPYOLOE 部署文档 -└── ppyoloe.py # Python示例代码 -``` - -## 安装FastDeploy - -使用如下命令安装FastDeploy,注意到此处安装的是`vision-cpu`,也可根据需求安装`vision-gpu` -``` -# 安装fastdeploy-python工具 -pip install fastdeploy-python -``` - -## Python部署 - -执行如下代码即会自动下载PPYOLOE模型和测试图片 -``` -python ppyoloe.py -``` - -执行完成后会将可视化结果保存在本地`vis_result.jpg`,同时输出检测结果如下 -``` -DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] -162.380249,132.057449, 463.178345, 413.167114, 0.962918, 33 -414.914642,141.148666, 91.275269, 308.688293, 0.951003, 0 -163.449234,129.669067, 35.253891, 135.111786, 0.900734, 0 -267.232239,142.290436, 31.578918, 126.329773, 0.848709, 0 -581.790833,179.027115, 30.893127, 135.484940, 0.837986, 0 -104.407021,72.602615, 22.900627, 75.469055, 0.796468, 0 -348.795380,70.122147, 18.806061, 85.829330, 0.785557, 0 -364.118683,92.457428, 17.437622, 89.212891, 0.774282, 0 -75.180283,192.470490, 41.898407, 55.552414, 0.712569, 56 -328.133759,61.894299, 19.100616, 65.633575, 0.710519, 0 -504.797760,181.732574, 107.740814, 248.115082, 0.708902, 0 -379.063080,64.762360, 15.956146, 68.312546, 0.680725, 0 -25.858747,186.564178, 34.958130, 56.007080, 0.580415, 0 -``` - -## 其它文档 - -- [C++部署](./cpp/README.md) -- [PPYOLOE API文档](./api.md) diff --git a/model_zoo/vision/ppyoloe/cpp/README.md b/model_zoo/vision/ppyoloe/cpp/README.md deleted file mode 100644 index 1027c2eeb2..0000000000 --- a/model_zoo/vision/ppyoloe/cpp/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# 编译PPYOLOE示例 - - -``` -# 下载和解压预测库 -wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz -tar xvf fastdeploy-linux-x64-0.0.3.tgz - -# 编译示例代码 -mkdir build & cd build -cmake .. 
-make -j - -# 下载模型和图片 -wget https://bj.bcebos.com/paddle2onnx/fastdeploy/models/ppdet/ppyoloe_crn_l_300e_coco.tgz -tar xvf ppyoloe_crn_l_300e_coco.tgz -wget https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/demo/000000014439_640x640.jpg - -# 执行 -./ppyoloe_demo -``` - -执行完后可视化的结果保存在本地`vis_result.jpg`,同时会将检测框输出在终端,如下所示 -``` -DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] -162.380249,132.057449, 463.178345, 413.167114, 0.962918, 33 -414.914642,141.148666, 91.275269, 308.688293, 0.951003, 0 -163.449234,129.669067, 35.253891, 135.111786, 0.900734, 0 -267.232239,142.290436, 31.578918, 126.329773, 0.848709, 0 -581.790833,179.027115, 30.893127, 135.484940, 0.837986, 0 -104.407021,72.602615, 22.900627, 75.469055, 0.796468, 0 -348.795380,70.122147, 18.806061, 85.829330, 0.785557, 0 -364.118683,92.457428, 17.437622, 89.212891, 0.774282, 0 -75.180283,192.470490, 41.898407, 55.552414, 0.712569, 56 -328.133759,61.894299, 19.100616, 65.633575, 0.710519, 0 -504.797760,181.732574, 107.740814, 248.115082, 0.708902, 0 -379.063080,64.762360, 15.956146, 68.312546, 0.680725, 0 -25.858747,186.564178, 34.958130, 56.007080, 0.580415, 0 -``` diff --git a/model_zoo/vision/ppyoloe/ppyoloe.py b/model_zoo/vision/ppyoloe/ppyoloe.py deleted file mode 100644 index 7d79dfd8cf..0000000000 --- a/model_zoo/vision/ppyoloe/ppyoloe.py +++ /dev/null @@ -1,24 +0,0 @@ -import fastdeploy as fd -import cv2 - -# 下载模型和测试图片 -model_url = "https://bj.bcebos.com/paddle2onnx/fastdeploy/models/ppdet/ppyoloe_crn_l_300e_coco.tgz" -test_jpg_url = "https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/demo/000000014439_640x640.jpg" -fd.download_and_decompress(model_url, ".") -fd.download(test_jpg_url, ".", show_progress=True) - -# 加载模型 -model = fd.vision.ppdet.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel", - "ppyoloe_crn_l_300e_coco/model.pdiparams", - "ppyoloe_crn_l_300e_coco/infer_cfg.yml") - -# 预测图片 -im = cv2.imread("000000014439_640x640.jpg") -result = model.predict(im, conf_threshold=0.5) - -# 可视化结果 -fd.vision.visualize.vis_detection(im, result) -cv2.imwrite("vis_result.jpg", im) - -# 输出预测结果 -print(result) diff --git a/model_zoo/vision/yolor/README.md b/model_zoo/vision/yolor/README.md new file mode 100644 index 0000000000..467023f169 --- /dev/null +++ b/model_zoo/vision/yolor/README.md @@ -0,0 +1,67 @@ +# 编译YOLOR示例 + +当前支持模型版本为:[YOLOR v0.1](https://github.com/WongKinYiu/yolor/releases/tag/weights) + +本文档说明如何进行[YOLOR](https://github.com/WongKinYiu/yolor)的快速部署推理。本目录结构如下 + +``` +. 
+├── cpp
+│   ├── CMakeLists.txt
+│   ├── README.md
+│   └── yolor.cc
+├── README.md
+└── yolor.py
+```
+
+## Get the ONNX file
+
+- Manual export
+
+  Visit the official [YOLOR](https://github.com/WongKinYiu/yolor) GitHub repository, follow its instructions to set the project up, download the `yolor.pt` model, and use `models/export.py` to export an `onnx` file.
+
+
+
+  ```
+  # Download the yolor model file
+  wget https://github.com/WongKinYiu/yolor/releases/download/v0.1/yolor.pt
+
+  # Export the model to ONNX
+  python models/export.py --grid --dynamic --weights PATH/TO/yolor.pt
+
+  # Move the onnx file to the demo directory
+  cp PATH/TO/yolor.onnx PATH/TO/model_zoo/vision/yolor/
+  ```
+
+## Install FastDeploy
+
+Install FastDeploy with the commands below. Note that this installs `vision-cpu`; `vision-gpu` can be installed instead if needed.
+
+```
+# Install the fastdeploy-python tool
+pip install fastdeploy-python
+
+# Install the vision-cpu module
+fastdeploy install vision-cpu
+```
+## Python Deployment
+
+Running the following command automatically downloads the test image
+```
+python yolor.py
+```
+
+Once it finishes, the visualized result is saved locally as `vis_result.jpg` and the detection result is printed as follows
+```
+DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
+0.000000,185.201431, 315.673126, 410.071594, 0.959289, 17
+433.802826,211.603455, 595.489319, 346.425537, 0.952615, 17
+230.446854,195.618805, 418.365479, 362.712128, 0.884253, 17
+336.545624,208.555618, 457.704315, 323.543152, 0.788450, 17
+0.896423,183.936996, 154.788727, 304.916412, 0.672804, 17
+```
+
+## Other Documents
+
+- [C++ deployment](./cpp/README.md)
+- [YOLOR API docs](./api.md)
diff --git a/model_zoo/vision/ppyoloe/api.md b/model_zoo/vision/yolor/api.md
similarity index 56%
rename from model_zoo/vision/ppyoloe/api.md
rename to model_zoo/vision/yolor/api.md
index 1c5cbcaadb..b1e5be889b 100644
--- a/model_zoo/vision/ppyoloe/api.md
+++ b/model_zoo/vision/yolor/api.md
@@ -1,24 +1,23 @@
-# PPYOLOE API Reference
+# YOLOR API Reference
 
 ## Python API
 
-### PPYOLOE class
+### YOLOR class
 
 ```
-fastdeploy.vision.ultralytics.PPYOLOE(model_file, params_file, config_file, runtime_option=None, model_format=fd.Frontend.PADDLE)
+fastdeploy.vision.wongkinyiu.YOLOR(model_file, params_file=None, runtime_option=None, model_format=fd.Frontend.ONNX)
 ```
-Loads and initializes a PPYOLOE model; both model_file and params_file are required, and currently only the Paddle model_format is supported
+Loads and initializes a YOLOR model. When model_format is `fd.Frontend.ONNX`, only model_file (e.g. `yolor.onnx`) is required; when model_format is `fd.Frontend.PADDLE`, both model_file and params_file must be provided.
 
 **Parameters**
 
 > * **model_file**(str): Path to the model file
 > * **params_file**(str): Path to the parameters file
-> * **config_file**(str): Model inference configuration file
 > * **runtime_option**(RuntimeOption): Backend inference options; None means the default configuration
 > * **model_format**(Frontend): Model format
 
 #### predict function
 > ```
-> PPYOLOE.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
+> YOLOR.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
 > ```
 > Model prediction interface: takes an image and returns the detection result directly.
 >
@@ -26,35 +25,33 @@ PPYOLOE模型加载和初始化,需同时提供model_file和params_file, 当
 >
 > > * **image_data**(np.ndarray): Input data; note it must be in HWC, BGR format
 > > * **conf_threshold**(float): Confidence threshold for filtering detection boxes
-> > * **nms_iou_threshold**(float): IoU threshold used during NMS (automatically ignored when the model already contains NMS)
+> > * **nms_iou_threshold**(float): IoU threshold used during NMS
 
-See [ppyoloe.py](./ppyoloe.py) for sample code
+See [yolor.py](./yolor.py) for sample code
 
 
 ## C++ API
 
-### PPYOLOE class
+### YOLOR class
 
 ```
-fastdeploy::vision::ultralytics::PPYOLOE(
+fastdeploy::vision::wongkinyiu::YOLOR(
         const string& model_file,
-        const string& params_file,
-        const string& config_file,
+        const string& params_file = "",
         const RuntimeOption& runtime_option = RuntimeOption(),
         const Frontend& model_format = Frontend::ONNX)
 ```
-Loads and initializes a PPYOLOE model; both model_file and params_file are required, and currently only the Paddle model_format is supported
+Loads and initializes a YOLOR model. When model_format is `Frontend::ONNX`, only model_file (e.g. `yolor.onnx`) is required; when model_format is `Frontend::PADDLE`, both model_file and params_file must be provided.
 
 **Parameters**
 
 > * **model_file**(str): Path to the model file
 > * **params_file**(str): Path to the parameters file
-> * **config_file**(str): Model inference configuration file
 > * **runtime_option**(RuntimeOption): Backend inference options; None means the default configuration
 > * **model_format**(Frontend): Model format
 
 #### Predict function
 > ```
-> YOLOv5::Predict(cv::Mat* im, DetectionResult* result,
+> YOLOR::Predict(cv::Mat* im, DetectionResult* result,
 >                   float conf_threshold = 0.25,
 >                   float nms_iou_threshold = 0.5)
 > ```
@@ -65,9 +62,9 @@ PPYOLOE模型加载和初始化,需同时提供model_file和params_file, 当
 > > * **im**: Input image; note it must be in HWC, BGR format
 > > * **result**: Detection result, including the boxes and each box's confidence
 > > * **conf_threshold**: Confidence threshold for filtering detection boxes
-> > * **nms_iou_threshold**: IoU threshold used during NMS (automatically ignored when the model already contains NMS)
+> > * **nms_iou_threshold**: IoU threshold used during NMS
 
-See [cpp/yolov5.cc](cpp/yolov5.cc) for sample code
+See [cpp/yolor.cc](cpp/yolor.cc) for sample code
 
 
 ## Other API usage
diff --git a/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt b/model_zoo/vision/yolor/cpp/CMakeLists.txt
similarity index 75%
rename from model_zoo/vision/ppyoloe/cpp/CMakeLists.txt
rename to model_zoo/vision/yolor/cpp/CMakeLists.txt
index e681566517..18248b8452 100644
--- a/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt
+++ b/model_zoo/vision/yolor/cpp/CMakeLists.txt
@@ -1,4 +1,4 @@
-PROJECT(ppyoloe_demo C CXX)
+PROJECT(yolor_demo C CXX)
 CMAKE_MINIMUM_REQUIRED (VERSION 3.16)
 
 # For low-version ABI environments, enable compatible compilation as follows
@@ -12,6 +12,6 @@ include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
 # Add the FastDeploy dependency headers
 include_directories(${FASTDEPLOY_INCS})
 
-add_executable(ppyoloe_demo ${PROJECT_SOURCE_DIR}/ppyoloe.cc)
+add_executable(yolor_demo ${PROJECT_SOURCE_DIR}/yolor.cc)
 # Link the FastDeploy library
-target_link_libraries(ppyoloe_demo ${FASTDEPLOY_LIBS})
+target_link_libraries(yolor_demo ${FASTDEPLOY_LIBS})
diff --git a/model_zoo/vision/yolor/cpp/README.md b/model_zoo/vision/yolor/cpp/README.md
new file mode 100644
index 0000000000..eddf5bc51b
--- /dev/null
+++ b/model_zoo/vision/yolor/cpp/README.md
@@ -0,0 +1,51 @@
+# Compiling the YOLOR Example
+
+Currently supported model version: [YOLOR v0.1](https://github.com/WongKinYiu/yolor/releases/tag/weights)
+
+## Get the ONNX file
+
+- Manual export
+
+  Visit the official [YOLOR](https://github.com/WongKinYiu/yolor/releases/tag/weights) GitHub repository, follow its instructions to set the project up, download the `yolor.pt` model, and use `models/export.py` to export an `onnx` file.
+
+  ```
+  # Download the yolor model file
+  wget https://github.com/WongKinYiu/yolor/releases/download/v0.1/yolor.pt
+
+  # Export the model to ONNX
+  python models/export.py --grid --dynamic --weights PATH/TO/yolor.pt
+
+  ```
+
+
+## Run the demo
+
+```
+# Download and extract the inference library
+wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz
+tar xvf fastdeploy-linux-x64-0.0.3.tgz
+
+# Build the example code
+mkdir build && cd build
+cmake ..
+make -j
+
+# Move the onnx file to the demo build directory
+cp PATH/TO/yolor.onnx PATH/TO/model_zoo/vision/yolor/cpp/build/
+
+# Download the test image
+wget https://raw.githubusercontent.com/WongKinYiu/yolor/paper/inference/images/horses.jpg
+
+# Run
+./yolor_demo
+```
+
+After it finishes, the visualized result is saved locally as `vis_result.jpg`, and the detection boxes are printed to the terminal as follows
+```
+DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
+0.000000,185.201431, 315.673126, 410.071594, 0.959289, 17
+433.802826,211.603455, 595.489319, 346.425537, 0.952615, 17
+230.446854,195.618805, 418.365479, 362.712128, 0.884253, 17
+336.545624,208.555618, 457.704315, 323.543152, 0.788450, 17
+0.896423,183.936996, 154.788727, 304.916412, 0.672804, 17
+```
diff --git a/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc b/model_zoo/vision/yolor/cpp/yolor.cc
similarity index 66%
rename from model_zoo/vision/ppyoloe/cpp/ppyoloe.cc
rename to model_zoo/vision/yolor/cpp/yolor.cc
index e63f29e62a..db194583fc 100644
--- a/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc
+++ b/model_zoo/vision/yolor/cpp/yolor.cc
@@ -16,28 +16,18 @@
 int main() {
   namespace vis = fastdeploy::vision;
-
-  std::string model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel";
-  std::string params_file = "ppyoloe_crn_l_300e_coco/model.pdiparams";
-  std::string config_file = "ppyoloe_crn_l_300e_coco/infer_cfg.yml";
-  std::string img_path = "000000014439_640x640.jpg";
-  std::string vis_path = "vis.jpeg";
-
-  auto model = vis::ppdet::PPYOLOE(model_file, params_file, config_file);
+  auto model = vis::wongkinyiu::YOLOR("yolor.onnx");
   if (!model.Initialized()) {
     std::cerr << "Init Failed." << std::endl;
     return -1;
   }
-
-  cv::Mat im = cv::imread(img_path);
+  cv::Mat im = cv::imread("horses.jpg");
   cv::Mat vis_im = im.clone();
 
   vis::DetectionResult res;
   if (!model.Predict(&im, &res)) {
     std::cerr << "Prediction Failed." << std::endl;
     return -1;
-  } else {
-    std::cout << "Prediction Done!" << std::endl;
   }
 
   // Print the detection boxes
@@ -45,7 +35,6 @@ int main() {
   std::cout << res.Str() << std::endl;
 
   // Visualize the predictions
   vis::Visualize::VisDetection(&vis_im, res);
-  cv::imwrite(vis_path, vis_im);
-  std::cout << "Detect Done! Saved: " << vis_path << std::endl;
+  cv::imwrite("vis_result.jpg", vis_im);
   return 0;
 }
diff --git a/model_zoo/vision/yolor/yolor.py b/model_zoo/vision/yolor/yolor.py
new file mode 100644
index 0000000000..56d3f9689e
--- /dev/null
+++ b/model_zoo/vision/yolor/yolor.py
@@ -0,0 +1,21 @@
+import fastdeploy as fd
+import cv2
+
+# Download the test image
+test_jpg_url = "https://raw.githubusercontent.com/WongKinYiu/yolor/paper/inference/images/horses.jpg"
+fd.download(test_jpg_url, ".", show_progress=True)
+
+# Load the model
+model = fd.vision.wongkinyiu.YOLOR("yolor.onnx")
+
+# Predict on the image
+im = cv2.imread("horses.jpg")
+result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)
+
+# Visualize the result
+fd.vision.visualize.vis_detection(im, result)
+cv2.imwrite("vis_result.jpg", im)
+
+# Print the prediction result
+print(result)
+print(model.runtime_option)
diff --git a/setup.py b/setup.py
index e76f057b1c..5147025b4e 100644
--- a/setup.py
+++ b/setup.py
@@ -326,25 +326,14 @@ def run(self):
             shutil.copy("LICENSE", "fastdeploy")
             depend_libs = list()
-            if platform.system().lower() == "linux":
-                for f in os.listdir(".setuptools-cmake-build"):
-                    full_name = os.path.join(".setuptools-cmake-build", f)
-                    if not os.path.isfile(full_name):
-                        continue
-                    if not full_name.count("fastdeploy_main.cpython-"):
-                        continue
-                    if not full_name.endswith(".so"):
-                        continue
-                    # modify the search path of libraries
-                    command = "patchelf --set-rpath '$ORIGIN/libs/' {}".format(
-                        full_name)
-                    # The sw_64 not suppot patchelf, so we just disable that.
- if platform.machine() != 'sw_64' and platform.machine( - ) != 'mips64': - assert os.system( - command - ) == 0, "patch fastdeploy_main.cpython-36m-x86_64-linux-gnu.so failed, the command: {}".format( - command) + # modify the search path of libraries + command = "patchelf --set-rpath '$ORIGIN/libs/' .setuptools-cmake-build/fastdeploy_main.cpython-37m-x86_64-linux-gnu.so" + # The sw_64 not suppot patchelf, so we just disable that. + if platform.machine() != 'sw_64' and platform.machine() != 'mips64': + assert os.system( + command + ) == 0, "patch fastdeploy_main.cpython-37m-x86_64-linux-gnu.so failed, the command: {}".format( + command) for f in os.listdir(".setuptools-cmake-build"): if not os.path.isfile(os.path.join(".setuptools-cmake-build", f)): From 3aa015fd722877e7c449a25a9ad0eedbc6fc099a Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 20 Jul 2022 07:58:07 +0000 Subject: [PATCH 36/58] for merge --- examples/CMakeLists.txt | 25 +-- examples/vision/ppdet_ppyoloe.cc | 51 ++++++ fastdeploy/__init__.py | 2 +- fastdeploy/download.py | 2 +- fastdeploy/utils/utils.h | 4 + fastdeploy/vision.h | 1 + fastdeploy/vision/__init__.py | 1 + .../vision/common/processors/convert.cc | 62 +++++++ fastdeploy/vision/common/processors/convert.h | 42 +++++ .../vision/common/processors/transform.h | 1 + fastdeploy/vision/meituan/yolov6.cc | 8 +- fastdeploy/vision/ppcls/model.cc | 13 ++ fastdeploy/vision/ppcls/model.h | 14 ++ fastdeploy/vision/ppcls/ppcls_pybind.cc | 2 +- fastdeploy/vision/ppdet/__init__.py | 39 ++++ fastdeploy/vision/ppdet/ppdet_pybind.cc | 32 ++++ fastdeploy/vision/ppdet/ppyoloe.cc | 170 ++++++++++++++++++ fastdeploy/vision/ppdet/ppyoloe.h | 44 +++++ fastdeploy/vision/ultralytics/yolov5.cc | 13 +- fastdeploy/vision/utils/sort_det_res.cc | 6 +- fastdeploy/vision/vision_pybind.cc | 6 +- fastdeploy/vision/visualize/detection.cc | 4 +- model_zoo/vision/ppyoloe/README.md | 52 ++++++ model_zoo/vision/ppyoloe/api.md | 74 ++++++++ model_zoo/vision/ppyoloe/cpp/CMakeLists.txt | 17 ++ model_zoo/vision/ppyoloe/cpp/README.md | 39 ++++ model_zoo/vision/ppyoloe/cpp/ppyoloe.cc | 51 ++++++ model_zoo/vision/ppyoloe/ppyoloe.py | 24 +++ setup.py | 27 ++- 29 files changed, 794 insertions(+), 32 deletions(-) create mode 100644 examples/vision/ppdet_ppyoloe.cc create mode 100644 fastdeploy/vision/common/processors/convert.cc create mode 100644 fastdeploy/vision/common/processors/convert.h create mode 100644 fastdeploy/vision/ppdet/__init__.py create mode 100644 fastdeploy/vision/ppdet/ppdet_pybind.cc create mode 100644 fastdeploy/vision/ppdet/ppyoloe.cc create mode 100644 fastdeploy/vision/ppdet/ppyoloe.h create mode 100644 model_zoo/vision/ppyoloe/README.md create mode 100644 model_zoo/vision/ppyoloe/api.md create mode 100644 model_zoo/vision/ppyoloe/cpp/CMakeLists.txt create mode 100644 model_zoo/vision/ppyoloe/cpp/README.md create mode 100644 model_zoo/vision/ppyoloe/cpp/ppyoloe.cc create mode 100644 model_zoo/vision/ppyoloe/ppyoloe.py diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 67361223c6..112193c86a 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,25 +1,26 @@ -function(add_fastdeploy_executable field url model) +function(add_fastdeploy_executable FIELD CC_FILE) # temp target name/file var in function scope - set(TEMP_TARGET_FILE ${PROJECT_SOURCE_DIR}/examples/${field}/${url}_${model}.cc) - set(TEMP_TARGET_NAME ${field}_${url}_${model}) + set(TEMP_TARGET_FILE ${CC_FILE}) + string(REGEX MATCHALL "[0-9A-Za-z_]*.cc" FILE_NAME ${CC_FILE}) + string(REGEX REPLACE 
".cc" "" FILE_PREFIX ${FILE_NAME}) + set(TEMP_TARGET_NAME ${FIELD}_${FILE_PREFIX}) if (EXISTS ${TEMP_TARGET_FILE} AND TARGET fastdeploy) add_executable(${TEMP_TARGET_NAME} ${TEMP_TARGET_FILE}) target_link_libraries(${TEMP_TARGET_NAME} PUBLIC fastdeploy) - message(STATUS "Found source file: [${field}/${url}_${model}.cc], ADD!!! fastdeploy executable: [${TEMP_TARGET_NAME}] !") - else () - message(WARNING "Can not found source file: [${field}/${url}_${model}.cc], SKIP!!! fastdeploy executable: [${TEMP_TARGET_NAME}] !") + message(STATUS " Added FastDeploy Executable : ${TEMP_TARGET_NAME}") endif() unset(TEMP_TARGET_FILE) unset(TEMP_TARGET_NAME) endfunction() # vision examples -if (WITH_VISION_EXAMPLES) - add_fastdeploy_executable(vision ultralytics yolov5) - add_fastdeploy_executable(vision meituan yolov6) - add_fastdeploy_executable(vision wongkinyiu yolov7) - add_fastdeploy_executable(vision megvii yolox) - add_fastdeploy_executable(vision wongkinyiu yolor) +if(WITH_VISION_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples/vision) + message(STATUS "") + message(STATUS "*************FastDeploy Examples Summary**********") + file(GLOB ALL_VISION_EXAMPLE_SRCS ${PROJECT_SOURCE_DIR}/examples/vision/*.cc) + foreach(_CC_FILE ${ALL_VISION_EXAMPLE_SRCS}) + add_fastdeploy_executable(vision ${_CC_FILE}) + endforeach() endif() # other examples ... diff --git a/examples/vision/ppdet_ppyoloe.cc b/examples/vision/ppdet_ppyoloe.cc new file mode 100644 index 0000000000..b234021c92 --- /dev/null +++ b/examples/vision/ppdet_ppyoloe.cc @@ -0,0 +1,51 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +int main() { + namespace vis = fastdeploy::vision; + + std::string model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel"; + std::string params_file = "ppyoloe_crn_l_300e_coco/model.pdiparams"; + std::string config_file = "ppyoloe_crn_l_300e_coco/infer_cfg.yml"; + std::string img_path = "test.jpeg"; + std::string vis_path = "vis.jpeg"; + + auto model = vis::ppdet::PPYOLOE(model_file, params_file, config_file); + if (!model.Initialized()) { + std::cerr << "Init Failed." << std::endl; + return -1; + } + + cv::Mat im = cv::imread(img_path); + cv::Mat vis_im = im.clone(); + + vis::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Prediction Failed." << std::endl; + return -1; + } else { + std::cout << "Prediction Done!" << std::endl; + } + + // 输出预测框结果 + std::cout << res.Str() << std::endl; + + // 可视化预测结果 + vis::Visualize::VisDetection(&vis_im, res); + cv::imwrite(vis_path, vis_im); + std::cout << "Detect Done! Saved: " << vis_path << std::endl; + return 0; +} diff --git a/fastdeploy/__init__.py b/fastdeploy/__init__.py index 500e7cc42a..68006c1bed 100644 --- a/fastdeploy/__init__.py +++ b/fastdeploy/__init__.py @@ -17,7 +17,7 @@ from .fastdeploy_runtime import * from . import fastdeploy_main as C from . 
import vision
-from .download import download
+from .download import download, download_and_decompress
 
 
 def TensorInfoStr(tensor_info):
diff --git a/fastdeploy/download.py b/fastdeploy/download.py
index e00af098df..67f21d8e76 100644
--- a/fastdeploy/download.py
+++ b/fastdeploy/download.py
@@ -156,7 +156,7 @@ def decompress(fname):
 
 def url2dir(url, path, rename=None):
     full_name = download(url, path, rename, show_progress=True)
-    print("SDK is donwloaded, now extracting...")
+    print("File is downloaded, now extracting...")
     if url.count(".tgz") > 0 or url.count(".tar") > 0 or url.count("zip") > 0:
         return decompress(full_name)
diff --git a/fastdeploy/utils/utils.h b/fastdeploy/utils/utils.h
index 23ca6ee51a..e605ee5a75 100644
--- a/fastdeploy/utils/utils.h
+++ b/fastdeploy/utils/utils.h
@@ -65,6 +65,10 @@ class FASTDEPLOY_DECL FDLogger {
   bool verbose_ = true;
 };
 
+#ifndef __REL_FILE__
+#define __REL_FILE__ __FILE__
+#endif
+
 #define FDERROR \
   FDLogger(true, "[ERROR]") << __REL_FILE__ << "(" << __LINE__ \
                             << ")::" << __FUNCTION__ << "\t"
diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h
index 4398463251..68c0881cac 100644
--- a/fastdeploy/vision.h
+++ b/fastdeploy/vision.h
@@ -18,6 +18,7 @@
 #include "fastdeploy/vision/megvii/yolox.h"
 #include "fastdeploy/vision/meituan/yolov6.h"
 #include "fastdeploy/vision/ppcls/model.h"
+#include "fastdeploy/vision/ppdet/ppyoloe.h"
 #include "fastdeploy/vision/ultralytics/yolov5.h"
 #include "fastdeploy/vision/wongkinyiu/yolor.h"
 #include "fastdeploy/vision/wongkinyiu/yolov7.h"
diff --git a/fastdeploy/vision/__init__.py b/fastdeploy/vision/__init__.py
index 7122bede0b..6acbf0c376 100644
--- a/fastdeploy/vision/__init__.py
+++ b/fastdeploy/vision/__init__.py
@@ -15,6 +15,7 @@
 
 from . import evaluation
 from . import ppcls
+from . import ppdet
 from . import ultralytics
 from . import meituan
 from . import megvii
diff --git a/fastdeploy/vision/common/processors/convert.cc b/fastdeploy/vision/common/processors/convert.cc
new file mode 100644
index 0000000000..a7ca6de07a
--- /dev/null
+++ b/fastdeploy/vision/common/processors/convert.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
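// Given the FDERROR definition in utils.h above, the __REL_FILE__ fallback
// just added means a failure logged from, say, Initialize() prints roughly
// the following (illustrative output; the path depends on whether the build
// system defines __REL_FILE__ itself):
//
//   [ERROR] fastdeploy/vision/wongkinyiu/yolor.cc(105)::Initialize  Failed to initialize fastdeploy backend.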
+
+#include "fastdeploy/vision/common/processors/convert.h"
+
+namespace fastdeploy {
+
+namespace vision {
+
+Convert::Convert(const std::vector<float>& alpha,
+                 const std::vector<float>& beta) {
+  FDASSERT(alpha.size() == beta.size(),
+           "Convert: requires the size of alpha equal to the size of beta.");
+  FDASSERT(alpha.size() != 0,
+           "Convert: requires the size of alpha and beta > 0.");
+  alpha_.assign(alpha.begin(), alpha.end());
+  beta_.assign(beta.begin(), beta.end());
+}
+
+bool Convert::CpuRun(Mat* mat) {
+  cv::Mat* im = mat->GetCpuMat();
+  std::vector<cv::Mat> split_im;
+  cv::split(*im, split_im);
+  for (int c = 0; c < im->channels(); c++) {
+    split_im[c].convertTo(split_im[c], CV_32FC1, alpha_[c], beta_[c]);
+  }
+  cv::merge(split_im, *im);
+  return true;
+}
+
+#ifdef ENABLE_OPENCV_CUDA
+bool Convert::GpuRun(Mat* mat) {
+  cv::cuda::GpuMat* im = mat->GetGpuMat();
+  std::vector<cv::cuda::GpuMat> split_im;
+  cv::cuda::split(*im, split_im);
+  for (int c = 0; c < im->channels(); c++) {
+    split_im[c].convertTo(split_im[c], CV_32FC1, alpha_[c], beta_[c]);
+  }
+  cv::cuda::merge(split_im, *im);
+  return true;
+}
+#endif
+
+bool Convert::Run(Mat* mat, const std::vector<float>& alpha,
+                  const std::vector<float>& beta, ProcLib lib) {
+  auto c = Convert(alpha, beta);
+  return c(mat, lib);
+}
+
+} // namespace vision
+} // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/common/processors/convert.h b/fastdeploy/vision/common/processors/convert.h
new file mode 100644
index 0000000000..5d5a5276f5
--- /dev/null
+++ b/fastdeploy/vision/common/processors/convert.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "fastdeploy/vision/common/processors/base.h"
+
+namespace fastdeploy {
+namespace vision {
+class Convert : public Processor {
+ public:
+  Convert(const std::vector<float>& alpha, const std::vector<float>& beta);
+
+  bool CpuRun(Mat* mat);
+#ifdef ENABLE_OPENCV_CUDA
+  bool GpuRun(Mat* mat);
+#endif
+  std::string Name() { return "Convert"; }
+
+  // Compute `result = mat * alpha + beta` directly by channel.
+  // The default behavior is the same as OpenCV's convertTo method.
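// Note on the pattern above: the static Run() helper (declared below)
// constructs a temporary Convert and invokes it through Processor's
// operator(), which dispatches to CpuRun or GpuRun based on the ProcLib
// argument. A new processor can follow the same recipe (a sketch under the
// assumption that base.h defines this contract, as the other processors in
// this directory suggest):
//
//   class Scale : public Processor {
//    public:
//     explicit Scale(float factor) : factor_(factor) {}
//     bool CpuRun(Mat* mat) { /* multiply pixels by factor_ */ return true; }
//     std::string Name() { return "Scale"; }
//     static bool Run(Mat* mat, float factor,
//                     ProcLib lib = ProcLib::OPENCV_CPU) {
//       auto s = Scale(factor);
//       return s(mat, lib);
//     }
//
//    private:
//     float factor_;
//   };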
+ static bool Run(Mat* mat, const std::vector& alpha, + const std::vector& beta, + ProcLib lib = ProcLib::OPENCV_CPU); + + private: + std::vector alpha_; + std::vector beta_; +}; +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/common/processors/transform.h b/fastdeploy/vision/common/processors/transform.h index 12eec8d72d..08073b4e42 100644 --- a/fastdeploy/vision/common/processors/transform.h +++ b/fastdeploy/vision/common/processors/transform.h @@ -17,6 +17,7 @@ #include "fastdeploy/vision/common/processors/cast.h" #include "fastdeploy/vision/common/processors/center_crop.h" #include "fastdeploy/vision/common/processors/color_space_convert.h" +#include "fastdeploy/vision/common/processors/convert.h" #include "fastdeploy/vision/common/processors/hwc2chw.h" #include "fastdeploy/vision/common/processors/normalize.h" #include "fastdeploy/vision/common/processors/pad.h" diff --git a/fastdeploy/vision/meituan/yolov6.cc b/fastdeploy/vision/meituan/yolov6.cc index b75f2016ee..8ac7377194 100644 --- a/fastdeploy/vision/meituan/yolov6.cc +++ b/fastdeploy/vision/meituan/yolov6.cc @@ -129,8 +129,12 @@ bool YOLOv6::Preprocess(Mat* mat, FDTensor* output, LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, is_scale_up, stride); BGR2RGB::Run(mat); - Normalize::Run(mat, std::vector(mat->Channels(), 0.0), - std::vector(mat->Channels(), 1.0)); + // Normalize::Run(mat, std::vector(mat->Channels(), 0.0), + // std::vector(mat->Channels(), 1.0)); + // Compute `result = mat * alpha + beta` directly by channel + std::vector alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f}; + std::vector beta = {0.0f, 0.0f, 0.0f}; + Convert::Run(mat, alpha, beta); // Record output shape of preprocessed image (*im_info)["output_shape"] = {static_cast(mat->Height()), diff --git a/fastdeploy/vision/ppcls/model.cc b/fastdeploy/vision/ppcls/model.cc index 915cb97512..a89a1e4731 100644 --- a/fastdeploy/vision/ppcls/model.cc +++ b/fastdeploy/vision/ppcls/model.cc @@ -1,3 +1,16 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include "fastdeploy/vision/ppcls/model.h" #include "fastdeploy/vision/utils/utils.h" diff --git a/fastdeploy/vision/ppcls/model.h b/fastdeploy/vision/ppcls/model.h index fae99d4f3c..71800a7d76 100644 --- a/fastdeploy/vision/ppcls/model.h +++ b/fastdeploy/vision/ppcls/model.h @@ -1,3 +1,17 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #pragma once
 #include "fastdeploy/fastdeploy_model.h"
 #include "fastdeploy/vision/common/processors/transform.h"
diff --git a/fastdeploy/vision/ppcls/ppcls_pybind.cc b/fastdeploy/vision/ppcls/ppcls_pybind.cc
index 828bef3c7a..10ff5ee109 100644
--- a/fastdeploy/vision/ppcls/ppcls_pybind.cc
+++ b/fastdeploy/vision/ppcls/ppcls_pybind.cc
@@ -14,7 +14,7 @@
 #include "fastdeploy/pybind/main.h"
 
 namespace fastdeploy {
-void BindPpClsModel(pybind11::module& m) {
+void BindPPCls(pybind11::module& m) {
   auto ppcls_module = m.def_submodule("ppcls", "Module to deploy PaddleClas.");
   pybind11::class_<vision::ppcls::Model, FastDeployModel>(ppcls_module, "Model")
       .def(pybind11::init<std::string, std::string, std::string, RuntimeOption,
                           Frontend>())
diff --git a/fastdeploy/vision/ppdet/ppdet_pybind.cc b/fastdeploy/vision/ppdet/ppdet_pybind.cc
new file mode 100644
--- /dev/null
+++ b/fastdeploy/vision/ppdet/ppdet_pybind.cc
@@ -0,0 +1,32 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "fastdeploy/pybind/main.h"
+
+namespace fastdeploy {
+void BindPPDet(pybind11::module& m) {
+  auto ppdet_module =
+      m.def_submodule("ppdet", "Module to deploy PaddleDetection.");
+  pybind11::class_<vision::ppdet::PPYOLOE, FastDeployModel>(ppdet_module,
+                                                            "PPYOLOE")
+      .def(pybind11::init<std::string, std::string, std::string, RuntimeOption,
+                          Frontend>())
+      .def("predict", [](vision::ppdet::PPYOLOE& self, pybind11::array& data,
+                         float conf_threshold, float nms_iou_threshold) {
+        auto mat = PyArrayToCvMat(data);
+        vision::DetectionResult res;
+        self.Predict(&mat, &res, conf_threshold, nms_iou_threshold);
+        return res;
+      });
+}
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/ppdet/ppyoloe.cc b/fastdeploy/vision/ppdet/ppyoloe.cc
new file mode 100644
index 0000000000..c215ecb0ca
--- /dev/null
+++ b/fastdeploy/vision/ppdet/ppyoloe.cc
@@ -0,0 +1,170 @@
+#include "fastdeploy/vision/ppdet/ppyoloe.h"
+#include "fastdeploy/vision/utils/utils.h"
+#include "yaml-cpp/yaml.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace ppdet {
+
+PPYOLOE::PPYOLOE(const std::string& model_file, const std::string& params_file,
+                 const std::string& config_file,
+                 const RuntimeOption& custom_option,
+                 const Frontend& model_format) {
+  config_file_ = config_file;
+  valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
+  valid_gpu_backends = {Backend::ORT, Backend::PDINFER};
+  runtime_option = custom_option;
+  runtime_option.model_format = model_format;
+  runtime_option.model_file = model_file;
+  runtime_option.params_file = params_file;
+  initialized = Initialize();
+}
+
+bool PPYOLOE::Initialize() {
+  if (!BuildPreprocessPipelineFromConfig()) {
+    std::cout << "Failed to build preprocess pipeline from configuration file."
+              << std::endl;
+    return false;
+  }
+  if (!InitRuntime()) {
+    std::cout << "Failed to initialize fastdeploy backend." << std::endl;
+    return false;
+  }
+  return true;
+}
+
+bool PPYOLOE::BuildPreprocessPipelineFromConfig() {
+  processors_.clear();
+  YAML::Node cfg;
+  try {
+    cfg = YAML::LoadFile(config_file_);
+  } catch (YAML::BadFile& e) {
+    std::cout << "Failed to load yaml file " << config_file_
+              << ", maybe you should check this file." << std::endl;
+    return false;
+  }
+
+  if (cfg["arch"].as<std::string>() != "YOLO") {
+    std::cout << "Require the arch of model is YOLO, but arch defined in "
+                 "config file is "
+              << cfg["arch"].as<std::string>() << "."
+              << std::endl;
+    return false;
+  }
+  processors_.push_back(std::make_shared<BGR2RGB>());
+
+  for (const auto& op : cfg["Preprocess"]) {
+    std::string op_name = op["type"].as<std::string>();
+    if (op_name == "NormalizeImage") {
+      auto mean = op["mean"].as<std::vector<float>>();
+      auto std = op["std"].as<std::vector<float>>();
+      bool is_scale = op["is_scale"].as<bool>();
+      processors_.push_back(std::make_shared<Normalize>(mean, std, is_scale));
+    } else if (op_name == "Resize") {
+      bool keep_ratio = op["keep_ratio"].as<bool>();
+      auto target_size = op["target_size"].as<std::vector<int>>();
+      int interp = op["interp"].as<int>();
+      FDASSERT(target_size.size() == 2,
+               "Require size of target_size be 2, but now it's " +
+                   std::to_string(target_size.size()) + ".");
+      FDASSERT(!keep_ratio,
+               "Only support keep_ratio is false while deploy "
+               "PaddleDetection model.");
+      int width = target_size[1];
+      int height = target_size[0];
+      processors_.push_back(
+          std::make_shared<Resize>(width, height, -1.0, -1.0, interp, false));
+    } else if (op_name == "Permute") {
+      processors_.push_back(std::make_shared<HWC2CHW>());
+    } else {
+      std::cout << "Unexpected preprocess operator: " << op_name << "."
+                << std::endl;
+      return false;
+    }
+  }
+  return true;
+}
+
+bool PPYOLOE::Preprocess(Mat* mat, std::vector<FDTensor>* outputs) {
+  int origin_w = mat->Width();
+  int origin_h = mat->Height();
+  for (size_t i = 0; i < processors_.size(); ++i) {
+    if (!(*(processors_[i].get()))(mat)) {
+      std::cout << "Failed to process image data in " << processors_[i]->Name()
+                << "." << std::endl;
+      return false;
+    }
+  }
+
+  outputs->resize(2);
+  (*outputs)[0].name = InputInfoOfRuntime(0).name;
+  mat->ShareWithTensor(&((*outputs)[0]));
+
+  // reshape to [1, c, h, w]
+  (*outputs)[0].shape.insert((*outputs)[0].shape.begin(), 1);
+
+  // scale_factor is the ratio of the preprocessed size to the origin size
+  (*outputs)[1].Allocate({1, 2}, FDDataType::FP32, InputInfoOfRuntime(1).name);
+  float* ptr = static_cast<float*>((*outputs)[1].MutableData());
+  ptr[0] = mat->Height() * 1.0 / origin_h;
+  ptr[1] = mat->Width() * 1.0 / origin_w;
+  return true;
+}
+
+bool PPYOLOE::Postprocess(std::vector<FDTensor>& infer_result,
+                          DetectionResult* result, float conf_threshold,
+                          float nms_threshold) {
+  FDASSERT(infer_result[1].shape[0] == 1,
+           "Only support batch = 1 in FastDeploy now.");
+  int box_num = 0;
+  if (infer_result[1].dtype == FDDataType::INT32) {
+    box_num = *(static_cast<int32_t*>(infer_result[1].Data()));
+  } else if (infer_result[1].dtype == FDDataType::INT64) {
+    box_num = *(static_cast<int64_t*>(infer_result[1].Data()));
+  } else {
+    FDASSERT(
+        false,
+        "The output box_num of PPYOLOE model should be type of int32/int64.");
+  }
+  result->Reserve(box_num);
+  float* box_data = static_cast<float*>(infer_result[0].Data());
+  // each box is laid out as [label_id, score, xmin, ymin, xmax, ymax]
+  for (int i = 0; i < box_num; ++i) {
+    if (box_data[i * 6 + 1] < conf_threshold) {
+      continue;
+    }
+    result->label_ids.push_back(box_data[i * 6]);
+    result->scores.push_back(box_data[i * 6 + 1]);
+    result->boxes.emplace_back(
+        std::array<float, 4>{box_data[i * 6 + 2], box_data[i * 6 + 3],
+                             box_data[i * 6 + 4] - box_data[i * 6 + 2],
+                             box_data[i * 6 + 5] - box_data[i * 6 + 3]});
+  }
+  return true;
+}
+
+bool PPYOLOE::Predict(cv::Mat* im, DetectionResult* result,
+                      float conf_threshold, float iou_threshold) {
+  Mat mat(*im);
+  std::vector<FDTensor> processed_data;
+  if (!Preprocess(&mat, &processed_data)) {
+    FDERROR << "Failed to preprocess input data while using model:"
+            << ModelName() << "." << std::endl;
+    return false;
+  }
+
+  std::vector<FDTensor> infer_result;
+  if (!Infer(processed_data, &infer_result)) {
+    FDERROR << "Failed to run inference while using model:" << ModelName()
+            << "." << std::endl;
+    return false;
+  }
+
+  if (!Postprocess(infer_result, result, conf_threshold, iou_threshold)) {
+    FDERROR << "Failed to postprocess while using model:" << ModelName() << "."
+            << std::endl;
+    return false;
+  }
+  return true;
+}
+
+}  // namespace ppdet
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/ppdet/ppyoloe.h b/fastdeploy/vision/ppdet/ppyoloe.h
new file mode 100644
index 0000000000..a3db268ca4
--- /dev/null
+++ b/fastdeploy/vision/ppdet/ppyoloe.h
@@ -0,0 +1,44 @@
+#pragma once
+#include "fastdeploy/fastdeploy_model.h"
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace ppdet {
+
+class FASTDEPLOY_DECL PPYOLOE : public FastDeployModel {
+ public:
+  PPYOLOE(const std::string& model_file, const std::string& params_file,
+          const std::string& config_file,
+          const RuntimeOption& custom_option = RuntimeOption(),
+          const Frontend& model_format = Frontend::PADDLE);
+
+  std::string ModelName() const { return "PaddleDetection/PPYOLOE"; }
+
+  virtual bool Initialize();
+
+  virtual bool BuildPreprocessPipelineFromConfig();
+
+  virtual bool Preprocess(Mat* mat, std::vector<FDTensor>* outputs);
+
+  virtual bool Postprocess(std::vector<FDTensor>& infer_result,
+                           DetectionResult* result, float conf_threshold,
+                           float nms_threshold);
+
+  virtual bool Predict(cv::Mat* im, DetectionResult* result,
+                       float conf_threshold = 0.5, float nms_threshold = 0.7);
+
+ private:
+  std::vector<std::shared_ptr<Processor>> processors_;
+  std::string config_file_;
+  // PaddleDetection can export model without nms
+  // This flag will help us to handle the different
+  // situation
+  bool has_nms_;
+};
+}  // namespace ppdet
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/ultralytics/yolov5.cc b/fastdeploy/vision/ultralytics/yolov5.cc
index c8c6e06a94..b2e6009b1c 100644
--- a/fastdeploy/vision/ultralytics/yolov5.cc
+++ b/fastdeploy/vision/ultralytics/yolov5.cc
@@ -126,8 +126,12 @@ bool YOLOv5::Preprocess(Mat* mat, FDTensor* output,
   LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, is_scale_up,
             stride);
   BGR2RGB::Run(mat);
-  Normalize::Run(mat, std::vector<float>(mat->Channels(), 0.0),
-                 std::vector<float>(mat->Channels(), 1.0));
+  // Normalize::Run(mat, std::vector<float>(mat->Channels(), 0.0),
+  //                std::vector<float>(mat->Channels(), 1.0));
+  // Compute `result = mat * alpha + beta` directly by channel
+  std::vector<float> alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f};
+  std::vector<float> beta = {0.0f, 0.0f, 0.0f};
+  Convert::Run(mat, alpha, beta);
 
   // Record output shape of preprocessed image
   (*im_info)["output_shape"] = {static_cast<float>(mat->Height()),
@@ -198,6 +202,11 @@ bool YOLOv5::Postprocess(
       result->scores.push_back(confidence);
     }
   }
+
+  if (result->boxes.size() == 0) {
+    return true;
+  }
+
   utils::NMS(result, nms_iou_threshold);
 
   // scale the boxes to the origin image shape
diff --git a/fastdeploy/vision/utils/sort_det_res.cc b/fastdeploy/vision/utils/sort_det_res.cc
index e4a0db9761..790126a6ac 100644
--- a/fastdeploy/vision/utils/sort_det_res.cc
+++ b/fastdeploy/vision/utils/sort_det_res.cc
@@ -68,7 +68,11 @@ void MergeSort(DetectionResult* result, size_t low, size_t high) {
 
 void SortDetectionResult(DetectionResult* result) {
   size_t low = 0;
-  size_t high = result->scores.size() - 1;
+  size_t high = result->scores.size();
+  if (high == 0) {
+    return;
+  }
+  high = high - 1;
   MergeSort(result, low, high);
 }
 
diff --git a/fastdeploy/vision/vision_pybind.cc b/fastdeploy/vision/vision_pybind.cc
index e4ba05b893..0334303ce6 100644
--- a/fastdeploy/vision/vision_pybind.cc
+++ b/fastdeploy/vision/vision_pybind.cc
@@ -16,7 +16,8 @@
 
 namespace fastdeploy {
 
-void BindPpClsModel(pybind11::module& m);
+void BindPPCls(pybind11::module& m);
+void BindPPDet(pybind11::module& m);
 void BindWongkinyiu(pybind11::module& m);
 void BindUltralytics(pybind11::module& m);
 void BindMeituan(pybind11::module& m);
@@ -41,7 +42,8 @@ void BindVision(pybind11::module& m) {
       .def("__repr__", &vision::DetectionResult::Str)
       .def("__str__", &vision::DetectionResult::Str);
 
-  BindPpClsModel(m);
+  BindPPCls(m);
+  BindPPDet(m);
   BindUltralytics(m);
   BindWongkinyiu(m);
   BindMeituan(m);
diff --git a/fastdeploy/vision/visualize/detection.cc b/fastdeploy/vision/visualize/detection.cc
index e5f01bdd35..6d60072447 100644
--- a/fastdeploy/vision/visualize/detection.cc
+++ b/fastdeploy/vision/visualize/detection.cc
@@ -43,7 +43,7 @@ void Visualize::VisDetection(cv::Mat* im, const DetectionResult& result,
     }
     std::string text = id + "," + score;
     int font = cv::FONT_HERSHEY_SIMPLEX;
-    cv::Size text_size = cv::getTextSize(text, font, font_size, 0.5, nullptr);
+    cv::Size text_size = cv::getTextSize(text, font, font_size, 1, nullptr);
     cv::Point origin;
     origin.x = rect.x;
     origin.y = rect.y;
@@ -52,7 +52,7 @@ void Visualize::VisDetection(cv::Mat* im, const DetectionResult& result,
                   text_size.width, text_size.height);
     cv::rectangle(*im, rect, rect_color, line_size);
     cv::putText(*im, text, origin, font, font_size, cv::Scalar(255, 255, 255),
-                0.5);
+                1);
   }
 }
 
diff --git a/model_zoo/vision/ppyoloe/README.md b/model_zoo/vision/ppyoloe/README.md
new file mode 100644
index 0000000000..42d18104ad
--- /dev/null
+++ b/model_zoo/vision/ppyoloe/README.md
@@ -0,0 +1,52 @@
+# PaddleDetection/PPYOLOE Deployment Example
+
+- The currently supported PaddleDetection version is [release/2.4](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4)
+
+This document explains how to quickly deploy and run inference with [PPYOLOE](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/ppyoloe). The directory is organized as follows
+```
+.
+├── cpp                     # C++ code directory
+│   ├── CMakeLists.txt      # CMakeLists file for building the C++ code
+│   ├── README.md           # C++ build and deployment document
+│   └── ppyoloe.cc          # C++ example code
+├── README.md               # PPYOLOE deployment document
+└── ppyoloe.py              # Python example code
+```
+
+## Install FastDeploy
+
+Install FastDeploy with the following command. Note that this installs `vision-cpu`; `vision-gpu` can be installed instead as needed
+```
+# Install the fastdeploy-python tool
+pip install fastdeploy-python
+```
+
+## Python Deployment
+
+Running the following code automatically downloads the PPYOLOE model and a test image
+```
+python ppyoloe.py
+```
+
+After it finishes, the visualized result is saved locally as `vis_result.jpg`, and the detection results are printed as follows
+```
+DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
+162.380249,132.057449, 463.178345, 413.167114, 0.962918, 33
+414.914642,141.148666, 91.275269, 308.688293, 0.951003, 0
+163.449234,129.669067, 35.253891, 135.111786, 0.900734, 0
+267.232239,142.290436, 31.578918, 126.329773, 0.848709, 0
+581.790833,179.027115, 30.893127, 135.484940, 0.837986, 0
+104.407021,72.602615, 22.900627, 75.469055, 0.796468, 0
+348.795380,70.122147, 18.806061, 85.829330, 0.785557, 0
+364.118683,92.457428, 17.437622, 89.212891, 0.774282, 0
+75.180283,192.470490, 41.898407, 55.552414, 0.712569, 56
+328.133759,61.894299, 19.100616, 65.633575, 0.710519, 0
+504.797760,181.732574, 107.740814, 248.115082, 0.708902, 0
+379.063080,64.762360, 15.956146, 68.312546, 0.680725, 0
+25.858747,186.564178, 34.958130, 56.007080, 0.580415, 0
+```
+
+## Other Documents
+
+- [C++ Deployment](./cpp/README.md)
+- [PPYOLOE API Reference](./api.md)
diff --git a/model_zoo/vision/ppyoloe/api.md b/model_zoo/vision/ppyoloe/api.md
new file mode 100644
index 0000000000..1c5cbcaadb
--- /dev/null
+++ b/model_zoo/vision/ppyoloe/api.md
@@ -0,0 +1,74 @@
+# PPYOLOE API Reference
+
+## Python API
+
+### PPYOLOE Class
+```
+fastdeploy.vision.ppdet.PPYOLOE(model_file, params_file, config_file, runtime_option=None, model_format=fd.Frontend.PADDLE)
+```
+Loads and initializes a PPYOLOE model. Both model_file and params_file are required; currently only the Paddle format is supported for model_format
+
+**Parameters**
+
+> * **model_file**(str): Path to the model file
+> * **params_file**(str): Path to the parameters file
+> * **config_file**(str): Inference configuration file of the model
+> * **runtime_option**(RuntimeOption): Backend inference options; None means the default configuration is used
+> * **model_format**(Frontend): Model format
+
+#### predict
+> ```
+> PPYOLOE.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
+> ```
+> Model prediction interface: takes an image and directly returns the detection results.
+>
+> **Parameters**
+>
+> > * **image_data**(np.ndarray): Input data, which must be in HWC, BGR format
+> > * **conf_threshold**(float): Confidence threshold used to filter detection boxes
+> > * **nms_iou_threshold**(float): IoU threshold used during NMS (automatically ignored when the model already contains NMS)
+
+See [ppyoloe.py](./ppyoloe.py) for example code
+
+
+## C++ API
+
+### PPYOLOE Class
+```
+fastdeploy::vision::ppdet::PPYOLOE(
+        const string& model_file,
+        const string& params_file,
+        const string& config_file,
+        const RuntimeOption& runtime_option = RuntimeOption(),
+        const Frontend& model_format = Frontend::PADDLE)
+```
+Loads and initializes a PPYOLOE model. Both model_file and params_file are required; currently only the Paddle format is supported for model_format
+
+**Parameters**
+
+> * **model_file**(str): Path to the model file
+> * **params_file**(str): Path to the parameters file
+> * **config_file**(str): Inference configuration file of the model
+> * **runtime_option**(RuntimeOption): Backend inference options; the default uses the default configuration
+> * **model_format**(Frontend): Model format
+
+#### Predict
+> ```
+> PPYOLOE::Predict(cv::Mat* im, DetectionResult* result,
+>                  float conf_threshold = 0.5,
+>                  float nms_threshold = 0.7)
+> ```
+> Model prediction interface: takes an image and directly returns the detection results.
+>
+> **Parameters**
+>
+> > * **im**: Input image, which must be in HWC, BGR format
+> > * **result**: Detection results, including the detection boxes and the confidence of each box
+> > * **conf_threshold**: Confidence threshold used to filter detection boxes
+> > * **nms_threshold**: IoU threshold used during NMS (automatically ignored when the model already contains NMS)
+
+See [cpp/ppyoloe.cc](cpp/ppyoloe.cc) for example code
+
+## Other APIs
+
+- [RuntimeOption configuration for model deployment](../../../docs/api/runtime_option.md)
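+
+A minimal end-to-end usage sketch of the Python API above (illustrative only: it assumes the model files and test image from the demo in this directory have already been downloaded, and that `DetectionResult` exposes the `boxes`/`scores`/`label_ids` fields of the C++ struct):
+
+```
+import cv2
+import fastdeploy as fd
+
+# Load the exported PaddleDetection model (paths taken from the demo)
+model = fd.vision.ppdet.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
+                                "ppyoloe_crn_l_300e_coco/model.pdiparams",
+                                "ppyoloe_crn_l_300e_coco/infer_cfg.yml")
+
+im = cv2.imread("000000014439_640x640.jpg")  # HWC, BGR
+result = model.predict(im, conf_threshold=0.5)
+
+# Iterate the parallel result lists: one box, score and label per detection
+for box, score, label_id in zip(result.boxes, result.scores, result.label_ids):
+    print(label_id, score, box)
+```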
diff --git a/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt b/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt
new file mode 100644
index 0000000000..e681566517
--- /dev/null
+++ b/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt
@@ -0,0 +1,17 @@
+PROJECT(ppyoloe_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.16)
+
+# On systems with an old C++ ABI, uncomment the line below for a compatible build
+# add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+# Path of the downloaded and extracted FastDeploy library
+set(FASTDEPLOY_INSTALL_DIR ${PROJECT_SOURCE_DIR}/fastdeploy-linux-x64-0.3.0/)
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Add the FastDeploy header dependencies
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(ppyoloe_demo ${PROJECT_SOURCE_DIR}/ppyoloe.cc)
+# Link against the FastDeploy libraries
+target_link_libraries(ppyoloe_demo ${FASTDEPLOY_LIBS})
diff --git a/model_zoo/vision/ppyoloe/cpp/README.md b/model_zoo/vision/ppyoloe/cpp/README.md
new file mode 100644
index 0000000000..1027c2eeb2
--- /dev/null
+++ b/model_zoo/vision/ppyoloe/cpp/README.md
@@ -0,0 +1,39 @@
+# Build the PPYOLOE Example
+
+
+```
+# Download and extract the inference library
+wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz
+tar xvf fastdeploy-linux-x64-0.0.3.tgz
+
+# Build the example code
+mkdir build && cd build
+cmake ..
+make -j
+
+# Download the model and test image
+wget https://bj.bcebos.com/paddle2onnx/fastdeploy/models/ppdet/ppyoloe_crn_l_300e_coco.tgz
+tar xvf ppyoloe_crn_l_300e_coco.tgz
+wget https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/demo/000000014439_640x640.jpg
+
+# Run
+./ppyoloe_demo
+```
+
+After it finishes, the visualized result is saved locally as `vis.jpeg`, and the detection boxes are printed to the terminal as follows
+```
+DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
+162.380249,132.057449, 463.178345, 413.167114, 0.962918, 33
+414.914642,141.148666, 91.275269, 308.688293, 0.951003, 0
+163.449234,129.669067, 35.253891, 135.111786, 0.900734, 0
+267.232239,142.290436, 31.578918, 126.329773, 0.848709, 0
+581.790833,179.027115, 30.893127, 135.484940, 0.837986, 0
+104.407021,72.602615, 22.900627, 75.469055, 0.796468, 0
+348.795380,70.122147, 18.806061, 85.829330, 0.785557, 0
+364.118683,92.457428, 17.437622, 89.212891, 0.774282, 0
+75.180283,192.470490, 41.898407, 55.552414, 0.712569, 56
+328.133759,61.894299, 19.100616, 65.633575, 0.710519, 0
+504.797760,181.732574, 107.740814, 248.115082, 0.708902, 0
+379.063080,64.762360, 15.956146, 68.312546, 0.680725, 0
+25.858747,186.564178, 34.958130, 56.007080, 0.580415, 0
+```
diff --git a/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc b/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc
new file mode 100644
index 0000000000..e63f29e62a
--- /dev/null
+++ b/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+int main() {
+  namespace vis = fastdeploy::vision;
+
+  std::string model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel";
+  std::string params_file = "ppyoloe_crn_l_300e_coco/model.pdiparams";
+  std::string config_file = "ppyoloe_crn_l_300e_coco/infer_cfg.yml";
+  std::string img_path = "000000014439_640x640.jpg";
+  std::string vis_path = "vis.jpeg";
+
+  auto model = vis::ppdet::PPYOLOE(model_file, params_file, config_file);
+  if (!model.Initialized()) {
+    std::cerr << "Init Failed." << std::endl;
+    return -1;
+  }
+
+  cv::Mat im = cv::imread(img_path);
+  cv::Mat vis_im = im.clone();
+
+  vis::DetectionResult res;
+  if (!model.Predict(&im, &res)) {
+    std::cerr << "Prediction Failed." << std::endl;
+    return -1;
+  } else {
+    std::cout << "Prediction Done!" << std::endl;
+  }
+
+  // Print the detection results
+  std::cout << res.Str() << std::endl;
+
+  // Visualize the prediction results
+  vis::Visualize::VisDetection(&vis_im, res);
+  cv::imwrite(vis_path, vis_im);
+  std::cout << "Detect Done! Saved: " << vis_path << std::endl;
+  return 0;
+}
diff --git a/model_zoo/vision/ppyoloe/ppyoloe.py b/model_zoo/vision/ppyoloe/ppyoloe.py
new file mode 100644
index 0000000000..7d79dfd8cf
--- /dev/null
+++ b/model_zoo/vision/ppyoloe/ppyoloe.py
@@ -0,0 +1,24 @@
+import fastdeploy as fd
+import cv2
+
+# Download the model and test image
+model_url = "https://bj.bcebos.com/paddle2onnx/fastdeploy/models/ppdet/ppyoloe_crn_l_300e_coco.tgz"
+test_jpg_url = "https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/demo/000000014439_640x640.jpg"
+fd.download_and_decompress(model_url, ".")
+fd.download(test_jpg_url, ".", show_progress=True)
+
+# Load the model
+model = fd.vision.ppdet.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel",
+                                "ppyoloe_crn_l_300e_coco/model.pdiparams",
+                                "ppyoloe_crn_l_300e_coco/infer_cfg.yml")
+
+# Predict on an image
+im = cv2.imread("000000014439_640x640.jpg")
+result = model.predict(im, conf_threshold=0.5)
+
+# Visualize the results
+fd.vision.visualize.vis_detection(im, result)
+cv2.imwrite("vis_result.jpg", im)
+
+# Print the prediction results
+print(result)
diff --git a/setup.py b/setup.py
index 5147025b4e..e76f057b1c 100644
--- a/setup.py
+++ b/setup.py
@@ -326,14 +326,25 @@ def run(self):
         shutil.copy("LICENSE", "fastdeploy")
         depend_libs = list()
 
-        # modify the search path of libraries
-        command = "patchelf --set-rpath '$ORIGIN/libs/' .setuptools-cmake-build/fastdeploy_main.cpython-37m-x86_64-linux-gnu.so"
-        # The sw_64 not suppot patchelf, so we just disable that.
-        if platform.machine() != 'sw_64' and platform.machine() != 'mips64':
-            assert os.system(
-                command
-            ) == 0, "patch fastdeploy_main.cpython-37m-x86_64-linux-gnu.so failed, the command: {}".format(
-                command)
+        if platform.system().lower() == "linux":
+            for f in os.listdir(".setuptools-cmake-build"):
+                full_name = os.path.join(".setuptools-cmake-build", f)
+                if not os.path.isfile(full_name):
+                    continue
+                if not full_name.count("fastdeploy_main.cpython-"):
+                    continue
+                if not full_name.endswith(".so"):
+                    continue
+                # modify the search path of libraries
+                command = "patchelf --set-rpath '$ORIGIN/libs/' {}".format(
+                    full_name)
+                # The sw_64 does not support patchelf, so we just disable it.
+                if platform.machine() != 'sw_64' and platform.machine(
+                ) != 'mips64':
+                    assert os.system(
+                        command
+                    ) == 0, "patch {} failed, the command: {}".format(
+                        full_name, command)
 
         for f in os.listdir(".setuptools-cmake-build"):
             if not os.path.isfile(os.path.join(".setuptools-cmake-build", f)):

From d6b98aa507ac785796541dfe18822204879376bf Mon Sep 17 00:00:00 2001
From: ziqi-jin <67993288+ziqi-jin@users.noreply.github.com>
Date: Wed, 20 Jul 2022 15:59:53 +0800
Subject: [PATCH 37/58] Develop (#11)

* Fix compile problem in different python version (#26)

* fix some usage problem in linux

* Fix compile problem

Co-authored-by: root

* Add PaddleDetetion/PPYOLOE model support (#22)

* add ppdet/ppyoloe

* Add demo code and documents

* add convert processor to vision (#27)

* update .gitignore

* Added checking for cmake include dir

* fixed missing trt_backend option bug when init from trt

* remove un-need data layout and add pre-check for dtype

* changed RGB2BRG to BGR2RGB in ppcls model

* add model_zoo yolov6 c++/python demo

* fixed CMakeLists.txt typos

* update yolov6 cpp/README.md

* add yolox c++/pybind and model_zoo demo

* move some helpers to private

* fixed CMakeLists.txt typos

* add normalize with alpha and beta

* add version notes for yolov5/yolov6/yolox

* add copyright to yolov5.cc

* revert normalize

* fixed some bugs in yolox

* fixed examples/CMakeLists.txt to avoid conflicts

* add convert processor to vision

* format examples/CMakeLists summary

* Fix bug while the inference result is empty with YOLOv5 (#29)

* Add multi-label function for yolov5

* Update README.md

Update doc

* Update fastdeploy_runtime.cc

fix variable option.trt_max_shape wrong name

* Update runtime_option.md

Update resnet model dynamic shape setting name from images to x

* Fix bug when inference result boxes are empty

* Delete detection.py

Co-authored-by: Jason
Co-authored-by: root
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
Co-authored-by: huangjianhui <852142024@qq.com>
---
 examples/CMakeLists.txt                       |  26 +--
 examples/vision/ppdet_ppyoloe.cc              |  51 ++++++
 fastdeploy/__init__.py                        |   2 +-
 fastdeploy/download.py                        |   2 +-
 fastdeploy/utils/utils.h                      |   4 +
 fastdeploy/vision.h                           |   1 +
 fastdeploy/vision/__init__.py                 |   1 +
 .../vision/common/processors/convert.cc      |  62 +++++++
 fastdeploy/vision/common/processors/convert.h |  42 +++++
 .../vision/common/processors/transform.h     |   1 +
 fastdeploy/vision/meituan/yolov6.cc           |  28 +--
 fastdeploy/vision/ppcls/model.cc              |  19 +-
 fastdeploy/vision/ppcls/model.h               |  16 +-
 fastdeploy/vision/ppcls/ppcls_pybind.cc       |   2 +-
 fastdeploy/vision/ppdet/__init__.py           |  39 ++++
 fastdeploy/vision/ppdet/ppdet_pybind.cc       |  32 ++++
 fastdeploy/vision/ppdet/ppyoloe.cc            | 170 ++++++++++++++++++
 fastdeploy/vision/ppdet/ppyoloe.h             |  44 +++++
 fastdeploy/vision/ultralytics/yolov5.cc       |  19 +-
 fastdeploy/vision/utils/sort_det_res.cc       |   6 +-
 fastdeploy/vision/vision_pybind.cc            |  10 +-
 fastdeploy/vision/visualize/detection.cc      |   4 +-
 model_zoo/vision/ppyoloe/README.md            |  52 ++++++
 model_zoo/vision/ppyoloe/api.md               |  74 ++++++++
 model_zoo/vision/ppyoloe/cpp/CMakeLists.txt   |  17 ++
 model_zoo/vision/ppyoloe/cpp/README.md        |  39 ++++
 model_zoo/vision/ppyoloe/cpp/ppyoloe.cc       |  51 ++++++
 model_zoo/vision/ppyoloe/ppyoloe.py           |  24 +++
 setup.py                                      |  30 +++-
 29 files changed, 818 insertions(+), 50 deletions(-)
 create mode 100644 examples/vision/ppdet_ppyoloe.cc
 create mode 100644 fastdeploy/vision/common/processors/convert.cc
 create mode 100644 fastdeploy/vision/common/processors/convert.h
 create mode 100644
fastdeploy/vision/ppdet/__init__.py create mode 100644 fastdeploy/vision/ppdet/ppdet_pybind.cc create mode 100644 fastdeploy/vision/ppdet/ppyoloe.cc create mode 100644 fastdeploy/vision/ppdet/ppyoloe.h create mode 100644 model_zoo/vision/ppyoloe/README.md create mode 100644 model_zoo/vision/ppyoloe/api.md create mode 100644 model_zoo/vision/ppyoloe/cpp/CMakeLists.txt create mode 100644 model_zoo/vision/ppyoloe/cpp/README.md create mode 100644 model_zoo/vision/ppyoloe/cpp/ppyoloe.cc create mode 100644 model_zoo/vision/ppyoloe/ppyoloe.py diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 1e2dc43bd4..112193c86a 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,24 +1,26 @@ -function(add_fastdeploy_executable field url model) +function(add_fastdeploy_executable FIELD CC_FILE) # temp target name/file var in function scope - set(TEMP_TARGET_FILE ${PROJECT_SOURCE_DIR}/examples/${field}/${url}_${model}.cc) - set(TEMP_TARGET_NAME ${field}_${url}_${model}) + set(TEMP_TARGET_FILE ${CC_FILE}) + string(REGEX MATCHALL "[0-9A-Za-z_]*.cc" FILE_NAME ${CC_FILE}) + string(REGEX REPLACE ".cc" "" FILE_PREFIX ${FILE_NAME}) + set(TEMP_TARGET_NAME ${FIELD}_${FILE_PREFIX}) if (EXISTS ${TEMP_TARGET_FILE} AND TARGET fastdeploy) add_executable(${TEMP_TARGET_NAME} ${TEMP_TARGET_FILE}) target_link_libraries(${TEMP_TARGET_NAME} PUBLIC fastdeploy) - message(STATUS "Found source file: [${field}/${url}_${model}.cc], ADD!!! fastdeploy executable: [${TEMP_TARGET_NAME}] !") - else () - message(WARNING "Can not found source file: [${field}/${url}_${model}.cc], SKIP!!! fastdeploy executable: [${TEMP_TARGET_NAME}] !") + message(STATUS " Added FastDeploy Executable : ${TEMP_TARGET_NAME}") endif() unset(TEMP_TARGET_FILE) unset(TEMP_TARGET_NAME) endfunction() # vision examples -if (WITH_VISION_EXAMPLES) - add_fastdeploy_executable(vision ultralytics yolov5) - add_fastdeploy_executable(vision meituan yolov6) - add_fastdeploy_executable(vision wongkinyiu yolov7) - add_fastdeploy_executable(vision megvii yolox) +if(WITH_VISION_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples/vision) + message(STATUS "") + message(STATUS "*************FastDeploy Examples Summary**********") + file(GLOB ALL_VISION_EXAMPLE_SRCS ${PROJECT_SOURCE_DIR}/examples/vision/*.cc) + foreach(_CC_FILE ${ALL_VISION_EXAMPLE_SRCS}) + add_fastdeploy_executable(vision ${_CC_FILE}) + endforeach() endif() -# other examples ... \ No newline at end of file +# other examples ... diff --git a/examples/vision/ppdet_ppyoloe.cc b/examples/vision/ppdet_ppyoloe.cc new file mode 100644 index 0000000000..b234021c92 --- /dev/null +++ b/examples/vision/ppdet_ppyoloe.cc @@ -0,0 +1,51 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#include "fastdeploy/vision.h"
+
+int main() {
+  namespace vis = fastdeploy::vision;
+
+  std::string model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel";
+  std::string params_file = "ppyoloe_crn_l_300e_coco/model.pdiparams";
+  std::string config_file = "ppyoloe_crn_l_300e_coco/infer_cfg.yml";
+  std::string img_path = "test.jpeg";
+  std::string vis_path = "vis.jpeg";
+
+  auto model = vis::ppdet::PPYOLOE(model_file, params_file, config_file);
+  if (!model.Initialized()) {
+    std::cerr << "Init Failed." << std::endl;
+    return -1;
+  }
+
+  cv::Mat im = cv::imread(img_path);
+  cv::Mat vis_im = im.clone();
+
+  vis::DetectionResult res;
+  if (!model.Predict(&im, &res)) {
+    std::cerr << "Prediction Failed." << std::endl;
+    return -1;
+  } else {
+    std::cout << "Prediction Done!" << std::endl;
+  }
+
+  // Print the detection results
+  std::cout << res.Str() << std::endl;
+
+  // Visualize the prediction results
+  vis::Visualize::VisDetection(&vis_im, res);
+  cv::imwrite(vis_path, vis_im);
+  std::cout << "Detect Done! Saved: " << vis_path << std::endl;
+  return 0;
+}
diff --git a/fastdeploy/__init__.py b/fastdeploy/__init__.py
index 500e7cc42a..68006c1bed 100644
--- a/fastdeploy/__init__.py
+++ b/fastdeploy/__init__.py
@@ -17,7 +17,7 @@
 from .fastdeploy_runtime import *
 from . import fastdeploy_main as C
 from . import vision
-from .download import download
+from .download import download, download_and_decompress
 
 
 def TensorInfoStr(tensor_info):
diff --git a/fastdeploy/download.py b/fastdeploy/download.py
index e00af098df..67f21d8e76 100644
--- a/fastdeploy/download.py
+++ b/fastdeploy/download.py
@@ -156,7 +156,7 @@ def decompress(fname):
 
 def url2dir(url, path, rename=None):
     full_name = download(url, path, rename, show_progress=True)
-    print("SDK is donwloaded, now extracting...")
+    print("File is downloaded, now extracting...")
     if url.count(".tgz") > 0 or url.count(".tar") > 0 or url.count("zip") > 0:
         return decompress(full_name)
diff --git a/fastdeploy/utils/utils.h b/fastdeploy/utils/utils.h
index 1b9f625b5e..9312084265 100644
--- a/fastdeploy/utils/utils.h
+++ b/fastdeploy/utils/utils.h
@@ -64,6 +64,10 @@ class FASTDEPLOY_DECL FDLogger {
   bool verbose_ = true;
 };
 
+#ifndef __REL_FILE__
+#define __REL_FILE__ __FILE__
+#endif
+
 #define FDERROR \
   FDLogger(true, "[ERROR]") \
       << __REL_FILE__ << "(" << __LINE__ << ")::" << __FUNCTION__ << "\t"
diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h
index ac3f006c0a..cafe310c70 100644
--- a/fastdeploy/vision.h
+++ b/fastdeploy/vision.h
@@ -16,6 +16,7 @@
 #include "fastdeploy/core/config.h"
 #ifdef ENABLE_VISION
 #include "fastdeploy/vision/ppcls/model.h"
+#include "fastdeploy/vision/ppdet/ppyoloe.h"
 #include "fastdeploy/vision/ultralytics/yolov5.h"
 #include "fastdeploy/vision/wongkinyiu/yolov7.h"
 #include "fastdeploy/vision/meituan/yolov6.h"
diff --git a/fastdeploy/vision/__init__.py b/fastdeploy/vision/__init__.py
index 7122bede0b..6acbf0c376 100644
--- a/fastdeploy/vision/__init__.py
+++ b/fastdeploy/vision/__init__.py
@@ -15,6 +15,7 @@
 
 from . import evaluation
 from . import ppcls
+from . import ppdet
 from . import ultralytics
 from . import meituan
 from . import megvii
diff --git a/fastdeploy/vision/common/processors/convert.cc b/fastdeploy/vision/common/processors/convert.cc
new file mode 100644
index 0000000000..a7ca6de07a
--- /dev/null
+++ b/fastdeploy/vision/common/processors/convert.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/common/processors/convert.h"
+
+namespace fastdeploy {
+
+namespace vision {
+
+Convert::Convert(const std::vector<float>& alpha,
+                 const std::vector<float>& beta) {
+  FDASSERT(alpha.size() == beta.size(),
+           "Convert: requires the size of alpha equal to the size of beta.");
+  FDASSERT(alpha.size() != 0,
+           "Convert: requires the size of alpha and beta > 0.");
+  alpha_.assign(alpha.begin(), alpha.end());
+  beta_.assign(beta.begin(), beta.end());
+}
+
+bool Convert::CpuRun(Mat* mat) {
+  cv::Mat* im = mat->GetCpuMat();
+  std::vector<cv::Mat> split_im;
+  cv::split(*im, split_im);
+  for (int c = 0; c < im->channels(); c++) {
+    split_im[c].convertTo(split_im[c], CV_32FC1, alpha_[c], beta_[c]);
+  }
+  cv::merge(split_im, *im);
+  return true;
+}
+
+#ifdef ENABLE_OPENCV_CUDA
+bool Convert::GpuRun(Mat* mat) {
+  cv::cuda::GpuMat* im = mat->GetGpuMat();
+  std::vector<cv::cuda::GpuMat> split_im;
+  cv::cuda::split(*im, split_im);
+  for (int c = 0; c < im->channels(); c++) {
+    split_im[c].convertTo(split_im[c], CV_32FC1, alpha_[c], beta_[c]);
+  }
+  cv::cuda::merge(split_im, *im);
+  return true;
+}
+#endif
+
+bool Convert::Run(Mat* mat, const std::vector<float>& alpha,
+                  const std::vector<float>& beta, ProcLib lib) {
+  auto c = Convert(alpha, beta);
+  return c(mat, lib);
+}
+
+}  // namespace vision
+}  // namespace fastdeploy
\ No newline at end of file
diff --git a/fastdeploy/vision/common/processors/convert.h b/fastdeploy/vision/common/processors/convert.h
new file mode 100644
index 0000000000..5d5a5276f5
--- /dev/null
+++ b/fastdeploy/vision/common/processors/convert.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "fastdeploy/vision/common/processors/base.h"
+
+namespace fastdeploy {
+namespace vision {
+class Convert : public Processor {
+ public:
+  Convert(const std::vector<float>& alpha, const std::vector<float>& beta);
+
+  bool CpuRun(Mat* mat);
+#ifdef ENABLE_OPENCV_CUDA
+  bool GpuRun(Mat* mat);
+#endif
+  std::string Name() { return "Convert"; }
+
+  // Compute `result = mat * alpha + beta` directly by channel.
+  // The default behavior is the same as OpenCV's convertTo method.
+  static bool Run(Mat* mat, const std::vector<float>& alpha,
+                  const std::vector<float>& beta,
+                  ProcLib lib = ProcLib::OPENCV_CPU);
+
+ private:
+  std::vector<float> alpha_;
+  std::vector<float> beta_;
+};
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/common/processors/transform.h b/fastdeploy/vision/common/processors/transform.h
index 12eec8d72d..08073b4e42 100644
--- a/fastdeploy/vision/common/processors/transform.h
+++ b/fastdeploy/vision/common/processors/transform.h
@@ -17,6 +17,7 @@
 #include "fastdeploy/vision/common/processors/cast.h"
 #include "fastdeploy/vision/common/processors/center_crop.h"
 #include "fastdeploy/vision/common/processors/color_space_convert.h"
+#include "fastdeploy/vision/common/processors/convert.h"
 #include "fastdeploy/vision/common/processors/hwc2chw.h"
 #include "fastdeploy/vision/common/processors/normalize.h"
 #include "fastdeploy/vision/common/processors/pad.h"
diff --git a/fastdeploy/vision/meituan/yolov6.cc b/fastdeploy/vision/meituan/yolov6.cc
index 8f37bf89c6..8ac7377194 100644
--- a/fastdeploy/vision/meituan/yolov6.cc
+++ b/fastdeploy/vision/meituan/yolov6.cc
@@ -25,14 +25,14 @@ namespace meituan {
 void LetterBox(Mat* mat, std::vector<int> size, std::vector<float> color,
                bool _auto, bool scale_fill = false, bool scale_up = true,
                int stride = 32) {
-  float scale = std::min(size[1] * 1.0f / static_cast<float>(mat->Height()),
-                         size[0] * 1.0f / static_cast<float>(mat->Width()));
+  float scale = std::min(size[1] * 1.0f / static_cast<float>(mat->Height()),
+                         size[0] * 1.0f / static_cast<float>(mat->Width()));
   if (!scale_up) {
     scale = std::min(scale, 1.0f);
   }
 
   int resize_h = int(round(static_cast<float>(mat->Height()) * scale));
-  int resize_w = int(round(static_cast<float>(mat->Width()) * scale));
+  int resize_w = int(round(static_cast<float>(mat->Width()) * scale));
 
   int pad_w = size[0] - resize_w;
   int pad_h = size[1] - resize_h;
@@ -85,13 +85,13 @@ bool YOLOv6::Initialize() {
   is_scale_up = false;
   stride = 32;
   max_wh = 4096.0f;
-  
+
   if (!InitRuntime()) {
     FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
     return false;
   }
-  // Check if the input shape is dynamic after Runtime already initialized,
-  // Note that, We need to force is_mini_pad 'false' to keep static
+  // Check if the input shape is dynamic after Runtime already initialized,
+  // Note that we need to force is_mini_pad 'false' to keep static
   // shape after padding (LetterBox) when the is_dynamic_shape is 'false'.
   is_dynamic_input_ = false;
   auto shape = InputInfoOfRuntime(0).shape;
@@ -102,7 +102,7 @@ bool YOLOv6::Initialize() {
       break;
     }
   }
-  if (!is_dynamic_input_) {
+  if (!is_dynamic_input_) {
     is_mini_pad = false;
   }
   return true;
@@ -111,15 +111,15 @@ bool YOLOv6::Preprocess(Mat* mat, FDTensor* output,
                         std::map<std::string, std::array<float, 2>>* im_info) {
   // process after image load
-  float ratio = std::min(size[1] * 1.0f / static_cast<float>(mat->Height()),
-                         size[0] * 1.0f / static_cast<float>(mat->Width()));
+  float ratio = std::min(size[1] * 1.0f / static_cast<float>(mat->Height()),
+                         size[0] * 1.0f / static_cast<float>(mat->Width()));
   if (ratio != 1.0) {
     int interp = cv::INTER_AREA;
     if (ratio > 1.0) {
       interp = cv::INTER_LINEAR;
     }
     int resize_h = int(round(static_cast<float>(mat->Height()) * ratio));
-    int resize_w = int(round(static_cast<float>(mat->Width()) * ratio));
+    int resize_w = int(round(static_cast<float>(mat->Width()) * ratio));
     Resize::Run(mat, resize_w, resize_h, -1, -1, interp);
   }
   // yolov6's preprocess steps
@@ -129,8 +129,12 @@ bool YOLOv6::Preprocess(Mat* mat, FDTensor* output,
   LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, is_scale_up,
             stride);
   BGR2RGB::Run(mat);
-  Normalize::Run(mat, std::vector<float>(mat->Channels(), 0.0),
-                 std::vector<float>(mat->Channels(), 1.0));
+  // Normalize::Run(mat, std::vector<float>(mat->Channels(), 0.0),
+  //                std::vector<float>(mat->Channels(), 1.0));
+  // Compute `result = mat * alpha + beta` directly by channel
+  std::vector<float> alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f};
+  std::vector<float> beta = {0.0f, 0.0f, 0.0f};
+  Convert::Run(mat, alpha, beta);
 
   // Record output shape of preprocessed image
   (*im_info)["output_shape"] = {static_cast<float>(mat->Height()),
diff --git a/fastdeploy/vision/ppcls/model.cc b/fastdeploy/vision/ppcls/model.cc
index 915cb97512..c4e5b767c7 100644
--- a/fastdeploy/vision/ppcls/model.cc
+++ b/fastdeploy/vision/ppcls/model.cc
@@ -1,3 +1,16 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 #include "fastdeploy/vision/ppcls/model.h"
 
 #include "fastdeploy/vision/utils/utils.h"
@@ -135,6 +148,6 @@ bool Model::Predict(cv::Mat* im, ClassifyResult* result, int topk) {
   return true;
 }
 
-}  // namespace ppcls
-}  // namespace vision
-}  // namespace fastdeploy
+}  // namespace ppcls
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/ppcls/model.h b/fastdeploy/vision/ppcls/model.h
index 36841d74c6..265f92d32b 100644
--- a/fastdeploy/vision/ppcls/model.h
+++ b/fastdeploy/vision/ppcls/model.h
@@ -1,7 +1,21 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 #pragma once
 #include "fastdeploy/fastdeploy_model.h"
-#include "fastdeploy/vision/common/result.h"
 #include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
 
 namespace fastdeploy {
 namespace vision {
diff --git a/fastdeploy/vision/ppcls/ppcls_pybind.cc b/fastdeploy/vision/ppcls/ppcls_pybind.cc
index ef3fffee8e..1abc0b2b7c 100644
--- a/fastdeploy/vision/ppcls/ppcls_pybind.cc
+++ b/fastdeploy/vision/ppcls/ppcls_pybind.cc
@@ -14,7 +14,7 @@
 #include "fastdeploy/pybind/main.h"
 
 namespace fastdeploy {
-void BindPpClsModel(pybind11::module& m) {
+void BindPPCls(pybind11::module& m) {
   auto ppcls_module = m.def_submodule("ppcls", "Module to deploy PaddleClas.");
   pybind11::class_<vision::ppcls::Model, FastDeployModel>(ppcls_module, "Model")
       .def(pybind11::init<std::string, std::string, std::string, RuntimeOption,
                           Frontend>())
diff --git a/fastdeploy/vision/ppdet/ppdet_pybind.cc b/fastdeploy/vision/ppdet/ppdet_pybind.cc
new file mode 100644
--- /dev/null
+++ b/fastdeploy/vision/ppdet/ppdet_pybind.cc
@@ -0,0 +1,32 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "fastdeploy/pybind/main.h"
+
+namespace fastdeploy {
+void BindPPDet(pybind11::module& m) {
+  auto ppdet_module =
+      m.def_submodule("ppdet", "Module to deploy PaddleDetection.");
+  pybind11::class_<vision::ppdet::PPYOLOE, FastDeployModel>(ppdet_module,
+                                                            "PPYOLOE")
+      .def(pybind11::init<std::string, std::string, std::string, RuntimeOption,
+                          Frontend>())
+      .def("predict", [](vision::ppdet::PPYOLOE& self, pybind11::array& data,
+                         float conf_threshold, float nms_iou_threshold) {
+        auto mat = PyArrayToCvMat(data);
+        vision::DetectionResult res;
+        self.Predict(&mat, &res, conf_threshold, nms_iou_threshold);
+        return res;
+      });
+}
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/ppdet/ppyoloe.cc b/fastdeploy/vision/ppdet/ppyoloe.cc
new file mode 100644
index 0000000000..c215ecb0ca
--- /dev/null
+++ b/fastdeploy/vision/ppdet/ppyoloe.cc
@@ -0,0 +1,170 @@
+#include "fastdeploy/vision/ppdet/ppyoloe.h"
+#include "fastdeploy/vision/utils/utils.h"
+#include "yaml-cpp/yaml.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace ppdet {
+
+PPYOLOE::PPYOLOE(const std::string& model_file, const std::string& params_file,
+                 const std::string& config_file,
+                 const RuntimeOption& custom_option,
+                 const Frontend& model_format) {
+  config_file_ = config_file;
+  valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
+  valid_gpu_backends = {Backend::ORT, Backend::PDINFER};
+  runtime_option = custom_option;
+  runtime_option.model_format = model_format;
+  runtime_option.model_file = model_file;
+  runtime_option.params_file = params_file;
+  initialized = Initialize();
+}
+
+bool PPYOLOE::Initialize() {
+  if (!BuildPreprocessPipelineFromConfig()) {
+    std::cout << "Failed to build preprocess pipeline from configuration file."
+              << std::endl;
+    return false;
+  }
+  if (!InitRuntime()) {
+    std::cout << "Failed to initialize fastdeploy backend." << std::endl;
+    return false;
+  }
+  return true;
+}
+
+bool PPYOLOE::BuildPreprocessPipelineFromConfig() {
+  processors_.clear();
+  YAML::Node cfg;
+  try {
+    cfg = YAML::LoadFile(config_file_);
+  } catch (YAML::BadFile& e) {
+    std::cout << "Failed to load yaml file " << config_file_
+              << ", maybe you should check this file." << std::endl;
+    return false;
+  }
+
+  if (cfg["arch"].as<std::string>() != "YOLO") {
+    std::cout << "Require the arch of model is YOLO, but arch defined in "
+                 "config file is "
+              << cfg["arch"].as<std::string>() << "."
+              << std::endl;
+    return false;
+  }
+  processors_.push_back(std::make_shared<BGR2RGB>());
+
+  for (const auto& op : cfg["Preprocess"]) {
+    std::string op_name = op["type"].as<std::string>();
+    if (op_name == "NormalizeImage") {
+      auto mean = op["mean"].as<std::vector<float>>();
+      auto std = op["std"].as<std::vector<float>>();
+      bool is_scale = op["is_scale"].as<bool>();
+      processors_.push_back(std::make_shared<Normalize>(mean, std, is_scale));
+    } else if (op_name == "Resize") {
+      bool keep_ratio = op["keep_ratio"].as<bool>();
+      auto target_size = op["target_size"].as<std::vector<int>>();
+      int interp = op["interp"].as<int>();
+      FDASSERT(target_size.size() == 2,
+               "Require size of target_size be 2, but now it's " +
+                   std::to_string(target_size.size()) + ".");
+      FDASSERT(!keep_ratio,
+               "Only support keep_ratio is false while deploy "
+               "PaddleDetection model.");
+      int width = target_size[1];
+      int height = target_size[0];
+      processors_.push_back(
+          std::make_shared<Resize>(width, height, -1.0, -1.0, interp, false));
+    } else if (op_name == "Permute") {
+      processors_.push_back(std::make_shared<HWC2CHW>());
+    } else {
+      std::cout << "Unexpected preprocess operator: " << op_name << "."
+                << std::endl;
+      return false;
+    }
+  }
+  return true;
+}
+
+bool PPYOLOE::Preprocess(Mat* mat, std::vector<FDTensor>* outputs) {
+  int origin_w = mat->Width();
+  int origin_h = mat->Height();
+  for (size_t i = 0; i < processors_.size(); ++i) {
+    if (!(*(processors_[i].get()))(mat)) {
+      std::cout << "Failed to process image data in " << processors_[i]->Name()
+                << "." << std::endl;
+      return false;
+    }
+  }
+
+  outputs->resize(2);
+  (*outputs)[0].name = InputInfoOfRuntime(0).name;
+  mat->ShareWithTensor(&((*outputs)[0]));
+
+  // reshape to [1, c, h, w]
+  (*outputs)[0].shape.insert((*outputs)[0].shape.begin(), 1);
+
+  // scale_factor is the ratio of the preprocessed size to the origin size
+  (*outputs)[1].Allocate({1, 2}, FDDataType::FP32, InputInfoOfRuntime(1).name);
+  float* ptr = static_cast<float*>((*outputs)[1].MutableData());
+  ptr[0] = mat->Height() * 1.0 / origin_h;
+  ptr[1] = mat->Width() * 1.0 / origin_w;
+  return true;
+}
+
+bool PPYOLOE::Postprocess(std::vector<FDTensor>& infer_result,
+                          DetectionResult* result, float conf_threshold,
+                          float nms_threshold) {
+  FDASSERT(infer_result[1].shape[0] == 1,
+           "Only support batch = 1 in FastDeploy now.");
+  int box_num = 0;
+  if (infer_result[1].dtype == FDDataType::INT32) {
+    box_num = *(static_cast<int32_t*>(infer_result[1].Data()));
+  } else if (infer_result[1].dtype == FDDataType::INT64) {
+    box_num = *(static_cast<int64_t*>(infer_result[1].Data()));
+  } else {
+    FDASSERT(
+        false,
+        "The output box_num of PPYOLOE model should be type of int32/int64.");
+  }
+  result->Reserve(box_num);
+  float* box_data = static_cast<float*>(infer_result[0].Data());
+  // each box is laid out as [label_id, score, xmin, ymin, xmax, ymax]
+  for (int i = 0; i < box_num; ++i) {
+    if (box_data[i * 6 + 1] < conf_threshold) {
+      continue;
+    }
+    result->label_ids.push_back(box_data[i * 6]);
+    result->scores.push_back(box_data[i * 6 + 1]);
+    result->boxes.emplace_back(
+        std::array<float, 4>{box_data[i * 6 + 2], box_data[i * 6 + 3],
+                             box_data[i * 6 + 4] - box_data[i * 6 + 2],
+                             box_data[i * 6 + 5] - box_data[i * 6 + 3]});
+  }
+  return true;
+}
+
+bool PPYOLOE::Predict(cv::Mat* im, DetectionResult* result,
+                      float conf_threshold, float iou_threshold) {
+  Mat mat(*im);
+  std::vector<FDTensor> processed_data;
+  if (!Preprocess(&mat, &processed_data)) {
+    FDERROR << "Failed to preprocess input data while using model:"
+            << ModelName() << "." << std::endl;
+    return false;
+  }
+
+  std::vector<FDTensor> infer_result;
+  if (!Infer(processed_data, &infer_result)) {
+    FDERROR << "Failed to run inference while using model:" << ModelName()
+            << "." << std::endl;
+    return false;
+  }
+
+  if (!Postprocess(infer_result, result, conf_threshold, iou_threshold)) {
+    FDERROR << "Failed to postprocess while using model:" << ModelName() << "."
+            << std::endl;
+    return false;
+  }
+  return true;
+}
+
+}  // namespace ppdet
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/ppdet/ppyoloe.h b/fastdeploy/vision/ppdet/ppyoloe.h
new file mode 100644
index 0000000000..a3db268ca4
--- /dev/null
+++ b/fastdeploy/vision/ppdet/ppyoloe.h
@@ -0,0 +1,44 @@
+#pragma once
+#include "fastdeploy/fastdeploy_model.h"
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace ppdet {
+
+class FASTDEPLOY_DECL PPYOLOE : public FastDeployModel {
+ public:
+  PPYOLOE(const std::string& model_file, const std::string& params_file,
+          const std::string& config_file,
+          const RuntimeOption& custom_option = RuntimeOption(),
+          const Frontend& model_format = Frontend::PADDLE);
+
+  std::string ModelName() const { return "PaddleDetection/PPYOLOE"; }
+
+  virtual bool Initialize();
+
+  virtual bool BuildPreprocessPipelineFromConfig();
+
+  virtual bool Preprocess(Mat* mat, std::vector<FDTensor>* outputs);
+
+  virtual bool Postprocess(std::vector<FDTensor>& infer_result,
+                           DetectionResult* result, float conf_threshold,
+                           float nms_threshold);
+
+  virtual bool Predict(cv::Mat* im, DetectionResult* result,
+                       float conf_threshold = 0.5, float nms_threshold = 0.7);
+
+ private:
+  std::vector<std::shared_ptr<Processor>> processors_;
+  std::string config_file_;
+  // PaddleDetection can export model without nms
+  // This flag will help us to handle the different
+  // situation
+  bool has_nms_;
+};
+}  // namespace ppdet
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/ultralytics/yolov5.cc b/fastdeploy/vision/ultralytics/yolov5.cc
index 193cfe9794..0b7e50e735 100644
--- a/fastdeploy/vision/ultralytics/yolov5.cc
+++ b/fastdeploy/vision/ultralytics/yolov5.cc
@@ -87,8 +87,8 @@ bool YOLOv5::Initialize() {
     FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
     return false;
   }
-  // Check if the input shape is dynamic after Runtime already initialized,
-  // Note that, We need to force is_mini_pad 'false' to keep static
+  // Check if the input shape is dynamic after Runtime already initialized,
+  // Note that we need to force is_mini_pad 'false' to keep static
   // shape after padding (LetterBox) when the is_dynamic_shape is 'false'.
   is_dynamic_input_ = false;
   auto shape = InputInfoOfRuntime(0).shape;
@@ -99,7 +99,7 @@ bool YOLOv5::Initialize() {
       break;
     }
   }
-  if (!is_dynamic_input_) {
+  if (!is_dynamic_input_) {
    is_mini_pad = false;
  }
  return true;
@@ -126,8 +126,12 @@ bool YOLOv5::Preprocess(Mat* mat, FDTensor* output,
   LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, is_scale_up,
             stride);
   BGR2RGB::Run(mat);
-  Normalize::Run(mat, std::vector<float>(mat->Channels(), 0.0),
-                 std::vector<float>(mat->Channels(), 1.0));
+  // Normalize::Run(mat, std::vector<float>(mat->Channels(), 0.0),
+  //                std::vector<float>(mat->Channels(), 1.0));
+  // Compute `result = mat * alpha + beta` directly by channel
+  std::vector<float> alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f};
+  std::vector<float> beta = {0.0f, 0.0f, 0.0f};
+  Convert::Run(mat, alpha, beta);
 
   // Record output shape of preprocessed image
   (*im_info)["output_shape"] = {static_cast<float>(mat->Height()),
@@ -198,6 +202,11 @@ bool YOLOv5::Postprocess(
       result->scores.push_back(confidence);
     }
   }
+
+  if (result->boxes.size() == 0) {
+    return true;
+  }
+
   utils::NMS(result, nms_iou_threshold);
 
   // scale the boxes to the origin image shape
diff --git a/fastdeploy/vision/utils/sort_det_res.cc b/fastdeploy/vision/utils/sort_det_res.cc
index e4a0db9761..93dbb69694 100644
--- a/fastdeploy/vision/utils/sort_det_res.cc
+++ b/fastdeploy/vision/utils/sort_det_res.cc
@@ -68,7 +68,11 @@ void MergeSort(DetectionResult* result, size_t low, size_t high) {
 
 void SortDetectionResult(DetectionResult* result) {
   size_t low = 0;
-  size_t high = result->scores.size() - 1;
+  size_t high = result->scores.size();
+  if (high == 0) {
+    return;
+  }
+  high = high - 1;
   MergeSort(result, low, high);
 }
 
diff --git a/fastdeploy/vision/vision_pybind.cc b/fastdeploy/vision/vision_pybind.cc
index 41ada5541a..0334303ce6 100644
--- a/fastdeploy/vision/vision_pybind.cc
+++ b/fastdeploy/vision/vision_pybind.cc
@@ -16,7 +16,8 @@
 
 namespace fastdeploy {
 
-void BindPpClsModel(pybind11::module& m);
+void BindPPCls(pybind11::module& m);
+void BindPPDet(pybind11::module& m);
 void BindWongkinyiu(pybind11::module& m);
 void BindUltralytics(pybind11::module& m);
 void BindMeituan(pybind11::module& m);
@@ -41,13 +42,14 @@ void BindVision(pybind11::module& m) {
       .def("__repr__", &vision::DetectionResult::Str)
       .def("__str__", &vision::DetectionResult::Str);
 
-  BindPpClsModel(m);
+  BindPPCls(m);
+  BindPPDet(m);
   BindUltralytics(m);
   BindWongkinyiu(m);
   BindMeituan(m);
   BindMegvii(m);
 #ifdef ENABLE_VISION_VISUALIZE
   BindVisualize(m);
-#endif
+#endif
 }
-}  // namespace fastdeploy
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/visualize/detection.cc b/fastdeploy/vision/visualize/detection.cc
index d0c4116148..5b5538bff7 100644
--- a/fastdeploy/vision/visualize/detection.cc
+++ b/fastdeploy/vision/visualize/detection.cc
@@ -43,7 +43,7 @@ void Visualize::VisDetection(cv::Mat* im, const DetectionResult& result,
     }
     std::string text = id + "," + score;
     int font = cv::FONT_HERSHEY_SIMPLEX;
-    cv::Size text_size = cv::getTextSize(text, font, font_size, 0.5, nullptr);
+    cv::Size text_size = cv::getTextSize(text, font, font_size, 1, nullptr);
     cv::Point origin;
     origin.x = rect.x;
     origin.y = rect.y;
@@ -52,7 +52,7 @@ void Visualize::VisDetection(cv::Mat* im, const DetectionResult& result,
                   text_size.width, text_size.height);
     cv::rectangle(*im, rect, rect_color, line_size);
     cv::putText(*im, text, origin, font, font_size, cv::Scalar(255, 255, 255),
-                0.5);
+                1);
   }
 }
 
diff --git a/model_zoo/vision/ppyoloe/README.md b/model_zoo/vision/ppyoloe/README.md
new file mode 100644
index 0000000000..42d18104ad
--- /dev/null
+++ b/model_zoo/vision/ppyoloe/README.md
@@ -0,0 +1,52 @@
+# PaddleDetection/PPYOLOE Deployment Example
+
+- The currently supported PaddleDetection version is [release/2.4](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4)
+
+This document explains how to quickly deploy and run inference with [PPYOLOE](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/ppyoloe). The directory is organized as follows
+```
+.
+├── cpp                     # C++ code directory
+│   ├── CMakeLists.txt      # CMakeLists file for building the C++ code
+│   ├── README.md           # C++ build and deployment document
+│   └── ppyoloe.cc          # C++ example code
+├── README.md               # PPYOLOE deployment document
+└── ppyoloe.py              # Python example code
+```
+
+## Install FastDeploy
+
+Install FastDeploy with the following command. Note that this installs `vision-cpu`; `vision-gpu` can be installed instead as needed
+```
+# Install the fastdeploy-python tool
+pip install fastdeploy-python
+```
+
+## Python Deployment
+
+Running the following code automatically downloads the PPYOLOE model and a test image
+```
+python ppyoloe.py
+```
+
+After it finishes, the visualized result is saved locally as `vis_result.jpg`, and the detection results are printed as follows
+```
+DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
+162.380249,132.057449, 463.178345, 413.167114, 0.962918, 33
+414.914642,141.148666, 91.275269, 308.688293, 0.951003, 0
+163.449234,129.669067, 35.253891, 135.111786, 0.900734, 0
+267.232239,142.290436, 31.578918, 126.329773, 0.848709, 0
+581.790833,179.027115, 30.893127, 135.484940, 0.837986, 0
+104.407021,72.602615, 22.900627, 75.469055, 0.796468, 0
+348.795380,70.122147, 18.806061, 85.829330, 0.785557, 0
+364.118683,92.457428, 17.437622, 89.212891, 0.774282, 0
+75.180283,192.470490, 41.898407, 55.552414, 0.712569, 56
+328.133759,61.894299, 19.100616, 65.633575, 0.710519, 0
+504.797760,181.732574, 107.740814, 248.115082, 0.708902, 0
+379.063080,64.762360, 15.956146, 68.312546, 0.680725, 0
+25.858747,186.564178, 34.958130, 56.007080, 0.580415, 0
+```
+
+## Other Documents
+
+- [C++ Deployment](./cpp/README.md)
+- [PPYOLOE API Reference](./api.md)
diff --git a/model_zoo/vision/ppyoloe/api.md b/model_zoo/vision/ppyoloe/api.md
new file mode 100644
index 0000000000..1c5cbcaadb
--- /dev/null
+++ b/model_zoo/vision/ppyoloe/api.md
@@ -0,0 +1,74 @@
+# PPYOLOE API Reference
+
+## Python API
+
+### PPYOLOE Class
+```
+fastdeploy.vision.ppdet.PPYOLOE(model_file, params_file, config_file, runtime_option=None, model_format=fd.Frontend.PADDLE)
+```
+Loads and initializes a PPYOLOE model. Both model_file and params_file are required; currently only the Paddle format is supported for model_format
+
+**Parameters**
+
+> * **model_file**(str): Path to the model file
+> * **params_file**(str): Path to the parameters file
+> * **config_file**(str): Inference configuration file of the model
+> * **runtime_option**(RuntimeOption): Backend inference options; None means the default configuration is used
+> * **model_format**(Frontend): Model format
+
+#### predict
+> ```
+> PPYOLOE.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
+> ```
+> Model prediction interface: takes an image and directly returns the detection results.
+>
+> **Parameters**
+>
+> > * **image_data**(np.ndarray): Input data, which must be in HWC, BGR format
+> > * **conf_threshold**(float): Confidence threshold used to filter detection boxes
+> > * **nms_iou_threshold**(float): IoU threshold used during NMS (automatically ignored when the model already contains NMS)
+
+See [ppyoloe.py](./ppyoloe.py) for example code
+
+
+## C++ API
+
+### PPYOLOE Class
+```
+fastdeploy::vision::ppdet::PPYOLOE(
+        const string& model_file,
+        const string& params_file,
+        const string& config_file,
+        const RuntimeOption& runtime_option = RuntimeOption(),
+        const Frontend& model_format = Frontend::PADDLE)
+```
+Loads and initializes a PPYOLOE model. Both model_file and params_file are required; currently only the Paddle format is supported for model_format
+
+**Parameters**
+
+> * **model_file**(str): Path to the model file
+> * **params_file**(str): Path to the parameters file
+> * **config_file**(str): Inference configuration file of the model
+> * **runtime_option**(RuntimeOption): Backend inference options; the default uses the default configuration
+> * **model_format**(Frontend): Model format
+
+#### Predict
+> ```
+> PPYOLOE::Predict(cv::Mat* im, DetectionResult* result,
+>                  float conf_threshold = 0.5,
+>                  float nms_threshold = 0.7)
+> ```
+> Model prediction interface: takes an image and directly returns the detection results.
+>
+> **Parameters**
+>
+> > * **im**: Input image, which must be in HWC, BGR format
+> > * **result**: 检测结果,包括检测框,各个框的置信度 +> > * **conf_threshold**: 检测框置信度过滤阈值 +> > * **nms_iou_threshold**: NMS处理过程中iou阈值(当模型中包含nms处理时,此参数自动无效) + +示例代码参考[cpp/yolov5.cc](cpp/yolov5.cc) + +## 其它API使用 + +- [模型部署RuntimeOption配置](../../../docs/api/runtime_option.md) diff --git a/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt b/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt new file mode 100644 index 0000000000..e681566517 --- /dev/null +++ b/model_zoo/vision/ppyoloe/cpp/CMakeLists.txt @@ -0,0 +1,17 @@ +PROJECT(ppyoloe_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.16) + +# 在低版本ABI环境中,通过如下代码进行兼容性编译 +# add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) + +# 指定下载解压后的fastdeploy库路径 +set(FASTDEPLOY_INSTALL_DIR ${PROJECT_SOURCE_DIR}/fastdeploy-linux-x64-0.3.0/) + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(ppyoloe_demo ${PROJECT_SOURCE_DIR}/ppyoloe.cc) +# 添加FastDeploy库依赖 +target_link_libraries(ppyoloe_demo ${FASTDEPLOY_LIBS}) diff --git a/model_zoo/vision/ppyoloe/cpp/README.md b/model_zoo/vision/ppyoloe/cpp/README.md new file mode 100644 index 0000000000..1027c2eeb2 --- /dev/null +++ b/model_zoo/vision/ppyoloe/cpp/README.md @@ -0,0 +1,39 @@ +# 编译PPYOLOE示例 + + +``` +# 下载和解压预测库 +wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz +tar xvf fastdeploy-linux-x64-0.0.3.tgz + +# 编译示例代码 +mkdir build & cd build +cmake .. +make -j + +# 下载模型和图片 +wget https://bj.bcebos.com/paddle2onnx/fastdeploy/models/ppdet/ppyoloe_crn_l_300e_coco.tgz +tar xvf ppyoloe_crn_l_300e_coco.tgz +wget https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/demo/000000014439_640x640.jpg + +# 执行 +./ppyoloe_demo +``` + +执行完后可视化的结果保存在本地`vis_result.jpg`,同时会将检测框输出在终端,如下所示 +``` +DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] +162.380249,132.057449, 463.178345, 413.167114, 0.962918, 33 +414.914642,141.148666, 91.275269, 308.688293, 0.951003, 0 +163.449234,129.669067, 35.253891, 135.111786, 0.900734, 0 +267.232239,142.290436, 31.578918, 126.329773, 0.848709, 0 +581.790833,179.027115, 30.893127, 135.484940, 0.837986, 0 +104.407021,72.602615, 22.900627, 75.469055, 0.796468, 0 +348.795380,70.122147, 18.806061, 85.829330, 0.785557, 0 +364.118683,92.457428, 17.437622, 89.212891, 0.774282, 0 +75.180283,192.470490, 41.898407, 55.552414, 0.712569, 56 +328.133759,61.894299, 19.100616, 65.633575, 0.710519, 0 +504.797760,181.732574, 107.740814, 248.115082, 0.708902, 0 +379.063080,64.762360, 15.956146, 68.312546, 0.680725, 0 +25.858747,186.564178, 34.958130, 56.007080, 0.580415, 0 +``` diff --git a/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc b/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc new file mode 100644 index 0000000000..e63f29e62a --- /dev/null +++ b/model_zoo/vision/ppyoloe/cpp/ppyoloe.cc @@ -0,0 +1,51 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
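+// Demo flow, as implemented below: load the exported PPYOLOE model
+// (model.pdmodel/model.pdiparams plus the infer_cfg.yml preprocessing
+// config), run Predict on a BGR cv::Mat read with OpenCV, print the
+// DetectionResult, and save a visualization via Visualize::VisDetection.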
+ +#include "fastdeploy/vision.h" + +int main() { + namespace vis = fastdeploy::vision; + + std::string model_file = "ppyoloe_crn_l_300e_coco/model.pdmodel"; + std::string params_file = "ppyoloe_crn_l_300e_coco/model.pdiparams"; + std::string config_file = "ppyoloe_crn_l_300e_coco/infer_cfg.yml"; + std::string img_path = "000000014439_640x640.jpg"; + std::string vis_path = "vis.jpeg"; + + auto model = vis::ppdet::PPYOLOE(model_file, params_file, config_file); + if (!model.Initialized()) { + std::cerr << "Init Failed." << std::endl; + return -1; + } + + cv::Mat im = cv::imread(img_path); + cv::Mat vis_im = im.clone(); + + vis::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Prediction Failed." << std::endl; + return -1; + } else { + std::cout << "Prediction Done!" << std::endl; + } + + // 输出预测框结果 + std::cout << res.Str() << std::endl; + + // 可视化预测结果 + vis::Visualize::VisDetection(&vis_im, res); + cv::imwrite(vis_path, vis_im); + std::cout << "Detect Done! Saved: " << vis_path << std::endl; + return 0; +} diff --git a/model_zoo/vision/ppyoloe/ppyoloe.py b/model_zoo/vision/ppyoloe/ppyoloe.py new file mode 100644 index 0000000000..7d79dfd8cf --- /dev/null +++ b/model_zoo/vision/ppyoloe/ppyoloe.py @@ -0,0 +1,24 @@ +import fastdeploy as fd +import cv2 + +# 下载模型和测试图片 +model_url = "https://bj.bcebos.com/paddle2onnx/fastdeploy/models/ppdet/ppyoloe_crn_l_300e_coco.tgz" +test_jpg_url = "https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/demo/000000014439_640x640.jpg" +fd.download_and_decompress(model_url, ".") +fd.download(test_jpg_url, ".", show_progress=True) + +# 加载模型 +model = fd.vision.ppdet.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel", + "ppyoloe_crn_l_300e_coco/model.pdiparams", + "ppyoloe_crn_l_300e_coco/infer_cfg.yml") + +# 预测图片 +im = cv2.imread("000000014439_640x640.jpg") +result = model.predict(im, conf_threshold=0.5) + +# 可视化结果 +fd.vision.visualize.vis_detection(im, result) +cv2.imwrite("vis_result.jpg", im) + +# 输出预测结果 +print(result) diff --git a/setup.py b/setup.py index f0ff3f16de..e76f057b1c 100644 --- a/setup.py +++ b/setup.py @@ -49,7 +49,8 @@ setup_configs["ENABLE_TRT_BACKEND"] = os.getenv("ENABLE_TRT_BACKEND", "OFF") setup_configs["WITH_GPU"] = os.getenv("WITH_GPU", "OFF") setup_configs["TRT_DIRECTORY"] = os.getenv("TRT_DIRECTORY", "UNDEFINED") -setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY", "/usr/local/cuda") +setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY", + "/usr/local/cuda") TOP_DIR = os.path.realpath(os.path.dirname(__file__)) SRC_DIR = os.path.join(TOP_DIR, "fastdeploy") @@ -325,17 +326,32 @@ def run(self): shutil.copy("LICENSE", "fastdeploy") depend_libs = list() - # modify the search path of libraries - command = "patchelf --set-rpath '$ORIGIN/libs/' .setuptools-cmake-build/fastdeploy_main.cpython-36m-x86_64-linux-gnu.so" - # The sw_64 not suppot patchelf, so we just disable that. 
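+        # What the rewritten block below does: on Linux builds, glob
+        # .setuptools-cmake-build for the fastdeploy_main.cpython-*.so of the
+        # current interpreter (instead of hard-coding the cp36 filename) and
+        # point its rpath at $ORIGIN/libs/, i.e.
+        #   patchelf --set-rpath '$ORIGIN/libs/' <matched .so>
+        # sw_64 and mips64 are still skipped since patchelf is unavailable there.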
- if platform.machine() != 'sw_64' and platform.machine() != 'mips64': - assert os.system(command) == 0, "patch fastdeploy_main.cpython-36m-x86_64-linux-gnu.so failed, the command: {}".format(command) + if platform.system().lower() == "linux": + for f in os.listdir(".setuptools-cmake-build"): + full_name = os.path.join(".setuptools-cmake-build", f) + if not os.path.isfile(full_name): + continue + if not full_name.count("fastdeploy_main.cpython-"): + continue + if not full_name.endswith(".so"): + continue + # modify the search path of libraries + command = "patchelf --set-rpath '$ORIGIN/libs/' {}".format( + full_name) + # The sw_64 not suppot patchelf, so we just disable that. + if platform.machine() != 'sw_64' and platform.machine( + ) != 'mips64': + assert os.system( + command + ) == 0, "patch fastdeploy_main.cpython-36m-x86_64-linux-gnu.so failed, the command: {}".format( + command) for f in os.listdir(".setuptools-cmake-build"): if not os.path.isfile(os.path.join(".setuptools-cmake-build", f)): continue if f.count("libfastdeploy") > 0: - shutil.copy(os.path.join(".setuptools-cmake-build", f), "fastdeploy/libs") + shutil.copy( + os.path.join(".setuptools-cmake-build", f), "fastdeploy/libs") for dirname in os.listdir(".setuptools-cmake-build/third_libs/install"): for lib in os.listdir( os.path.join(".setuptools-cmake-build/third_libs/install", From 013921ac21f7a77aa9a7f6ca98bb25990b4d9c19 Mon Sep 17 00:00:00 2001 From: ziqi-jin <67993288+ziqi-jin@users.noreply.github.com> Date: Thu, 21 Jul 2022 10:40:44 +0800 Subject: [PATCH 38/58] Yolor (#16) * Develop (#11) (#12) * Fix compile problem in different python version (#26) * fix some usage problem in linux * Fix compile problem Co-authored-by: root * Add PaddleDetetion/PPYOLOE model support (#22) * add ppdet/ppyoloe * Add demo code and documents * add convert processor to vision (#27) * update .gitignore * Added checking for cmake include dir * fixed missing trt_backend option bug when init from trt * remove un-need data layout and add pre-check for dtype * changed RGB2BRG to BGR2RGB in ppcls model * add model_zoo yolov6 c++/python demo * fixed CMakeLists.txt typos * update yolov6 cpp/README.md * add yolox c++/pybind and model_zoo demo * move some helpers to private * fixed CMakeLists.txt typos * add normalize with alpha and beta * add version notes for yolov5/yolov6/yolox * add copyright to yolov5.cc * revert normalize * fixed some bugs in yolox * fixed examples/CMakeLists.txt to avoid conflicts * add convert processor to vision * format examples/CMakeLists summary * Fix bug while the inference result is empty with YOLOv5 (#29) * Add multi-label function for yolov5 * Update README.md Update doc * Update fastdeploy_runtime.cc fix variable option.trt_max_shape wrong name * Update runtime_option.md Update resnet model dynamic shape setting name from images to x * Fix bug when inference result boxes are empty * Delete detection.py Co-authored-by: Jason Co-authored-by: root Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> Co-authored-by: Jason Co-authored-by: root Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> * Develop (#13) * Fix compile problem in different python version (#26) * fix some usage problem in linux * Fix compile problem Co-authored-by: root * Add PaddleDetetion/PPYOLOE model support (#22) * add ppdet/ppyoloe * Add demo code and documents * add convert processor to vision (#27) * update 
.gitignore * Added checking for cmake include dir * fixed missing trt_backend option bug when init from trt * remove un-need data layout and add pre-check for dtype * changed RGB2BRG to BGR2RGB in ppcls model * add model_zoo yolov6 c++/python demo * fixed CMakeLists.txt typos * update yolov6 cpp/README.md * add yolox c++/pybind and model_zoo demo * move some helpers to private * fixed CMakeLists.txt typos * add normalize with alpha and beta * add version notes for yolov5/yolov6/yolox * add copyright to yolov5.cc * revert normalize * fixed some bugs in yolox * fixed examples/CMakeLists.txt to avoid conflicts * add convert processor to vision * format examples/CMakeLists summary * Fix bug while the inference result is empty with YOLOv5 (#29) * Add multi-label function for yolov5 * Update README.md Update doc * Update fastdeploy_runtime.cc fix variable option.trt_max_shape wrong name * Update runtime_option.md Update resnet model dynamic shape setting name from images to x * Fix bug when inference result boxes are empty * Delete detection.py Co-authored-by: Jason Co-authored-by: root Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> * documents * documents * documents * documents * documents * documents * documents * documents * documents * documents * documents * documents * Develop (#14) * Fix compile problem in different python version (#26) * fix some usage problem in linux * Fix compile problem Co-authored-by: root * Add PaddleDetetion/PPYOLOE model support (#22) * add ppdet/ppyoloe * Add demo code and documents * add convert processor to vision (#27) * update .gitignore * Added checking for cmake include dir * fixed missing trt_backend option bug when init from trt * remove un-need data layout and add pre-check for dtype * changed RGB2BRG to BGR2RGB in ppcls model * add model_zoo yolov6 c++/python demo * fixed CMakeLists.txt typos * update yolov6 cpp/README.md * add yolox c++/pybind and model_zoo demo * move some helpers to private * fixed CMakeLists.txt typos * add normalize with alpha and beta * add version notes for yolov5/yolov6/yolox * add copyright to yolov5.cc * revert normalize * fixed some bugs in yolox * fixed examples/CMakeLists.txt to avoid conflicts * add convert processor to vision * format examples/CMakeLists summary * Fix bug while the inference result is empty with YOLOv5 (#29) * Add multi-label function for yolov5 * Update README.md Update doc * Update fastdeploy_runtime.cc fix variable option.trt_max_shape wrong name * Update runtime_option.md Update resnet model dynamic shape setting name from images to x * Fix bug when inference result boxes are empty * Delete detection.py Co-authored-by: root Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> Co-authored-by: Jason Co-authored-by: root Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com> Co-authored-by: huangjianhui <852142024@qq.com> Co-authored-by: Jason <928090362@qq.com> --- fastdeploy/vision/wongkinyiu/__init__.py | 2 +- model_zoo/vision/yolor/README.md | 13 ++++++------- model_zoo/vision/yolor/cpp/README.md | 14 ++++++++------ model_zoo/vision/yolov7/README.md | 4 ++-- model_zoo/vision/yolov7/cpp/README.md | 4 ++-- 5 files changed, 19 insertions(+), 18 deletions(-) diff --git a/fastdeploy/vision/wongkinyiu/__init__.py b/fastdeploy/vision/wongkinyiu/__init__.py index 026d10062f..3c77e85896 100644 --- a/fastdeploy/vision/wongkinyiu/__init__.py +++ 
b/fastdeploy/vision/wongkinyiu/__init__.py @@ -135,7 +135,7 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5): return self._model.predict(input_image, conf_threshold, nms_iou_threshold) - # 一些跟YOLOv7模型有关的属性封装 + # 一些跟YOLOR模型有关的属性封装 # 多数是预处理相关,可通过修改如model.size = [1280, 1280]改变预处理时resize的大小(前提是模型支持) @property def size(self): diff --git a/model_zoo/vision/yolor/README.md b/model_zoo/vision/yolor/README.md index 467023f169..358e62bbe1 100644 --- a/model_zoo/vision/yolor/README.md +++ b/model_zoo/vision/yolor/README.md @@ -1,6 +1,7 @@ # 编译YOLOR示例 -当前支持模型版本为:[YOLOR v0.1](https://github.com/WongKinYiu/yolor/releases/tag/weights) +当前支持模型版本为:[YOLOR weights](https://github.com/WongKinYiu/yolor/releases/tag/weights) +(tips: 如果使用 `git clone` 的方式下载仓库代码,请将分支切换(checkout)到 `paper` 分支). 本文档说明如何进行[YOLOR](https://github.com/WongKinYiu/yolor)的快速部署推理。本目录结构如下 @@ -18,19 +19,17 @@ - 手动获取 - 访问[YOLOR](https://github.com/WongKinYiu/yolor)官方github库,按照指引下载安装,下载`yolor.pt` 模型,利用 `models/export.py` 得到`onnx`格式文件。 - - + 访问[YOLOR](https://github.com/WongKinYiu/yolor)官方github库,按照指引下载安装,下载`yolor.pt` 模型,利用 `models/export.py` 得到`onnx`格式文件。如果您导出的`onnx`模型出现精度不达标或者是数据维度的问题,可以参考[yolor#32](https://github.com/WongKinYiu/yolor/issues/32)的解决办法 ``` #下载yolor模型文件 - wget https://github.com/WongKinYiu/yolor/releases/download/v0.1/yolor.pt + wget https://github.com/WongKinYiu/yolor/releases/download/weights/yolor-d6-paper-570.pt # 导出onnx格式文件 - python models/export.py --grid --dynamic --weights PATH/TO/yolo7.pt + python models/export.py --weights PATH/TO/yolor-xx-xx-xx.pt --img-size 640 # 移动onnx文件到demo目录 - cp PATH/TO/yolo7.onnx PATH/TO/model_zoo/vision/yolor/ + cp PATH/TO/yolor.onnx PATH/TO/model_zoo/vision/yolor/ ``` ## 安装FastDeploy diff --git a/model_zoo/vision/yolor/cpp/README.md b/model_zoo/vision/yolor/cpp/README.md index eddf5bc51b..d06bbe3005 100644 --- a/model_zoo/vision/yolor/cpp/README.md +++ b/model_zoo/vision/yolor/cpp/README.md @@ -1,20 +1,22 @@ # 编译YOLOR示例 -当前支持模型版本为:[YOLOR v0.1](https://github.com/WongKinYiu/yolor/releases/tag/weights) - +当前支持模型版本为:[YOLOR weights](https://github.com/WongKinYiu/yolor/releases/tag/weights) +(tips: 如果使用 `git clone` 的方式下载仓库代码,请将分支切换(checkout)到 `paper` 分支). ## 获取ONNX文件 - 手动获取 - 访问[YOLOR](https://github.com/WongKinYiu/yolor/releases/tag/weights)官方github库,按照指引下载安装,下载`yolor.pt` 模型,利用 `models/export.py` 得到`onnx`格式文件。 + 访问[YOLOR](https://github.com/WongKinYiu/yolor)官方github库,按照指引下载安装,下载`yolor.pt` 模型,利用 `models/export.py` 得到`onnx`格式文件。如果您导出的`onnx`模型出现精度不达标或者是数据维度的问题,可以参考[yolor#32](https://github.com/WongKinYiu/yolor/issues/32)的解决办法 ``` #下载yolor模型文件 - wget https://github.com/WongKinYiu/yolor/releases/download/v0.1/yolor.pt + wget https://github.com/WongKinYiu/yolor/releases/download/weights/yolor-d6-paper-570.pt # 导出onnx格式文件 - python models/export.py --grid --dynamic --weights PATH/TO/yolo7.pt + python models/export.py --weights PATH/TO/yolor-xx-xx-xx.pt --img-size 640 + # 移动onnx文件到demo目录 + cp PATH/TO/yolor.onnx PATH/TO/model_zoo/vision/yolor/ ``` @@ -31,7 +33,7 @@ cmake .. 
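 # (the hunk below only corrects the copied ONNX filename, yolo7.onnx -> yolor.onnx; the build commands themselves are unchanged)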
make -j # 移动onnx文件到demo目录 -cp PATH/TO/yolo7.onnx PATH/TO/model_zoo/vision/yolor/cpp/build/ +cp PATH/TO/yolor.onnx PATH/TO/model_zoo/vision/yolor/cpp/build/ # 下载图片 wget https://raw.githubusercontent.com/WongKinYiu/yolor/paper/inference/images/horses.jpg diff --git a/model_zoo/vision/yolov7/README.md b/model_zoo/vision/yolov7/README.md index 2bb13ce459..8b2f06d761 100644 --- a/model_zoo/vision/yolov7/README.md +++ b/model_zoo/vision/yolov7/README.md @@ -27,10 +27,10 @@ wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt # 导出onnx格式文件 - python models/export.py --grid --dynamic --weights PATH/TO/yolo7.pt + python models/export.py --grid --dynamic --weights PATH/TO/yolov7.pt # 移动onnx文件到demo目录 - cp PATH/TO/yolo7.onnx PATH/TO/model_zoo/vision/yolov7/ + cp PATH/TO/yolov7.onnx PATH/TO/model_zoo/vision/yolov7/ ``` ## 安装FastDeploy diff --git a/model_zoo/vision/yolov7/cpp/README.md b/model_zoo/vision/yolov7/cpp/README.md index f216c1aecf..655e98678c 100644 --- a/model_zoo/vision/yolov7/cpp/README.md +++ b/model_zoo/vision/yolov7/cpp/README.md @@ -13,7 +13,7 @@ wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt # 导出onnx格式文件 - python models/export.py --grid --dynamic --weights PATH/TO/yolo7.pt + python models/export.py --grid --dynamic --weights PATH/TO/yolov7.pt ``` @@ -31,7 +31,7 @@ cmake .. make -j # 移动onnx文件到demo目录 -cp PATH/TO/yolo7.onnx PATH/TO/model_zoo/vision/yolov7/cpp/build/ +cp PATH/TO/yolov7.onnx PATH/TO/model_zoo/vision/yolov7/cpp/build/ # 下载图片 wget https://raw.githubusercontent.com/WongKinYiu/yolov7/main/inference/images/horses.jpg From 90ca4cb0cd2c29a657dbe544d570b4498e4e35d7 Mon Sep 17 00:00:00 2001 From: ziqi-jin <67993288+ziqi-jin@users.noreply.github.com> Date: Fri, 29 Jul 2022 14:49:38 +0800 Subject: [PATCH 39/58] add is_dynamic for YOLO series (#22) --- csrcs/fastdeploy/vision/ppogg/yolov5lite.cc | 15 +++++++++++++++ csrcs/fastdeploy/vision/ppogg/yolov5lite.h | 10 ++++++++++ .../vision/wongkinyiu/scaledyolov4.cc | 15 +++++++++++++++ .../fastdeploy/vision/wongkinyiu/scaledyolov4.h | 10 ++++++++++ csrcs/fastdeploy/vision/wongkinyiu/yolor.cc | 17 ++++++++++++++++- csrcs/fastdeploy/vision/wongkinyiu/yolor.h | 10 ++++++++++ csrcs/fastdeploy/vision/wongkinyiu/yolov7.cc | 17 ++++++++++++++++- csrcs/fastdeploy/vision/wongkinyiu/yolov7.h | 10 ++++++++++ 8 files changed, 102 insertions(+), 2 deletions(-) diff --git a/csrcs/fastdeploy/vision/ppogg/yolov5lite.cc b/csrcs/fastdeploy/vision/ppogg/yolov5lite.cc index 320867f581..a84ead937a 100644 --- a/csrcs/fastdeploy/vision/ppogg/yolov5lite.cc +++ b/csrcs/fastdeploy/vision/ppogg/yolov5lite.cc @@ -118,6 +118,21 @@ bool YOLOv5Lite::Initialize() { FDERROR << "Failed to initialize fastdeploy backend." << std::endl; return false; } + // Check if the input shape is dynamic after Runtime already initialized, + // Note that, We need to force is_mini_pad 'false' to keep static + // shape after padding (LetterBox) when the is_dynamic_shape is 'false'. 
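+  // For example, an ONNX graph exported with dynamic axes reports its input
+  // shape as something like [1, 3, -1, -1]; any non-positive H/W below marks
+  // the input as dynamic. Otherwise mini-pad letterboxing is disabled so the
+  // padded image always matches the fixed size baked into the graph.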
+ is_dynamic_input_ = false; + auto shape = InputInfoOfRuntime(0).shape; + for (int i = 0; i < shape.size(); ++i) { + // if height or width is dynamic + if (i >= 2 && shape[i] <= 0) { + is_dynamic_input_ = true; + break; + } + } + if (!is_dynamic_input_) { + is_mini_pad = false; + } return true; } diff --git a/csrcs/fastdeploy/vision/ppogg/yolov5lite.h b/csrcs/fastdeploy/vision/ppogg/yolov5lite.h index 3eb556cfa3..669240e211 100644 --- a/csrcs/fastdeploy/vision/ppogg/yolov5lite.h +++ b/csrcs/fastdeploy/vision/ppogg/yolov5lite.h @@ -126,6 +126,16 @@ class FASTDEPLOY_DECL YOLOv5Lite : public FastDeployModel { void GenerateAnchors(const std::vector& size, const std::vector& downsample_strides, std::vector* anchors, const int num_anchors = 3); + + // 查看输入是否为动态维度的 不建议直接使用 不同模型的逻辑可能不一致 + bool IsDynamicInput() const { return is_dynamic_input_; } + + // whether to inference with dynamic shape (e.g ONNX export with dynamic shape + // or not.) + // while is_dynamic_shape if 'false', is_mini_pad will force 'false'. This + // value will + // auto check by fastdeploy after the internal Runtime already initialized. + bool is_dynamic_input_; }; } // namespace ppogg } // namespace vision diff --git a/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.cc b/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.cc index 7321fc01bb..a562c9b275 100644 --- a/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.cc +++ b/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.cc @@ -89,6 +89,21 @@ bool ScaledYOLOv4::Initialize() { FDERROR << "Failed to initialize fastdeploy backend." << std::endl; return false; } + // Check if the input shape is dynamic after Runtime already initialized, + // Note that, We need to force is_mini_pad 'false' to keep static + // shape after padding (LetterBox) when the is_dynamic_shape is 'false'. + is_dynamic_input_ = false; + auto shape = InputInfoOfRuntime(0).shape; + for (int i = 0; i < shape.size(); ++i) { + // if height or width is dynamic + if (i >= 2 && shape[i] <= 0) { + is_dynamic_input_ = true; + break; + } + } + if (!is_dynamic_input_) { + is_mini_pad = false; + } return true; } diff --git a/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.h b/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.h index 39066a29ec..247d5221e1 100644 --- a/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.h +++ b/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.h @@ -90,6 +90,16 @@ class FASTDEPLOY_DECL ScaledYOLOv4 : public FastDeployModel { const std::vector& color, bool _auto, bool scale_fill = false, bool scale_up = true, int stride = 32); + + // 查看输入是否为动态维度的 不建议直接使用 不同模型的逻辑可能不一致 + bool IsDynamicInput() const { return is_dynamic_input_; } + + // whether to inference with dynamic shape (e.g ONNX export with dynamic shape + // or not.) + // while is_dynamic_shape if 'false', is_mini_pad will force 'false'. This + // value will + // auto check by fastdeploy after the internal Runtime already initialized. + bool is_dynamic_input_; }; } // namespace wongkinyiu } // namespace vision diff --git a/csrcs/fastdeploy/vision/wongkinyiu/yolor.cc b/csrcs/fastdeploy/vision/wongkinyiu/yolor.cc index 070ea72e60..7de994f2a4 100644 --- a/csrcs/fastdeploy/vision/wongkinyiu/yolor.cc +++ b/csrcs/fastdeploy/vision/wongkinyiu/yolor.cc @@ -87,6 +87,21 @@ bool YOLOR::Initialize() { FDERROR << "Failed to initialize fastdeploy backend." 
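+                 // (a false return here is surfaced through Initialized(),
+                 //  which the demo programs check before calling Predict)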
<< std::endl; return false; } + // Check if the input shape is dynamic after Runtime already initialized, + // Note that, We need to force is_mini_pad 'false' to keep static + // shape after padding (LetterBox) when the is_dynamic_shape is 'false'. + is_dynamic_input_ = false; + auto shape = InputInfoOfRuntime(0).shape; + for (int i = 0; i < shape.size(); ++i) { + // if height or width is dynamic + if (i >= 2 && shape[i] <= 0) { + is_dynamic_input_ = true; + break; + } + } + if (!is_dynamic_input_) { + is_mini_pad = false; + } return true; } @@ -176,7 +191,7 @@ bool YOLOR::Postprocess( float pad_h = (out_h - ipt_h * scale) / 2.0f; float pad_w = (out_w - ipt_w * scale) / 2.0f; if (is_mini_pad) { - // 和 LetterBox中_auto=true的处理逻辑对应 + // 和 LetterBox中_auto=true的处理逻辑对应 pad_h = static_cast(static_cast(pad_h) % stride); pad_w = static_cast(static_cast(pad_w) % stride); } diff --git a/csrcs/fastdeploy/vision/wongkinyiu/yolor.h b/csrcs/fastdeploy/vision/wongkinyiu/yolor.h index 7597f42d32..b3a00663c1 100644 --- a/csrcs/fastdeploy/vision/wongkinyiu/yolor.h +++ b/csrcs/fastdeploy/vision/wongkinyiu/yolor.h @@ -89,6 +89,16 @@ class FASTDEPLOY_DECL YOLOR : public FastDeployModel { const std::vector& color, bool _auto, bool scale_fill = false, bool scale_up = true, int stride = 32); + + // 查看输入是否为动态维度的 不建议直接使用 不同模型的逻辑可能不一致 + bool IsDynamicInput() const { return is_dynamic_input_; } + + // whether to inference with dynamic shape (e.g ONNX export with dynamic shape + // or not.) + // while is_dynamic_shape if 'false', is_mini_pad will force 'false'. This + // value will + // auto check by fastdeploy after the internal Runtime already initialized. + bool is_dynamic_input_; }; } // namespace wongkinyiu } // namespace vision diff --git a/csrcs/fastdeploy/vision/wongkinyiu/yolov7.cc b/csrcs/fastdeploy/vision/wongkinyiu/yolov7.cc index 457f8800cf..6f603c87fc 100644 --- a/csrcs/fastdeploy/vision/wongkinyiu/yolov7.cc +++ b/csrcs/fastdeploy/vision/wongkinyiu/yolov7.cc @@ -88,6 +88,21 @@ bool YOLOv7::Initialize() { FDERROR << "Failed to initialize fastdeploy backend." << std::endl; return false; } + // Check if the input shape is dynamic after Runtime already initialized, + // Note that, We need to force is_mini_pad 'false' to keep static + // shape after padding (LetterBox) when the is_dynamic_shape is 'false'. 
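+  // Only dims with index >= 2 (H/W of an NCHW input) are probed below, so a
+  // model whose batch dimension alone is dynamic still takes the static-shape
+  // path and has is_mini_pad forced off after the loop.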
+ is_dynamic_input_ = false; + auto shape = InputInfoOfRuntime(0).shape; + for (int i = 0; i < shape.size(); ++i) { + // if height or width is dynamic + if (i >= 2 && shape[i] <= 0) { + is_dynamic_input_ = true; + break; + } + } + if (!is_dynamic_input_) { + is_mini_pad = false; + } return true; } @@ -177,7 +192,7 @@ bool YOLOv7::Postprocess( float pad_h = (out_h - ipt_h * scale) / 2.0f; float pad_w = (out_w - ipt_w * scale) / 2.0f; if (is_mini_pad) { - // 和 LetterBox中_auto=true的处理逻辑对应 + // 和 LetterBox中_auto=true的处理逻辑对应 pad_h = static_cast(static_cast(pad_h) % stride); pad_w = static_cast(static_cast(pad_w) % stride); } diff --git a/csrcs/fastdeploy/vision/wongkinyiu/yolov7.h b/csrcs/fastdeploy/vision/wongkinyiu/yolov7.h index 64e18ad47b..5dbdfb8f4a 100644 --- a/csrcs/fastdeploy/vision/wongkinyiu/yolov7.h +++ b/csrcs/fastdeploy/vision/wongkinyiu/yolov7.h @@ -89,6 +89,16 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel { const std::vector& color, bool _auto, bool scale_fill = false, bool scale_up = true, int stride = 32); + + // 查看输入是否为动态维度的 不建议直接使用 不同模型的逻辑可能不一致 + bool IsDynamicInput() const { return is_dynamic_input_; } + + // whether to inference with dynamic shape (e.g ONNX export with dynamic shape + // or not.) + // while is_dynamic_shape if 'false', is_mini_pad will force 'false'. This + // value will + // auto check by fastdeploy after the internal Runtime already initialized. + bool is_dynamic_input_; }; } // namespace wongkinyiu } // namespace vision From 3590990bc2acb86fd473f4c34716323c14d12c35 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 08:06:17 +0000 Subject: [PATCH 40/58] first commit test photo --- .../detection/yolov5/cpp/CMakeLists.txt | 14 +++ .../vision/detection/yolov5/cpp/README.md | 77 +++++++++++++ examples/vision/detection/yolov5/cpp/infer.cc | 105 ++++++++++++++++++ .../vision/detection/yolov5/python/README.md | 71 ++++++++++++ .../vision/detection/yolov5/python/infer.py | 51 +++++++++ examples/vision/detection/yolov7/README.md | 11 +- .../vision/detection/yolov7/cpp/README.md | 20 ++-- .../vision/detection/yolov7/python/README.md | 26 +++-- 8 files changed, 353 insertions(+), 22 deletions(-) create mode 100644 examples/vision/detection/yolov5/cpp/CMakeLists.txt create mode 100644 examples/vision/detection/yolov5/cpp/README.md create mode 100644 examples/vision/detection/yolov5/cpp/infer.cc create mode 100644 examples/vision/detection/yolov5/python/README.md create mode 100644 examples/vision/detection/yolov5/python/infer.py diff --git a/examples/vision/detection/yolov5/cpp/CMakeLists.txt b/examples/vision/detection/yolov5/cpp/CMakeLists.txt new file mode 100644 index 0000000000..fea1a2888b --- /dev/null +++ b/examples/vision/detection/yolov5/cpp/CMakeLists.txt @@ -0,0 +1,14 @@ +PROJECT(infer_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.12) + +# 指定下载解压后的fastdeploy库路径 +option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc) +# 添加FastDeploy库依赖 +target_link_libraries(infer_demo ${FASTDEPLOY_LIBS}) diff --git a/examples/vision/detection/yolov5/cpp/README.md b/examples/vision/detection/yolov5/cpp/README.md new file mode 100644 index 0000000000..6d4c7fe7fd --- /dev/null +++ b/examples/vision/detection/yolov5/cpp/README.md @@ -0,0 +1,77 @@ +# YOLOv7 C++部署示例 + +本目录下提供`infer.cc`快速完成YOLOv7在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 + +在部署前,需确认以下两个步骤 + +- 1. 
软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuild_libraries.md) + +以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试 + +``` +mkdir build +cd build +wget https://xxx.tgz +tar xvf fastdeploy-linux-x64-0.2.0.tgz +cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0 +make -j + +#下载官方转换好的yolov7模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov7.onnx +wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000087038.jpg + + +# CPU推理 +./infer_demo yolov7.onnx 000000087038.jpg 0 +# GPU推理 +./infer_demo yolov7.onnx 000000087038.jpg 1 +# GPU上TensorRT推理 +./infer_demo yolov7.onnx 000000087038.jpg 2 +``` + +## YOLOv7 C++接口 + +### YOLOv7类 + +``` +fastdeploy::vision::detection::YOLOv7( + const string& model_file, + const string& params_file = "", + const RuntimeOption& runtime_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX) +``` + +YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX时,此参数传入空字符串即可 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX格式 + +#### Predict函数 + +> ``` +> YOLOv7::Predict(cv::Mat* im, DetectionResult* result, +> float conf_threshold = 0.25, +> float nms_iou_threshold = 0.5) +> ``` +> +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式 +> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/) +> > * **conf_threshold**: 检测框置信度过滤阈值 +> > * **nms_iou_threshold**: NMS处理过程中iou阈值 + +### 类成员变量 + +> > * **size**(vector): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] + +- [模型介绍](../../) +- [Python部署](../python) +- [视觉模型预测结果](../../../../../docs/api/vision_results/) diff --git a/examples/vision/detection/yolov5/cpp/infer.cc b/examples/vision/detection/yolov5/cpp/infer.cc new file mode 100644 index 0000000000..1ddca8f1c8 --- /dev/null +++ b/examples/vision/detection/yolov5/cpp/infer.cc @@ -0,0 +1,105 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +void CpuInfer(const std::string& model_file, const std::string& image_file) { + auto model = fastdeploy::vision::detection::YOLOv7(model_file); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." 
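+    // (Predict only fills `res` on success, hence the early return below)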
<< std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +void GpuInfer(const std::string& model_file, const std::string& image_file) { + auto option = fastdeploy::RuntimeOption(); + option.UseGpu(); + auto model = fastdeploy::vision::detection::YOLOv7(model_file, "", option); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +void TrtInfer(const std::string& model_file, const std::string& image_file) { + auto option = fastdeploy::RuntimeOption(); + option.UseGpu(); + option.UseTrtBackend(); + option.SetTrtInputShape("images", {1, 3, 640, 640}); + auto model = fastdeploy::vision::detection::YOLOv7(model_file, "", option); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + auto im_bak = im.clone(); + + fastdeploy::vision::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + + auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + cv::imwrite("vis_result.jpg", vis_im); + std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +} + +int main(int argc, char* argv[]) { + if (argc < 4) { + std::cout << "Usage: infer_demo path/to/model path/to/image run_option, " + "e.g ./infer_model ./yolov7.onnx ./test.jpeg 0" + << std::endl; + std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " + "with gpu; 2: run with gpu and use tensorrt backend." + << std::endl; + return -1; + } + + if (std::atoi(argv[3]) == 0) { + CpuInfer(argv[1], argv[2]); + } else if (std::atoi(argv[3]) == 1) { + GpuInfer(argv[1], argv[2]); + } else if (std::atoi(argv[3]) == 2) { + TrtInfer(argv[1], argv[2]); + } + return 0; +} diff --git a/examples/vision/detection/yolov5/python/README.md b/examples/vision/detection/yolov5/python/README.md new file mode 100644 index 0000000000..74078e2add --- /dev/null +++ b/examples/vision/detection/yolov5/python/README.md @@ -0,0 +1,71 @@ +# YOLOv7 Python部署示例 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 
FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md) + +本目录下提供`infer.py`快速完成YOLOv7在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成 + +``` +#下载yolov7模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov7.onnx +wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg + + +#下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd examples/vison/detection/yolov7/python/ + +# CPU推理 +python infer.py --model yolov7.onnx --image 000000087038.jpg --device cpu +# GPU推理 +python infer.py --model yolov7.onnx --image 000000087038.jpg --device gpu +# GPU上使用TensorRT推理 +python infer.py --model yolov7.onnx --image 000000087038.jpg --device gpu --use_trt True +``` + +运行完成可视化结果如下图所示 + +## YOLOv7 Python接口 + +``` +fastdeploy.vision.detection.YOLOv7(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX) +``` + +YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX格式时,此参数无需设定 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX + +### predict函数 + +> ``` +> YOLOv7.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) +> ``` +> +> 模型预测结口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 +> > * **conf_threshold**(float): 检测框置信度过滤阈值 +> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值 + +> **返回** +> +> > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/) + +### 类成员属性 + +> > * **size**(list | tuple): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] + +## 其它文档 + +- [YOLOv7 模型介绍](..) +- [YOLOv7 C++部署](../cpp) +- [模型预测结果说明](../../../../../docs/api/vision_results/) diff --git a/examples/vision/detection/yolov5/python/infer.py b/examples/vision/detection/yolov5/python/infer.py new file mode 100644 index 0000000000..574755c3a3 --- /dev/null +++ b/examples/vision/detection/yolov5/python/infer.py @@ -0,0 +1,51 @@ +import fastdeploy as fd +import cv2 + + +def parse_arguments(): + import argparse + import ast + parser = argparse.ArgumentParser() + parser.add_argument( + "--model", required=True, help="Path of yolov7 onnx model.") + parser.add_argument( + "--image", required=True, help="Path of test image file.") + parser.add_argument( + "--device", + type=str, + default='cpu', + help="Type of inference device, support 'cpu' or 'gpu'.") + parser.add_argument( + "--use_trt", + type=ast.literal_eval, + default=False, + help="Wether to use tensorrt.") + return parser.parse_args() + + +def build_option(args): + option = fd.RuntimeOption() + + if args.device.lower() == "gpu": + option.use_gpu() + + if args.use_trt: + option.use_trt_backend() + option.set_trt_input_shape("images", [1, 3, 640, 640]) + return option + + +args = parse_arguments() + +# 配置runtime,加载模型 +runtime_option = build_option(args) +model = fd.vision.detection.YOLOv7(args.model, runtime_option=runtime_option) + +# 预测图片检测结果 +im = cv2.imread(args.image) +result = model.predict(im) + +# 预测结果可视化 +vis_im = fd.vision.vis_detection(im, result) +cv2.imwrite("visualized_result.jpg", vis_im) +print("Visualized result save in ./visualized_result.jpg") diff --git a/examples/vision/detection/yolov7/README.md b/examples/vision/detection/yolov7/README.md index 5f4848075d..995d278b11 100644 --- a/examples/vision/detection/yolov7/README.md +++ b/examples/vision/detection/yolov7/README.md @@ -3,13 +3,14 @@ ## 模型版本说明 - [YOLOv7 
0.1](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1) - - (1)[YOLOv7 0.1](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1)链接中.pt后缀模型通过[导出ONNX模型](#导出ONNX模型)操作后,可直接部署;.onnx、.trt和 .pose后缀模型暂不支持部署; - - (2)开发者基于自己数据训练的YOLOv7 0.1模型,可按照[导出ONNX模型](#%E5%AF%BC%E5%87%BAONNX%E6%A8%A1%E5%9E%8B)后,完成部署。 + - (1)[链接中](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1)的*.pt通过[导出ONNX模型](#导出ONNX模型)操作后,可进行部署; + - (2)[链接中](https://github.com/WongKinYiu/yolov7/releases/tag/v0.1)的*.onnx、*.trt和 *.pose模型不支持部署; + - (3)开发者基于自己数据训练的YOLOv7 0.1模型,可按照[导出ONNX模型](#%E5%AF%BC%E5%87%BAONNX%E6%A8%A1%E5%9E%8B)后,完成部署。 ## 导出ONNX模型 ``` -# 下载yolov7模型文件,或准备训练好的YOLOv7模型文件 +# 下载yolov7模型文件 wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt # 导出onnx格式文件 (Tips: 对应 YOLOv7 release v0.1 代码) @@ -18,8 +19,8 @@ python models/export.py --grid --dynamic --weights PATH/TO/yolov7.pt # 如果您的代码版本中有支持NMS的ONNX文件导出,请使用如下命令导出ONNX文件(请暂时不要使用 "--end2end",我们后续将支持带有NMS的ONNX模型的部署) python models/export.py --grid --dynamic --weights PATH/TO/yolov7.pt -# 移动onnx文件到examples目录 -cp PATH/TO/yolov7.onnx PATH/TO/FastDeploy/examples/vision/detextion/yolov7/ +# 移动onnx文件到demo目录 +cp PATH/TO/yolov7.onnx PATH/TO/model_zoo/vision/yolov7/ ``` ## 下载预训练模型 diff --git a/examples/vision/detection/yolov7/cpp/README.md b/examples/vision/detection/yolov7/cpp/README.md index 2dab72beb8..9e28ffcb5f 100644 --- a/examples/vision/detection/yolov7/cpp/README.md +++ b/examples/vision/detection/yolov7/cpp/README.md @@ -5,7 +5,7 @@ 在部署前,需确认以下两个步骤 - 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) -- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuilt_libraries.md) +- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuild_libraries.md) 以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试 @@ -19,15 +19,15 @@ make -j #下载官方转换好的yolov7模型文件和测试图片 wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov7.onnx -wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000087038.jpg +wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg # CPU推理 -./infer_demo yolov7.onnx 000000087038.jpg 0 +./infer_demo yolov7.onnx 000000014439.jpg 0 # GPU推理 -./infer_demo yolov7.onnx 000000087038.jpg 1 +./infer_demo yolov7.onnx 000000014439.jpg 1 # GPU上TensorRT推理 -./infer_demo yolov7.onnx 000000087038.jpg 2 +./infer_demo yolov7.onnx 000000014439.jpg 2 ``` ## YOLOv7 C++接口 @@ -58,11 +58,11 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 > float conf_threshold = 0.25, > float nms_iou_threshold = 0.5) > ``` -> +> > 模型预测接口,输入图像直接输出检测结果。 -> +> > **参数** -> +> > > * **im**: 输入图像,注意需为HWC,BGR格式 > > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/) > > * **conf_threshold**: 检测框置信度过滤阈值 @@ -71,6 +71,10 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 ### 类成员变量 > > * **size**(vector): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(vector): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false` +> > * **stride**(int): 配合`stris_mini_pad`成员变量使用, 默认值为`stride=32` - [模型介绍](../../) - [Python部署](../python) diff --git a/examples/vision/detection/yolov7/python/README.md b/examples/vision/detection/yolov7/python/README.md index 
c45d8a416c..b3a4f12a1b 100644 --- a/examples/vision/detection/yolov7/python/README.md +++ b/examples/vision/detection/yolov7/python/README.md @@ -18,15 +18,17 @@ git clone https://github.com/PaddlePaddle/FastDeploy.git cd examples/vison/detection/yolov7/python/ # CPU推理 -python infer.py --model yolov7.onnx --image 000000087038.jpg --device cpu +python infer.py --model yolov7.onnx --image 000000014439.jpg --device cpu # GPU推理 -python infer.py --model yolov7.onnx --image 000000087038.jpg --device gpu -# GPU上使用TensorRT推理 (注意:TensorRT推理第一次运行,有序列化模型的操作,有一定耗时,需要耐心等待) -python infer.py --model yolov7.onnx --image 000000087038.jpg --device gpu --use_trt True +python infer.py --model yolov7.onnx --image 000000014439.jpg --device gpu +# GPU上使用TensorRT推理 +python infer.py --model yolov7.onnx --image 000000014439.jpg --device gpu --use_trt True ``` 运行完成可视化结果如下图所示 + + ## YOLOv7 Python接口 ``` @@ -47,22 +49,28 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式 > ``` > YOLOv7.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) > ``` -> +> > 模型预测结口,输入图像直接输出检测结果。 -> +> > **参数** -> +> > > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 > > * **conf_threshold**(float): 检测框置信度过滤阈值 > > * **nms_iou_threshold**(float): NMS处理过程中iou阈值 > **返回** -> +> > > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/) ### 类成员属性 -> > * **size**(list | tuple): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(list[float]): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=True` 表示不使用填充的方式,默认值为`is_no_pad=False` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=False` +> > * **stride**(int): 配合`stris_mini_padide`成员变量使用, 默认值为`stride=32` + + ## 其它文档 From 09c64ef6d992f29e2ac445578e2e7a629c3640c0 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 08:21:59 +0000 Subject: [PATCH 41/58] yolov7 doc --- examples/vision/{detection => }/README.md | 0 examples/vision/detection/yolov7/README.md | 8 +++++++- examples/vision/detection/yolov7/cpp/README.md | 4 ++++ 3 files changed, 11 insertions(+), 1 deletion(-) rename examples/vision/{detection => }/README.md (100%) diff --git a/examples/vision/detection/README.md b/examples/vision/README.md similarity index 100% rename from examples/vision/detection/README.md rename to examples/vision/README.md diff --git a/examples/vision/detection/yolov7/README.md b/examples/vision/detection/yolov7/README.md index 995d278b11..a661d9bd9f 100644 --- a/examples/vision/detection/yolov7/README.md +++ b/examples/vision/detection/yolov7/README.md @@ -30,7 +30,13 @@ cp PATH/TO/yolov7.onnx PATH/TO/model_zoo/vision/yolov7/ | 模型 | 大小 | 精度 | |:---------------------------------------------------------------- |:----- |:----- | | [YOLOv7](https://bj.bcebos.com/paddlehub/fastdeploy/yolov7.onnx) | 141MB | 51.4% | -| [YOLOv7-x] | 10MB | 51.4% | +| [YOLOv7x](https://bj.bcebos.com/paddlehub/fastdeploy/yolov7x.onnx) | 273MB | 53.1% | +| [YOLOv7-w6](https://bj.bcebos.com/paddlehub/fastdeploy/yolov7-w6.onnx) | 269MB | 54.9% | +| [YOLOv7-e6](https://bj.bcebos.com/paddlehub/fastdeploy/yolov7-e6.onnx) | 372MB | 56.0% | +| [YOLOv7-d6](https://bj.bcebos.com/paddlehub/fastdeploy/yolov7-d6.onnx) | 511MB | 56.6% | +| 
[YOLOv7-e6e](https://bj.bcebos.com/paddlehub/fastdeploy/yolov7-e6e.onnx) | 579MB | 56.8% | + + ## 详细部署文档 diff --git a/examples/vision/detection/yolov7/cpp/README.md b/examples/vision/detection/yolov7/cpp/README.md index 9e28ffcb5f..e308d35f35 100644 --- a/examples/vision/detection/yolov7/cpp/README.md +++ b/examples/vision/detection/yolov7/cpp/README.md @@ -30,6 +30,10 @@ wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/0000000 ./infer_demo yolov7.onnx 000000014439.jpg 2 ``` +运行完成可视化结果如下图所示 + + + ## YOLOv7 C++接口 ### YOLOv7类 From 19154992bfc67f15630194383b3b50bed2d1c4a5 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 08:26:00 +0000 Subject: [PATCH 42/58] yolov7 doc --- examples/vision/detection/yolov7/cpp/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/vision/detection/yolov7/cpp/README.md b/examples/vision/detection/yolov7/cpp/README.md index e308d35f35..5e4ee4eeac 100644 --- a/examples/vision/detection/yolov7/cpp/README.md +++ b/examples/vision/detection/yolov7/cpp/README.md @@ -74,7 +74,7 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 ### 类成员变量 -> > * **size**(vector): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **size**(vector< int>>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] > > * **padding_value**(vector): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] > > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false` > > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false` From c3cd45573b53f67de29992692baf683f65c2e51f Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 08:26:46 +0000 Subject: [PATCH 43/58] yolov7 doc --- examples/vision/detection/yolov7/cpp/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/vision/detection/yolov7/cpp/README.md b/examples/vision/detection/yolov7/cpp/README.md index 5e4ee4eeac..5c27318028 100644 --- a/examples/vision/detection/yolov7/cpp/README.md +++ b/examples/vision/detection/yolov7/cpp/README.md @@ -74,7 +74,7 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 ### 类成员变量 -> > * **size**(vector< int>>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **size**(vector<int>>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] > > * **padding_value**(vector): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] > > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false` > > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false` From 4c05253c0ccad7ba464cf67bc3882619d0e05bfd Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 08:30:12 +0000 Subject: [PATCH 44/58] yolov7 doc --- examples/vision/detection/yolov7/cpp/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/vision/detection/yolov7/cpp/README.md b/examples/vision/detection/yolov7/cpp/README.md index 5c27318028..f6e23e3f1c 100644 --- a/examples/vision/detection/yolov7/cpp/README.md +++ b/examples/vision/detection/yolov7/cpp/README.md @@ -74,7 +74,7 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 ### 类成员变量 -> > * **size**(vector<int>>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 
640]
> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114]
> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false`
> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false`
> > * **stride**(int): 配合`stris_mini_pad`成员变量使用, 默认值为`stride=32`

From 49486ce41b0abafa6c85411f5138d78860929587 Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Wed, 10 Aug 2022 09:27:37 +0000
Subject: [PATCH 45/58] add yolov5 docs

---
 examples/vision/detection/yolov5/README.md    | 28 ++++++++++++++
 .../vision/detection/yolov5/cpp/README.md     | 36 +++++++++++-------
 examples/vision/detection/yolov5/cpp/infer.cc |  8 ++--
 .../vision/detection/yolov5/python/README.md  | 38 +++++++++++--------
 .../vision/detection/yolov5/python/infer.py   |  4 +-
 examples/vision/detection/yolov7/README.md    |  2 +-
 .../vision/detection/yolov7/cpp/README.md     |  2 +-
 7 files changed, 81 insertions(+), 37 deletions(-)
 create mode 100644 examples/vision/detection/yolov5/README.md

diff --git a/examples/vision/detection/yolov5/README.md b/examples/vision/detection/yolov5/README.md
new file mode 100644
index 0000000000..30e638944c
--- /dev/null
+++ b/examples/vision/detection/yolov5/README.md
@@ -0,0 +1,28 @@
+# YOLOv5准备部署模型
+
+## 模型版本说明
+
+- [YOLOv5 v6.0](https://github.com/ultralytics/yolov5/releases/tag/v6.0)
+  - (1)[链接中](https://github.com/ultralytics/yolov5/releases/tag/v6.0)的*.onnx可直接进行部署;
+  - (2)开发者基于自己数据训练的YOLOv5 v6.0模型,可使用[YOLOv5](https://github.com/ultralytics/yolov5)中的`export.py`导出ONNX文件后,完成部署。
+
+
+## 下载预训练ONNX模型
+
+为了方便开发者的测试,下面提供了YOLOv5导出的各系列模型,开发者可直接下载使用。
+
+| 模型                                                               | 大小    | 精度    |
+|:---------------------------------------------------------------- |:----- |:----- |
+| [YOLOv5n](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5n.onnx) | 1.9MB | 28.4% |
+| [YOLOv5s](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx) | 7.2MB | 37.2% |
+| [YOLOv5m](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5m.onnx) | 21.2MB | 45.2% |
+| [YOLOv5l](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5l.onnx) | 46.5MB | 48.8% |
+| [YOLOv5x](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5x.onnx) | 86.7MB | 50.7% |
+
+
+
+
+## 详细部署文档
+
+- [Python部署](python)
+- [C++部署](cpp)
diff --git a/examples/vision/detection/yolov5/cpp/README.md b/examples/vision/detection/yolov5/cpp/README.md
index 6d4c7fe7fd..ceba2d0c38 100644
--- a/examples/vision/detection/yolov5/cpp/README.md
+++ b/examples/vision/detection/yolov5/cpp/README.md
@@ -1,6 +1,6 @@
-# YOLOv7 C++部署示例
+# YOLOv5 C++部署示例
 
-本目录下提供`infer.cc`快速完成YOLOv7在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。
+本目录下提供`infer.cc`快速完成YOLOv5在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。
 
 在部署前,需确认以下两个步骤
 
@@ -17,32 +17,36 @@ tar xvf fastdeploy-linux-x64-0.2.0.tgz
 cmake ..
-DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0 make -j -#下载官方转换好的yolov7模型文件和测试图片 -wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov7.onnx -wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000087038.jpg +#下载官方转换好的yolov5模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5.onnx +wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg # CPU推理 -./infer_demo yolov7.onnx 000000087038.jpg 0 +./infer_demo yolov5.onnx 000000014439.jpg 0 # GPU推理 -./infer_demo yolov7.onnx 000000087038.jpg 1 +./infer_demo yolov5.onnx 000000014439.jpg 1 # GPU上TensorRT推理 -./infer_demo yolov7.onnx 000000087038.jpg 2 +./infer_demo yolov5.onnx 000000014439.jpg 2 ``` -## YOLOv7 C++接口 +运行完成可视化结果如下图所示 -### YOLOv7类 + + +## YOLOv5 C++接口 + +### YOLOv5类 ``` -fastdeploy::vision::detection::YOLOv7( +fastdeploy::vision::detection::YOLOv5( const string& model_file, const string& params_file = "", const RuntimeOption& runtime_option = RuntimeOption(), const Frontend& model_format = Frontend::ONNX) ``` -YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 +YOLOv5模型加载和初始化,其中model_file为导出的ONNX模型格式。 **参数** @@ -54,7 +58,7 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 #### Predict函数 > ``` -> YOLOv7::Predict(cv::Mat* im, DetectionResult* result, +> YOLOv5::Predict(cv::Mat* im, DetectionResult* result, > float conf_threshold = 0.25, > float nms_iou_threshold = 0.5) > ``` @@ -70,7 +74,11 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 ### 类成员变量 -> > * **size**(vector): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false` +> > * **stride**(int): 配合`stris_mini_pad`成员变量使用, 默认值为`stride=32` - [模型介绍](../../) - [Python部署](../python) diff --git a/examples/vision/detection/yolov5/cpp/infer.cc b/examples/vision/detection/yolov5/cpp/infer.cc index 1ddca8f1c8..ef3e47ea1f 100644 --- a/examples/vision/detection/yolov5/cpp/infer.cc +++ b/examples/vision/detection/yolov5/cpp/infer.cc @@ -15,7 +15,7 @@ #include "fastdeploy/vision.h" void CpuInfer(const std::string& model_file, const std::string& image_file) { - auto model = fastdeploy::vision::detection::YOLOv7(model_file); + auto model = fastdeploy::vision::detection::YOLOv5(model_file); if (!model.Initialized()) { std::cerr << "Failed to initialize." << std::endl; return; @@ -38,7 +38,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { void GpuInfer(const std::string& model_file, const std::string& image_file) { auto option = fastdeploy::RuntimeOption(); option.UseGpu(); - auto model = fastdeploy::vision::detection::YOLOv7(model_file, "", option); + auto model = fastdeploy::vision::detection::YOLOv5(model_file, "", option); if (!model.Initialized()) { std::cerr << "Failed to initialize." 
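   // (this hunk only swaps the wrapper class from YOLOv7 to YOLOv5; the
   // RuntimeOption plumbing shared by the CPU/GPU/TRT variants is unchanged)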
<< std::endl; return; @@ -63,7 +63,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { option.UseGpu(); option.UseTrtBackend(); option.SetTrtInputShape("images", {1, 3, 640, 640}); - auto model = fastdeploy::vision::detection::YOLOv7(model_file, "", option); + auto model = fastdeploy::vision::detection::YOLOv5(model_file, "", option); if (!model.Initialized()) { std::cerr << "Failed to initialize." << std::endl; return; @@ -86,7 +86,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { int main(int argc, char* argv[]) { if (argc < 4) { std::cout << "Usage: infer_demo path/to/model path/to/image run_option, " - "e.g ./infer_model ./yolov7.onnx ./test.jpeg 0" + "e.g ./infer_model ./yolov5.onnx ./test.jpeg 0" << std::endl; std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " "with gpu; 2: run with gpu and use tensorrt backend." diff --git a/examples/vision/detection/yolov5/python/README.md b/examples/vision/detection/yolov5/python/README.md index 74078e2add..6d099e7d7b 100644 --- a/examples/vision/detection/yolov5/python/README.md +++ b/examples/vision/detection/yolov5/python/README.md @@ -1,39 +1,41 @@ -# YOLOv7 Python部署示例 +# YOLOv5 Python部署示例 在部署前,需确认以下两个步骤 - 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) - 2. FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md) -本目录下提供`infer.py`快速完成YOLOv7在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成 +本目录下提供`infer.py`快速完成YOLOv5在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成 ``` -#下载yolov7模型文件和测试图片 -wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov7.onnx +#下载yolov5模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5.onnx wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg #下载部署示例代码 git clone https://github.com/PaddlePaddle/FastDeploy.git -cd examples/vison/detection/yolov7/python/ +cd examples/vison/detection/yolov5/python/ # CPU推理 -python infer.py --model yolov7.onnx --image 000000087038.jpg --device cpu +python infer.py --model yolov5.onnx --image 000000014439.jpg --device cpu # GPU推理 -python infer.py --model yolov7.onnx --image 000000087038.jpg --device gpu +python infer.py --model yolov5.onnx --image 000000014439.jpg --device gpu # GPU上使用TensorRT推理 -python infer.py --model yolov7.onnx --image 000000087038.jpg --device gpu --use_trt True +python infer.py --model yolov5.onnx --image 000000014439.jpg --device gpu --use_trt True ``` 运行完成可视化结果如下图所示 -## YOLOv7 Python接口 + + +## YOLOv5 Python接口 ``` -fastdeploy.vision.detection.YOLOv7(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX) +fastdeploy.vision.detection.YOLOv5(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX) ``` -YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式 +YOLOv5模型加载和初始化,其中model_file为导出的ONNX模型格式 **参数** @@ -45,7 +47,7 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式 ### predict函数 > ``` -> YOLOv7.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) +> YOLOv5.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) > ``` > > 模型预测结口,输入图像直接输出检测结果。 @@ -62,10 +64,16 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式 ### 类成员属性 -> > * **size**(list | tuple): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(list[float]): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * 
**is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=True` 表示不使用填充的方式,默认值为`is_no_pad=False` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=False` +> > * **stride**(int): 配合`stris_mini_padide`成员变量使用, 默认值为`stride=32` + + ## 其它文档 -- [YOLOv7 模型介绍](..) -- [YOLOv7 C++部署](../cpp) +- [YOLOv5 模型介绍](..) +- [YOLOv5 C++部署](../cpp) - [模型预测结果说明](../../../../../docs/api/vision_results/) diff --git a/examples/vision/detection/yolov5/python/infer.py b/examples/vision/detection/yolov5/python/infer.py index 574755c3a3..3f7a91f99d 100644 --- a/examples/vision/detection/yolov5/python/infer.py +++ b/examples/vision/detection/yolov5/python/infer.py @@ -7,7 +7,7 @@ def parse_arguments(): import ast parser = argparse.ArgumentParser() parser.add_argument( - "--model", required=True, help="Path of yolov7 onnx model.") + "--model", required=True, help="Path of yolov5 onnx model.") parser.add_argument( "--image", required=True, help="Path of test image file.") parser.add_argument( @@ -39,7 +39,7 @@ def build_option(args): # 配置runtime,加载模型 runtime_option = build_option(args) -model = fd.vision.detection.YOLOv7(args.model, runtime_option=runtime_option) +model = fd.vision.detection.YOLOv5(args.model, runtime_option=runtime_option) # 预测图片检测结果 im = cv2.imread(args.image) diff --git a/examples/vision/detection/yolov7/README.md b/examples/vision/detection/yolov7/README.md index a661d9bd9f..857bdda31d 100644 --- a/examples/vision/detection/yolov7/README.md +++ b/examples/vision/detection/yolov7/README.md @@ -23,7 +23,7 @@ python models/export.py --grid --dynamic --weights PATH/TO/yolov7.pt cp PATH/TO/yolov7.onnx PATH/TO/model_zoo/vision/yolov7/ ``` -## 下载预训练模型 +## 下载预训练ONNX模型 为了方便开发者的测试,下面提供了YOLOv7导出的各系列模型,开发者可直接下载使用。 diff --git a/examples/vision/detection/yolov7/cpp/README.md b/examples/vision/detection/yolov7/cpp/README.md index f6e23e3f1c..c67689570d 100644 --- a/examples/vision/detection/yolov7/cpp/README.md +++ b/examples/vision/detection/yolov7/cpp/README.md @@ -75,7 +75,7 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 ### 类成员变量 > > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] -> > * **padding_value**(vector): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] > > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false` > > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false` > > * **stride**(int): 配合`stris_mini_pad`成员变量使用, 默认值为`stride=32` From 6034490ab0d2dc5f98f1de0c13404f5b114d6f95 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 12:47:15 +0000 Subject: [PATCH 46/58] modify yolov5 doc --- examples/vision/detection/yolov5/cpp/README.md | 8 ++++---- examples/vision/detection/yolov5/python/README.md | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/examples/vision/detection/yolov5/cpp/README.md b/examples/vision/detection/yolov5/cpp/README.md index ceba2d0c38..feb44d13df 100644 --- a/examples/vision/detection/yolov5/cpp/README.md +++ b/examples/vision/detection/yolov5/cpp/README.md @@ -18,16 +18,16 @@ cmake .. 
-DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0 make -j #下载官方转换好的yolov5模型文件和测试图片 -wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5.onnx +wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg # CPU推理 -./infer_demo yolov5.onnx 000000014439.jpg 0 +./infer_demo yolov5s.onnx 000000014439.jpg 0 # GPU推理 -./infer_demo yolov5.onnx 000000014439.jpg 1 +./infer_demo yolov5s.onnx 000000014439.jpg 1 # GPU上TensorRT推理 -./infer_demo yolov5.onnx 000000014439.jpg 2 +./infer_demo yolov5s.onnx 000000014439.jpg 2 ``` 运行完成可视化结果如下图所示 diff --git a/examples/vision/detection/yolov5/python/README.md b/examples/vision/detection/yolov5/python/README.md index 6d099e7d7b..57cdba44cb 100644 --- a/examples/vision/detection/yolov5/python/README.md +++ b/examples/vision/detection/yolov5/python/README.md @@ -9,7 +9,7 @@ ``` #下载yolov5模型文件和测试图片 -wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5.onnx +wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s.onnx wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg @@ -18,11 +18,11 @@ git clone https://github.com/PaddlePaddle/FastDeploy.git cd examples/vison/detection/yolov5/python/ # CPU推理 -python infer.py --model yolov5.onnx --image 000000014439.jpg --device cpu +python infer.py --model yolov5s.onnx --image 000000014439.jpg --device cpu # GPU推理 -python infer.py --model yolov5.onnx --image 000000014439.jpg --device gpu +python infer.py --model yolov5s.onnx --image 000000014439.jpg --device gpu # GPU上使用TensorRT推理 -python infer.py --model yolov5.onnx --image 000000014439.jpg --device gpu --use_trt True +python infer.py --model yolov5s.onnx --image 000000014439.jpg --device gpu --use_trt True ``` 运行完成可视化结果如下图所示 From 08bf982f103cf24ad9080fb4fb8f43e2bc228de4 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 13:51:46 +0000 Subject: [PATCH 47/58] first commit for retinaface --- examples/vision/facedet/retinaface/README.md | 54 ++++++++++++ .../facedet/retinaface/cpp/CMakeLists.txt | 14 +++ .../vision/facedet/retinaface/cpp/README.md | 85 +++++++++++++++++++ .../facedet/retinaface/python/README.md | 79 +++++++++++++++++ 4 files changed, 232 insertions(+) create mode 100644 examples/vision/facedet/retinaface/README.md create mode 100644 examples/vision/facedet/retinaface/cpp/CMakeLists.txt create mode 100644 examples/vision/facedet/retinaface/cpp/README.md create mode 100644 examples/vision/facedet/retinaface/python/README.md diff --git a/examples/vision/facedet/retinaface/README.md b/examples/vision/facedet/retinaface/README.md new file mode 100644 index 0000000000..b545b98d21 --- /dev/null +++ b/examples/vision/facedet/retinaface/README.md @@ -0,0 +1,54 @@ +# RetinaFace准备部署模型 + +## 模型版本说明 + +- [RetinaFace CommitID:b984b4b](https://github.com/biubug6/Pytorch_Retinaface/commit/b984b4b) + - (1)[链接中](https://github.com/biubug6/Pytorch_Retinaface/commit/b984b4b)的*.pt通过[导出ONNX模型](#导出ONNX模型)操作后,可进行部署; + - (2)开发者基于自己数据训练的RetinaFace CommitID:b984b4b模型,可按照[导出ONNX模型](#%E5%AF%BC%E5%87%BAONNX%E6%A8%A1%E5%9E%8B)后,完成部署。 + +## 导出ONNX模型 + +自动下载的模型文件是我们事先转换好的,如果您需要从RetinaFace官方repo导出ONNX,请参考以下步骤。 + +* 下载官方仓库并 +```bash +git clone https://github.com/biubug6/Pytorch_Retinaface.git +``` +* 下载预训练权重并放在weights文件夹 +```text +./weights/ + mobilenet0.25_Final.pth + mobilenetV1X0.25_pretrain.tar + Resnet50_Final.pth +``` +* 运行convert_to_onnx.py导出ONNX模型文件 +```bash +PYTHONPATH=. 
python convert_to_onnx.py --trained_model ./weights/mobilenet0.25_Final.pth --network mobile0.25 --long_side 640 --cpu +PYTHONPATH=. python convert_to_onnx.py --trained_model ./weights/Resnet50_Final.pth --network resnet50 --long_side 640 --cpu +``` +注意:需要先对convert_to_onnx.py脚本中的--long_side参数增加类型约束,type=int. +* 使用onnxsim对模型进行简化 +```bash +onnxsim FaceDetector.onnx Pytorch_RetinaFace_mobile0.25-640-640.onnx # mobilenet +onnxsim FaceDetector.onnx Pytorch_RetinaFace_resnet50-640-640.onnx # resnet50 +``` + +## 下载预训练ONNX模型 + +为了方便开发者的测试,下面提供了RetinaFace导出的各系列模型,开发者可直接下载使用。 + +| 模型 | 大小 | 精度 | +|:---------------------------------------------------------------- |:----- |:----- | +| [RetinaFace_mobile0.25-640](https://bj.bcebos.com/paddlehub/fastdeploy/Pytorch_RetinaFace_mobile0.25-640-640.onnx) | 1.7MB | - | +| [RetinaFace_mobile0.25-720](https://bj.bcebos.com/paddlehub/fastdeploy/Pytorch_RetinaFace_mobile0.25-720-1080.onnx) | 1.7MB | -| +| [RetinaFace_resnet50-640](https://bj.bcebos.com/paddlehub/fastdeploy/Pytorch_RetinaFace_resnet50-720-1080.onnx) | 105MB | - | +| [RetinaFace_resnet50-720](https://bj.bcebos.com/paddlehub/fastdeploy/Pytorch_RetinaFace_resnet50-640-640.onnx) | 105MB | - | + + + + + +## 详细部署文档 + +- [Python部署](python) +- [C++部署](cpp) diff --git a/examples/vision/facedet/retinaface/cpp/CMakeLists.txt b/examples/vision/facedet/retinaface/cpp/CMakeLists.txt new file mode 100644 index 0000000000..fea1a2888b --- /dev/null +++ b/examples/vision/facedet/retinaface/cpp/CMakeLists.txt @@ -0,0 +1,14 @@ +PROJECT(infer_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.12) + +# 指定下载解压后的fastdeploy库路径 +option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc) +# 添加FastDeploy库依赖 +target_link_libraries(infer_demo ${FASTDEPLOY_LIBS}) diff --git a/examples/vision/facedet/retinaface/cpp/README.md b/examples/vision/facedet/retinaface/cpp/README.md new file mode 100644 index 0000000000..b14b92bbba --- /dev/null +++ b/examples/vision/facedet/retinaface/cpp/README.md @@ -0,0 +1,85 @@ +# YOLOv7 C++部署示例 + +本目录下提供`infer.cc`快速完成YOLOv7在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuild_libraries.md) + +以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试 + +``` +mkdir build +cd build +wget https://xxx.tgz +tar xvf fastdeploy-linux-x64-0.2.0.tgz +cmake .. 
-DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0 +make -j + +#下载官方转换好的yolov7模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/Pytorch_RetinaFace_mobile0.25-640-640.onnx +wget todo + + +# CPU推理 +./infer_demo Pytorch_RetinaFace_mobile0.25-640-640.onnx todo 0 +# GPU推理 +./infer_demo Pytorch_RetinaFace_mobile0.25-640-640.onnx todo 1 +# GPU上TensorRT推理 +./infer_demo Pytorch_RetinaFace_mobile0.25-640-640.onnx todo 2 +``` + +运行完成可视化结果如下图所示 + + + +## YOLOv7 C++接口 + +### YOLOv7类 + +``` +fastdeploy::vision::detection::YOLOv7( + const string& model_file, + const string& params_file = "", + const RuntimeOption& runtime_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX) +``` + +YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX时,此参数传入空字符串即可 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX格式 + +#### Predict函数 + +> ``` +> YOLOv7::Predict(cv::Mat* im, DetectionResult* result, +> float conf_threshold = 0.25, +> float nms_iou_threshold = 0.5) +> ``` +> +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式 +> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/) +> > * **conf_threshold**: 检测框置信度过滤阈值 +> > * **nms_iou_threshold**: NMS处理过程中iou阈值 + +### 类成员变量 + +> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false` +> > * **stride**(int): 配合`stris_mini_pad`成员变量使用, 默认值为`stride=32` + +- [模型介绍](../../) +- [Python部署](../python) +- [视觉模型预测结果](../../../../../docs/api/vision_results/) diff --git a/examples/vision/facedet/retinaface/python/README.md b/examples/vision/facedet/retinaface/python/README.md new file mode 100644 index 0000000000..2ef4bfd132 --- /dev/null +++ b/examples/vision/facedet/retinaface/python/README.md @@ -0,0 +1,79 @@ +# YOLOv7 Python部署示例 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 
FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md) + +本目录下提供`infer.py`快速完成YOLOv7在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成 + +``` +#下载yolov7模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/Pytorch_RetinaFace_mobile0.25-640-640.onnx +wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/todo + + +#下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd examples/vison/detection/yolov7/python/ + +# CPU推理 +python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image todo --device cpu +# GPU推理 +python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image todo --device gpu +# GPU上使用TensorRT推理 +python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image todo --device gpu --use_trt True +``` + +运行完成可视化结果如下图所示 + + + +## YOLOv7 Python接口 + +``` +fastdeploy.vision.detection.YOLOv7(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX) +``` + +YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX格式时,此参数无需设定 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX + +### predict函数 + +> ``` +> YOLOv7.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) +> ``` +> +> 模型预测结口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 +> > * **conf_threshold**(float): 检测框置信度过滤阈值 +> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值 + +> **返回** +> +> > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/) + +### 类成员属性 + +> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(list[float]): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=True` 表示不使用填充的方式,默认值为`is_no_pad=False` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=False` +> > * **stride**(int): 配合`stris_mini_padide`成员变量使用, 默认值为`stride=32` + + + +## 其它文档 + +- [YOLOv7 模型介绍](..) +- [YOLOv7 C++部署](../cpp) +- [模型预测结果说明](../../../../../docs/api/vision_results/) From 5e9518063302ee9b0add91cf2427ecbf03691172 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 13:54:17 +0000 Subject: [PATCH 48/58] first commit for retinaface --- examples/vision/facedet/retinaface/cpp/README.md | 16 ++++++++-------- .../vision/facedet/retinaface/python/README.md | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/examples/vision/facedet/retinaface/cpp/README.md b/examples/vision/facedet/retinaface/cpp/README.md index b14b92bbba..dc36657076 100644 --- a/examples/vision/facedet/retinaface/cpp/README.md +++ b/examples/vision/facedet/retinaface/cpp/README.md @@ -1,6 +1,6 @@ -# YOLOv7 C++部署示例 +# RetinaFace C++部署示例 -本目录下提供`infer.cc`快速完成YOLOv7在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 +本目录下提供`infer.cc`快速完成RetinaFace在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 在部署前,需确认以下两个步骤 @@ -17,7 +17,7 @@ tar xvf fastdeploy-linux-x64-0.2.0.tgz cmake .. 
-DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0 make -j -#下载官方转换好的yolov7模型文件和测试图片 +#下载官方转换好的RetinaFace模型文件和测试图片 wget https://bj.bcebos.com/paddlehub/fastdeploy/Pytorch_RetinaFace_mobile0.25-640-640.onnx wget todo @@ -34,19 +34,19 @@ wget todo -## YOLOv7 C++接口 +## RetinaFace C++接口 -### YOLOv7类 +### RetinaFace类 ``` -fastdeploy::vision::detection::YOLOv7( +fastdeploy::vision::facedet::RetinaFace( const string& model_file, const string& params_file = "", const RuntimeOption& runtime_option = RuntimeOption(), const Frontend& model_format = Frontend::ONNX) ``` -YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 +RetinaFace模型加载和初始化,其中model_file为导出的ONNX模型格式。 **参数** @@ -58,7 +58,7 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式。 #### Predict函数 > ``` -> YOLOv7::Predict(cv::Mat* im, DetectionResult* result, +> RetinaFace::Predict(cv::Mat* im, DetectionResult* result, > float conf_threshold = 0.25, > float nms_iou_threshold = 0.5) > ``` diff --git a/examples/vision/facedet/retinaface/python/README.md b/examples/vision/facedet/retinaface/python/README.md index 2ef4bfd132..d7c295c375 100644 --- a/examples/vision/facedet/retinaface/python/README.md +++ b/examples/vision/facedet/retinaface/python/README.md @@ -1,11 +1,11 @@ -# YOLOv7 Python部署示例 +# RetinaFace Python部署示例 在部署前,需确认以下两个步骤 - 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) - 2. FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md) -本目录下提供`infer.py`快速完成YOLOv7在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成 +本目录下提供`infer.py`快速完成RetinaFace在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成 ``` #下载yolov7模型文件和测试图片 @@ -29,13 +29,13 @@ python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image todo -## YOLOv7 Python接口 +## RetinaFace Python接口 ``` -fastdeploy.vision.detection.YOLOv7(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX) +fastdeploy.vision.facedet.RetinaFace(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX) ``` -YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式 +RetinaFace模型加载和初始化,其中model_file为导出的ONNX模型格式 **参数** @@ -47,7 +47,7 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式 ### predict函数 > ``` -> YOLOv7.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) +> RetinaFace.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) > ``` > > 模型预测结口,输入图像直接输出检测结果。 @@ -74,6 +74,6 @@ YOLOv7模型加载和初始化,其中model_file为导出的ONNX模型格式 ## 其它文档 -- [YOLOv7 模型介绍](..) -- [YOLOv7 C++部署](../cpp) +- [RetinaFace 模型介绍](..) 
+- [RetinaFace C++部署](../cpp) - [模型预测结果说明](../../../../../docs/api/vision_results/) From 80d1ca64899d229d9d63dcf282f207db1d38bb7a Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 14:08:33 +0000 Subject: [PATCH 49/58] firt commit for ultraface --- .../facedet/retinaface/python/README.md | 2 +- examples/vision/facedet/ultraface/README.md | 23 +++++ .../facedet/ultraface/cpp/CMakeLists.txt | 14 +++ .../vision/facedet/ultraface/cpp/README.md | 85 +++++++++++++++++++ .../vision/facedet/ultraface/python/README.md | 79 +++++++++++++++++ 5 files changed, 202 insertions(+), 1 deletion(-) create mode 100644 examples/vision/facedet/ultraface/README.md create mode 100644 examples/vision/facedet/ultraface/cpp/CMakeLists.txt create mode 100644 examples/vision/facedet/ultraface/cpp/README.md create mode 100644 examples/vision/facedet/ultraface/python/README.md diff --git a/examples/vision/facedet/retinaface/python/README.md b/examples/vision/facedet/retinaface/python/README.md index d7c295c375..c063bedc1c 100644 --- a/examples/vision/facedet/retinaface/python/README.md +++ b/examples/vision/facedet/retinaface/python/README.md @@ -10,7 +10,7 @@ ``` #下载yolov7模型文件和测试图片 wget https://bj.bcebos.com/paddlehub/fastdeploy/Pytorch_RetinaFace_mobile0.25-640-640.onnx -wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/todo +wget todo #下载部署示例代码 diff --git a/examples/vision/facedet/ultraface/README.md b/examples/vision/facedet/ultraface/README.md new file mode 100644 index 0000000000..f1dcca0b98 --- /dev/null +++ b/examples/vision/facedet/ultraface/README.md @@ -0,0 +1,23 @@ +# UltraFace准备部署模型 + +## 模型版本说明 + +- [UltraFace CommitID:dffdddd](https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB/commit/dffdddd) + - (1)[链接中](https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB/commit/dffdddd)的*.onnx可下载, 也可以通过下面模型链接下载并进行部署 + + +## 下载预训练ONNX模型 + +为了方便开发者的测试,下面提供了UltraFace导出的各系列模型,开发者可直接下载使用。 + +| 模型 | 大小 | 精度 | +|:---------------------------------------------------------------- |:----- |:----- | +| [RFB-320](https://bj.bcebos.com/paddlehub/fastdeploy/version-RFB-320.onnx) | 1.3MB | - | +| [RFB-320-sim](https://bj.bcebos.com/paddlehub/fastdeploy/version-RFB-320-sim.onnx) | 1.2MB | -| + + + +## 详细部署文档 + +- [Python部署](python) +- [C++部署](cpp) diff --git a/examples/vision/facedet/ultraface/cpp/CMakeLists.txt b/examples/vision/facedet/ultraface/cpp/CMakeLists.txt new file mode 100644 index 0000000000..fea1a2888b --- /dev/null +++ b/examples/vision/facedet/ultraface/cpp/CMakeLists.txt @@ -0,0 +1,14 @@ +PROJECT(infer_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.12) + +# 指定下载解压后的fastdeploy库路径 +option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc) +# 添加FastDeploy库依赖 +target_link_libraries(infer_demo ${FASTDEPLOY_LIBS}) diff --git a/examples/vision/facedet/ultraface/cpp/README.md b/examples/vision/facedet/ultraface/cpp/README.md new file mode 100644 index 0000000000..1eae69c0fb --- /dev/null +++ b/examples/vision/facedet/ultraface/cpp/README.md @@ -0,0 +1,85 @@ +# UltraFace C++部署示例 + +本目录下提供`infer.cc`快速完成UltraFace在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 
根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuild_libraries.md) + +以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试 + +``` +mkdir build +cd build +wget https://xxx.tgz +tar xvf fastdeploy-linux-x64-0.2.0.tgz +cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0 +make -j + +#下载官方转换好的UltraFace模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/version-RFB-320.onnx +wget todo + + +# CPU推理 +./infer_demo version-RFB-320.onnx todo 0 +# GPU推理 +./infer_demo version-RFB-320.onnx todo 1 +# GPU上TensorRT推理 +./infer_demo version-RFB-320.onnx todo 2 +``` + +运行完成可视化结果如下图所示 + + + +## UltraFace C++接口 + +### UltraFace类 + +``` +fastdeploy::vision::facedet::UltraFace( + const string& model_file, + const string& params_file = "", + const RuntimeOption& runtime_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX) +``` + +UltraFace模型加载和初始化,其中model_file为导出的ONNX模型格式。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX时,此参数传入空字符串即可 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX格式 + +#### Predict函数 + +> ``` +> UltraFace::Predict(cv::Mat* im, DetectionResult* result, +> float conf_threshold = 0.25, +> float nms_iou_threshold = 0.5) +> ``` +> +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式 +> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/) +> > * **conf_threshold**: 检测框置信度过滤阈值 +> > * **nms_iou_threshold**: NMS处理过程中iou阈值 + +### 类成员变量 + +> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false` +> > * **stride**(int): 配合`stris_mini_pad`成员变量使用, 默认值为`stride=32` + +- [模型介绍](../../) +- [Python部署](../python) +- [视觉模型预测结果](../../../../../docs/api/vision_results/) diff --git a/examples/vision/facedet/ultraface/python/README.md b/examples/vision/facedet/ultraface/python/README.md new file mode 100644 index 0000000000..df7545547a --- /dev/null +++ b/examples/vision/facedet/ultraface/python/README.md @@ -0,0 +1,79 @@ +# UltraFace Python部署示例 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 
FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md) + +本目录下提供`infer.py`快速完成UltraFace在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成 + +``` +#下载yolov7模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/version-RFB-320.onnx +wget todo + + +#下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd examples/vison/detection/yolov7/python/ + +# CPU推理 +python infer.py --model version-RFB-320.onnx --image todo --device cpu +# GPU推理 +python infer.py --model version-RFB-320.onnx --image todo --device gpu +# GPU上使用TensorRT推理 +python infer.py --model version-RFB-320.onnx --image todo --device gpu --use_trt True +``` + +运行完成可视化结果如下图所示 + + + +## UltraFace Python接口 + +``` +fastdeploy.vision.facedet.UltraFace(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX) +``` + +UltraFace模型加载和初始化,其中model_file为导出的ONNX模型格式 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX格式时,此参数无需设定 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX + +### predict函数 + +> ``` +> UltraFace.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) +> ``` +> +> 模型预测结口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 +> > * **conf_threshold**(float): 检测框置信度过滤阈值 +> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值 + +> **返回** +> +> > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/) + +### 类成员属性 + +> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(list[float]): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=True` 表示不使用填充的方式,默认值为`is_no_pad=False` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=False` +> > * **stride**(int): 配合`stris_mini_padide`成员变量使用, 默认值为`stride=32` + + + +## 其它文档 + +- [UltraFace 模型介绍](..) 
+- [UltraFace C++部署](../cpp)
+- [模型预测结果说明](../../../../../docs/api/vision_results/)

From 4b97d57802569719895a365e1d91a846d759d5a8 Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Wed, 10 Aug 2022 14:11:04 +0000
Subject: [PATCH 50/58] firt commit for ultraface

---
 examples/vision/facedet/retinaface/python/README.md | 4 ++--
 examples/vision/facedet/ultraface/python/README.md  | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/vision/facedet/retinaface/python/README.md b/examples/vision/facedet/retinaface/python/README.md
index c063bedc1c..b8c3251359 100644
--- a/examples/vision/facedet/retinaface/python/README.md
+++ b/examples/vision/facedet/retinaface/python/README.md
@@ -8,14 +8,14 @@
 本目录下提供`infer.py`快速完成RetinaFace在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成

 ```
-#下载yolov7模型文件和测试图片
+#下载retinaface模型文件和测试图片
 wget https://bj.bcebos.com/paddlehub/fastdeploy/Pytorch_RetinaFace_mobile0.25-640-640.onnx
 wget todo


 #下载部署示例代码
 git clone https://github.com/PaddlePaddle/FastDeploy.git
-cd examples/vison/detection/yolov7/python/
+cd examples/vision/facedet/retinaface/python/

 # CPU推理
 python infer.py --model Pytorch_RetinaFace_mobile0.25-640-640.onnx --image todo --device cpu
diff --git a/examples/vision/facedet/ultraface/python/README.md b/examples/vision/facedet/ultraface/python/README.md
index df7545547a..88026ecff3 100644
--- a/examples/vision/facedet/ultraface/python/README.md
+++ b/examples/vision/facedet/ultraface/python/README.md
@@ -8,14 +8,14 @@
 本目录下提供`infer.py`快速完成UltraFace在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成

 ```
-#下载yolov7模型文件和测试图片
+#下载ultraface模型文件和测试图片
 wget https://bj.bcebos.com/paddlehub/fastdeploy/version-RFB-320.onnx
 wget todo


 #下载部署示例代码
 git clone https://github.com/PaddlePaddle/FastDeploy.git
-cd examples/vison/detection/yolov7/python/
+cd examples/vision/facedet/ultraface/python/

 # CPU推理
 python infer.py --model version-RFB-320.onnx --image todo --device cpu

From 7924e7ef06552e66037db00bbe7a2d9a3b2e895d Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Wed, 10 Aug 2022 14:18:55 +0000
Subject: [PATCH 51/58] firt commit for yolov5face

---
 examples/vision/facedet/yolov5face/README.md  | 46 ++++++++
 .../facedet/yolov5face/cpp/CMakeLists.txt     | 14 +++
 .../vision/facedet/yolov5face/cpp/README.md   | 85 +++++++++++++++
 .../facedet/yolov5face/python/README.md       | 79 ++++++++++++++
 4 files changed, 224 insertions(+)
 create mode 100644 examples/vision/facedet/yolov5face/README.md
 create mode 100644 examples/vision/facedet/yolov5face/cpp/CMakeLists.txt
 create mode 100644 examples/vision/facedet/yolov5face/cpp/README.md
 create mode 100644 examples/vision/facedet/yolov5face/python/README.md

diff --git a/examples/vision/facedet/yolov5face/README.md b/examples/vision/facedet/yolov5face/README.md
new file mode 100644
index 0000000000..22b9868e1b
--- /dev/null
+++ b/examples/vision/facedet/yolov5face/README.md
@@ -0,0 +1,46 @@
+# YOLOv5Face准备部署模型
+
+## 模型版本说明
+
+- [YOLOv5Face CommitID:4fd1ead](https://github.com/deepcam-cn/yolov5-face/commit/4fd1ead)
+  - (1)[链接中](https://github.com/deepcam-cn/yolov5-face/commit/4fd1ead)的*.pt通过[导出ONNX模型](#导出ONNX模型)操作后,可进行部署;
+  - (2)开发者基于自己数据训练的YOLOv5Face CommitID:4fd1ead模型,可按照[导出ONNX模型](#%E5%AF%BC%E5%87%BAONNX%E6%A8%A1%E5%9E%8B)后,完成部署。
+
+## 导出ONNX模型
+
+访问[YOLOv5Face](https://github.com/deepcam-cn/yolov5-face)官方github库,按照指引下载安装,下载`yolov5s-face.pt` 模型,利用 `export.py` 得到`onnx`格式文件。
+
+* 下载yolov5face模型文件
+  ```
+  Link: https://pan.baidu.com/s/1fyzLxZYx7Ja1_PCIWRhxbw 提取码: eq0q
+
https://drive.google.com/file/d/1zxaHeLDyID9YU4-hqK7KNepXIwbTkRIO/view?usp=sharing + ``` + +* 导出onnx格式文件 + ```bash + PYTHONPATH=. python export.py --weights weights/yolov5s-face.pt --img_size 640 640 --batch_size 1 + ``` +* onnx模型简化(可选) + ```bash + onnxsim yolov5s-face.onnx yolov5s-face.onnx + ``` +* 移动onnx文件到model_zoo/yolov5face的目录 + ```bash + cp PATH/TO/yolov5s-face.onnx PATH/TO/model_zoo/vision/yolov5face/ + ``` + +## 下载预训练ONNX模型 + +为了方便开发者的测试,下面提供了YOLOv5Face导出的各系列模型,开发者可直接下载使用。 + +| 模型 | 大小 | 精度 | +|:---------------------------------------------------------------- |:----- |:----- | +| [YOLOv5s-Face](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s-face.onnx) | 30MB | - | +| [YOLOv5s-Face-bak](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5face-s-640x640.bak.onnx) | 30MB | -| +| [YOLOv5l-Face](https://bj.bcebos.com/paddlehub/fastdeploy/yolov5face-l-640x640.onnx ) | 181MB | - | + + +## 详细部署文档 + +- [Python部署](python) +- [C++部署](cpp) diff --git a/examples/vision/facedet/yolov5face/cpp/CMakeLists.txt b/examples/vision/facedet/yolov5face/cpp/CMakeLists.txt new file mode 100644 index 0000000000..fea1a2888b --- /dev/null +++ b/examples/vision/facedet/yolov5face/cpp/CMakeLists.txt @@ -0,0 +1,14 @@ +PROJECT(infer_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.12) + +# 指定下载解压后的fastdeploy库路径 +option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc) +# 添加FastDeploy库依赖 +target_link_libraries(infer_demo ${FASTDEPLOY_LIBS}) diff --git a/examples/vision/facedet/yolov5face/cpp/README.md b/examples/vision/facedet/yolov5face/cpp/README.md new file mode 100644 index 0000000000..ec0b48ad0a --- /dev/null +++ b/examples/vision/facedet/yolov5face/cpp/README.md @@ -0,0 +1,85 @@ +# YOLOv5Face C++部署示例 + +本目录下提供`infer.cc`快速完成YOLOv5Face在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuild_libraries.md) + +以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试 + +``` +mkdir build +cd build +wget https://xxx.tgz +tar xvf fastdeploy-linux-x64-0.2.0.tgz +cmake .. 
-DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0 +make -j + +#下载官方转换好的YOLOv5Face模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s-face.onnx +wget todo + + +# CPU推理 +./infer_demo yolov5s-face.onnx todo 0 +# GPU推理 +./infer_demo yolov5s-face.onnx todo 1 +# GPU上TensorRT推理 +./infer_demo yolov5s-face.onnx todo 2 +``` + +运行完成可视化结果如下图所示 + + + +## YOLOv5Face C++接口 + +### YOLOv5Face类 + +``` +fastdeploy::vision::facedet::YOLOv5Face( + const string& model_file, + const string& params_file = "", + const RuntimeOption& runtime_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX) +``` + +YOLOv5Face模型加载和初始化,其中model_file为导出的ONNX模型格式。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX时,此参数传入空字符串即可 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX格式 + +#### Predict函数 + +> ``` +> YOLOv5Face::Predict(cv::Mat* im, DetectionResult* result, +> float conf_threshold = 0.25, +> float nms_iou_threshold = 0.5) +> ``` +> +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式 +> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/) +> > * **conf_threshold**: 检测框置信度过滤阈值 +> > * **nms_iou_threshold**: NMS处理过程中iou阈值 + +### 类成员变量 + +> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false` +> > * **stride**(int): 配合`stris_mini_pad`成员变量使用, 默认值为`stride=32` + +- [模型介绍](../../) +- [Python部署](../python) +- [视觉模型预测结果](../../../../../docs/api/vision_results/) diff --git a/examples/vision/facedet/yolov5face/python/README.md b/examples/vision/facedet/yolov5face/python/README.md new file mode 100644 index 0000000000..2fc847f008 --- /dev/null +++ b/examples/vision/facedet/yolov5face/python/README.md @@ -0,0 +1,79 @@ +# YOLOv5Face Python部署示例 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 
FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md) + +本目录下提供`infer.py`快速完成YOLOv5Face在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成 + +``` +#下载YOLOv5Face模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s-face.onnx +wget todo + + +#下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd examples/vison/detection/yolov5face/python/ + +# CPU推理 +python infer.py --model yolov5s-face.onnx --image todo --device cpu +# GPU推理 +python infer.py --model yolov5s-face.onnx --image todo --device gpu +# GPU上使用TensorRT推理 +python infer.py --model yolov5s-face.onnx --image todo --device gpu --use_trt True +``` + +运行完成可视化结果如下图所示 + + + +## YOLOv5Face Python接口 + +``` +fastdeploy.vision.facedet.YOLOv5Face(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX) +``` + +YOLOv5Face模型加载和初始化,其中model_file为导出的ONNX模型格式 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX格式时,此参数无需设定 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX + +### predict函数 + +> ``` +> YOLOv5Face.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) +> ``` +> +> 模型预测结口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 +> > * **conf_threshold**(float): 检测框置信度过滤阈值 +> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值 + +> **返回** +> +> > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/) + +### 类成员属性 + +> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(list[float]): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=True` 表示不使用填充的方式,默认值为`is_no_pad=False` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=False` +> > * **stride**(int): 配合`stris_mini_padide`成员变量使用, 默认值为`stride=32` + + + +## 其它文档 + +- [YOLOv5Face 模型介绍](..) 
+- [YOLOv5Face C++部署](../cpp) +- [模型预测结果说明](../../../../../docs/api/vision_results/) From 17288568c1f17baa36ff5938b919ae5e267177ec Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 14:41:01 +0000 Subject: [PATCH 52/58] firt commit for modnet and arcface --- examples/vision/facedet/yolov5face/README.md | 4 - examples/vision/faceid/arcface/README.md | 40 +++++++++ .../vision/faceid/arcface/cpp/CMakeLists.txt | 14 +++ examples/vision/faceid/arcface/cpp/README.md | 85 +++++++++++++++++++ .../vision/faceid/arcface/python/README.md | 79 +++++++++++++++++ examples/vision/matting/modnet/README.md | 42 +++++++++ .../vision/matting/modnet/cpp/CMakeLists.txt | 14 +++ examples/vision/matting/modnet/cpp/README.md | 85 +++++++++++++++++++ .../vision/matting/modnet/python/README.md | 79 +++++++++++++++++ 9 files changed, 438 insertions(+), 4 deletions(-) create mode 100644 examples/vision/faceid/arcface/README.md create mode 100644 examples/vision/faceid/arcface/cpp/CMakeLists.txt create mode 100644 examples/vision/faceid/arcface/cpp/README.md create mode 100644 examples/vision/faceid/arcface/python/README.md create mode 100644 examples/vision/matting/modnet/README.md create mode 100644 examples/vision/matting/modnet/cpp/CMakeLists.txt create mode 100644 examples/vision/matting/modnet/cpp/README.md create mode 100644 examples/vision/matting/modnet/python/README.md diff --git a/examples/vision/facedet/yolov5face/README.md b/examples/vision/facedet/yolov5face/README.md index 22b9868e1b..34828b1938 100644 --- a/examples/vision/facedet/yolov5face/README.md +++ b/examples/vision/facedet/yolov5face/README.md @@ -24,10 +24,6 @@ ```bash onnxsim yolov5s-face.onnx yolov5s-face.onnx ``` -* 移动onnx文件到model_zoo/yolov5face的目录 - ```bash - cp PATH/TO/yolov5s-face.onnx PATH/TO/model_zoo/vision/yolov5face/ - ``` ## 下载预训练ONNX模型 diff --git a/examples/vision/faceid/arcface/README.md b/examples/vision/faceid/arcface/README.md new file mode 100644 index 0000000000..6a122ec400 --- /dev/null +++ b/examples/vision/faceid/arcface/README.md @@ -0,0 +1,40 @@ +# RetinaFace准备部署模型 + +## 模型版本说明 + +- [ArcFace CommitID:babb9a5](https://github.com/deepinsight/insightface/commit/babb9a5) + - (1)[链接中](https://github.com/deepinsight/insightface/commit/babb9a5)的*.pt通过[导出ONNX模型](#导出ONNX模型)操作后,可进行部署; + - (2)开发者基于自己数据训练的RetinaFace CommitID:b984b4b模型,可按照[导出ONNX模型](#%E5%AF%BC%E5%87%BAONNX%E6%A8%A1%E5%9E%8B)后,完成部署。 + +## 导出ONNX模型 + +访问[ArcFace](https://github.com/deepinsight/insightface/tree/master/recognition/arcface_torch)官方github库,按照指引下载安装,下载pt模型文件,利用 `torch2onnx.py` 得到`onnx`格式文件。 + +* 下载ArcFace模型文件 + ``` + Link: https://pan.baidu.com/share/init?surl=CL-l4zWqsI1oDuEEYVhj-g code: e8pw + ``` + +* 导出onnx格式文件 + ```bash + PYTHONPATH=. 
python ./torch2onnx.py ms1mv3_arcface_r100_fp16/backbone.pth --output ms1mv3_arcface_r100.onnx --network r100 --simplify 1 + ``` + +## 下载预训练ONNX模型 + + + +todo + + +## 详细部署文档 + +- [Python部署](python) +- [C++部署](cpp) diff --git a/examples/vision/faceid/arcface/cpp/CMakeLists.txt b/examples/vision/faceid/arcface/cpp/CMakeLists.txt new file mode 100644 index 0000000000..fea1a2888b --- /dev/null +++ b/examples/vision/faceid/arcface/cpp/CMakeLists.txt @@ -0,0 +1,14 @@ +PROJECT(infer_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.12) + +# 指定下载解压后的fastdeploy库路径 +option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc) +# 添加FastDeploy库依赖 +target_link_libraries(infer_demo ${FASTDEPLOY_LIBS}) diff --git a/examples/vision/faceid/arcface/cpp/README.md b/examples/vision/faceid/arcface/cpp/README.md new file mode 100644 index 0000000000..505d144bbb --- /dev/null +++ b/examples/vision/faceid/arcface/cpp/README.md @@ -0,0 +1,85 @@ +# ArcFace C++部署示例 + +本目录下提供`infer.cc`快速完成ArcFace在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuild_libraries.md) + +以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试 + +``` +mkdir build +cd build +wget https://xxx.tgz +tar xvf fastdeploy-linux-x64-0.2.0.tgz +cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0 +make -j + +#下载官方转换好的ArcFace模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/ms1mv3_arcface_r34.onnx +wget todo + + +# CPU推理 +./infer_demo ms1mv3_arcface_r34.onnx todo 0 +# GPU推理 +./infer_demo ms1mv3_arcface_r34.onnx todo 1 +# GPU上TensorRT推理 +./infer_demo ms1mv3_arcface_r34.onnx todo 2 +``` + +运行完成可视化结果如下图所示 + + + +## ArcFace C++接口 + +### ArcFace类 + +``` +fastdeploy::vision::faceid::ArcFace( + const string& model_file, + const string& params_file = "", + const RuntimeOption& runtime_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX) +``` + +ArcFace模型加载和初始化,其中model_file为导出的ONNX模型格式。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX时,此参数传入空字符串即可 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX格式 + +#### Predict函数 + +> ``` +> ArcFace::Predict(cv::Mat* im, DetectionResult* result, +> float conf_threshold = 0.25, +> float nms_iou_threshold = 0.5) +> ``` +> +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式 +> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/) +> > * **conf_threshold**: 检测框置信度过滤阈值 +> > * **nms_iou_threshold**: NMS处理过程中iou阈值 + +### 类成员变量 + +> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false` +> > * **stride**(int): 配合`stris_mini_pad`成员变量使用, 默认值为`stride=32` + +- [模型介绍](../../) +- [Python部署](../python) +- [视觉模型预测结果](../../../../../docs/api/vision_results/) diff --git 
a/examples/vision/faceid/arcface/python/README.md b/examples/vision/faceid/arcface/python/README.md new file mode 100644 index 0000000000..034b93049e --- /dev/null +++ b/examples/vision/faceid/arcface/python/README.md @@ -0,0 +1,79 @@ +# ArcFace Python部署示例 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md) + +本目录下提供`infer.py`快速完成ArcFace在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成 + +``` +#下载arcface模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/ms1mv3_arcface_r34.onnx +wget todo + + +#下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd examples/vison/detection/arcface/python/ + +# CPU推理 +python infer.py --model ms1mv3_arcface_r34.onnx --image todo --device cpu +# GPU推理 +python infer.py --model ms1mv3_arcface_r34.onnx --image todo --device gpu +# GPU上使用TensorRT推理 +python infer.py --model ms1mv3_arcface_r34.onnx --image todo --device gpu --use_trt True +``` + +运行完成可视化结果如下图所示 + + + +## ArcFace Python接口 + +``` +fastdeploy.vision.faceid.ArcFace(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX) +``` + +ArcFace模型加载和初始化,其中model_file为导出的ONNX模型格式 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX格式时,此参数无需设定 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX + +### predict函数 + +> ``` +> ArcFace.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) +> ``` +> +> 模型预测结口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 +> > * **conf_threshold**(float): 检测框置信度过滤阈值 +> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值 + +> **返回** +> +> > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/) + +### 类成员属性 + +> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(list[float]): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=True` 表示不使用填充的方式,默认值为`is_no_pad=False` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=False` +> > * **stride**(int): 配合`stris_mini_padide`成员变量使用, 默认值为`stride=32` + + + +## 其它文档 + +- [ArcFace 模型介绍](..) 
+- [ArcFace C++部署](../cpp) +- [模型预测结果说明](../../../../../docs/api/vision_results/) diff --git a/examples/vision/matting/modnet/README.md b/examples/vision/matting/modnet/README.md new file mode 100644 index 0000000000..fc3f7c0080 --- /dev/null +++ b/examples/vision/matting/modnet/README.md @@ -0,0 +1,42 @@ +# MODNet准备部署模型 + +## 模型版本说明 + +- [MODNet CommitID:28165a4](https://github.com/ZHKKKe/MODNet/commit/28165a4) + - (1)[链接中](https://github.com/ZHKKKe/MODNet/commit/28165a4)的*.pt通过[导出ONNX模型](#导出ONNX模型)操作后,可进行部署; + - (2)开发者基于自己数据训练的MODNet CommitID:b984b4b模型,可按照[导出ONNX模型](#%E5%AF%BC%E5%87%BAONNX%E6%A8%A1%E5%9E%8B)后,完成部署。 + +## 导出ONNX模型 + + +访问[MODNet](https://github.com/ZHKKKe/MODNet)官方github库,按照指引下载安装,下载模型文件,利用 `onnx/export_onnx.py` 得到`onnx`格式文件。 + +* 导出onnx格式文件 + ```bash + python -m onnx.export_onnx \ + --ckpt-path=pretrained/modnet_photographic_portrait_matting.ckpt \ + --output-path=pretrained/modnet_photographic_portrait_matting.onnx + ``` + +## 下载预训练ONNX模型 + +为了方便开发者的测试,下面提供了MODNet导出的各系列模型,开发者可直接下载使用。 + +| 模型 | 大小 | 精度 | +|:---------------------------------------------------------------- |:----- |:----- | +| [modnet_photographic](https://bj.bcebos.com/paddlehub/fastdeploy/modnet_photographic__portrait_matting.onnx) | 25MB | - | +| [modnet_webcam](https://bj.bcebos.com/paddlehub/fastdeploy/modnet_webcam_portrait_matting.onnx) | 25MB | -| +| [modnet_photographic_256](https://bj.bcebos.com/paddlehub/fastdeploy/modnet_photographic_portrait_matting-256x256.onnx) | 25MB | - | +| [modnet_webcam_256](https://bj.bcebos.com/paddlehub/fastdeploy/modnet_webcam_portrait_matting-256x256.onnx) | 25MB | - | +| [modnet_photographic_512](https://bj.bcebos.com/paddlehub/fastdeploy/modnet_photographic_portrait_matting-512x512.onnx) | 25MB | - | +| [modnet_webcam_512](https://bj.bcebos.com/paddlehub/fastdeploy/modnet_webcam_portrait_matting-512x512.onnx) | 25MB | - | +| [modnet_photographic_1024](https://bj.bcebos.com/paddlehub/fastdeploy/modnet_photographic_portrait_matting-1024x1024.onnx) | 25MB | - | +| [modnet_webcam_1024](https://bj.bcebos.com/paddlehub/fastdeploy/modnet_webcam_portrait_matting-1024x1024.onnx) | 25MB | -| + + + + +## 详细部署文档 + +- [Python部署](python) +- [C++部署](cpp) diff --git a/examples/vision/matting/modnet/cpp/CMakeLists.txt b/examples/vision/matting/modnet/cpp/CMakeLists.txt new file mode 100644 index 0000000000..fea1a2888b --- /dev/null +++ b/examples/vision/matting/modnet/cpp/CMakeLists.txt @@ -0,0 +1,14 @@ +PROJECT(infer_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.12) + +# 指定下载解压后的fastdeploy库路径 +option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc) +# 添加FastDeploy库依赖 +target_link_libraries(infer_demo ${FASTDEPLOY_LIBS}) diff --git a/examples/vision/matting/modnet/cpp/README.md b/examples/vision/matting/modnet/cpp/README.md new file mode 100644 index 0000000000..82226ae4c8 --- /dev/null +++ b/examples/vision/matting/modnet/cpp/README.md @@ -0,0 +1,85 @@ +# MODNet C++部署示例 + +本目录下提供`infer.cc`快速完成MODNet在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuild_libraries.md) + +以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试 + +``` +mkdir build +cd build +wget https://xxx.tgz +tar xvf fastdeploy-linux-x64-0.2.0.tgz +cmake .. 
-DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0 +make -j + +#下载官方转换好的MODNet模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/modnet_photographic__portrait_matting.onnx +wget todo + + +# CPU推理 +./infer_demo modnet_photographic__portrait_matting.onnx todo 0 +# GPU推理 +./infer_demo modnet_photographic__portrait_matting.onnx todo 1 +# GPU上TensorRT推理 +./infer_demo modnet_photographic__portrait_matting.onnx todo 2 +``` + +运行完成可视化结果如下图所示 + + + +## MODNet C++接口 + +### MODNet类 + +``` +fastdeploy::vision::matting::MODNet( + const string& model_file, + const string& params_file = "", + const RuntimeOption& runtime_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX) +``` + +MODNet模型加载和初始化,其中model_file为导出的ONNX模型格式。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX时,此参数传入空字符串即可 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX格式 + +#### Predict函数 + +> ``` +> MODNet::Predict(cv::Mat* im, DetectionResult* result, +> float conf_threshold = 0.25, +> float nms_iou_threshold = 0.5) +> ``` +> +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式 +> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/) +> > * **conf_threshold**: 检测框置信度过滤阈值 +> > * **nms_iou_threshold**: NMS处理过程中iou阈值 + +### 类成员变量 + +> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false` +> > * **stride**(int): 配合`stris_mini_pad`成员变量使用, 默认值为`stride=32` + +- [模型介绍](../../) +- [Python部署](../python) +- [视觉模型预测结果](../../../../../docs/api/vision_results/) diff --git a/examples/vision/matting/modnet/python/README.md b/examples/vision/matting/modnet/python/README.md new file mode 100644 index 0000000000..1ae86020f4 --- /dev/null +++ b/examples/vision/matting/modnet/python/README.md @@ -0,0 +1,79 @@ +# MODNet Python部署示例 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 
FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md) + +本目录下提供`infer.py`快速完成MODNet在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成 + +``` +#下载modnet模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/modnet_photographic__portrait_matting.onnx +wget todo + + +#下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd examples/vison/detection/modnet/python/ + +# CPU推理 +python infer.py --model modnet_photographic__portrait_matting.onnx --image todo --device cpu +# GPU推理 +python infer.py --model modnet_photographic__portrait_matting.onnx --image todo --device gpu +# GPU上使用TensorRT推理 +python infer.py --model modnet_photographic__portrait_matting.onnx --image todo --device gpu --use_trt True +``` + +运行完成可视化结果如下图所示 + + + +## MODNet Python接口 + +``` +fastdeploy.vision.facedet.MODNet(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX) +``` + +MODNet模型加载和初始化,其中model_file为导出的ONNX模型格式 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX格式时,此参数无需设定 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX + +### predict函数 + +> ``` +> MODNet.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) +> ``` +> +> 模型预测结口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 +> > * **conf_threshold**(float): 检测框置信度过滤阈值 +> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值 + +> **返回** +> +> > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/) + +### 类成员属性 + +> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(list[float]): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=True` 表示不使用填充的方式,默认值为`is_no_pad=False` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=False` +> > * **stride**(int): 配合`stris_mini_padide`成员变量使用, 默认值为`stride=32` + + + +## 其它文档 + +- [MODNet 模型介绍](..) 
+- [MODNet C++部署](../cpp)
+- [模型预测结果说明](../../../../../docs/api/vision_results/)

From 3d83654c9980e0f7be11206814dd7056aba4c4d4 Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Wed, 10 Aug 2022 14:42:09 +0000
Subject: [PATCH 53/58] firt commit for modnet and arcface

---
 examples/vision/matting/modnet/python/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/vision/matting/modnet/python/README.md b/examples/vision/matting/modnet/python/README.md
index 1ae86020f4..d7b1149f8e 100644
--- a/examples/vision/matting/modnet/python/README.md
+++ b/examples/vision/matting/modnet/python/README.md
@@ -32,7 +32,7 @@ python infer.py --model modnet_photographic__portrait_matting.onnx --image todo
 ## MODNet Python接口

 ```
-fastdeploy.vision.facedet.MODNet(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
+fastdeploy.vision.matting.MODNet(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
 ```

 MODNet模型加载和初始化,其中model_file为导出的ONNX模型格式

From b53179d29ac3d98c75b553f987296b4aac0eb47c Mon Sep 17 00:00:00 2001
From: ziqi-jin
Date: Wed, 10 Aug 2022 14:55:22 +0000
Subject: [PATCH 54/58] first commit for partial_fc

---
 examples/vision/faceid/partial_fc/README.md   | 37 ++++++++
 .../faceid/partial_fc/cpp/CMakeLists.txt      | 14 +++
 .../vision/faceid/partial_fc/cpp/README.md    | 85 +++++++++++++++
 .../vision/faceid/partial_fc/python/README.md | 79 +++++++++++++
 4 files changed, 215 insertions(+)
 create mode 100644 examples/vision/faceid/partial_fc/README.md
 create mode 100644 examples/vision/faceid/partial_fc/cpp/CMakeLists.txt
 create mode 100644 examples/vision/faceid/partial_fc/cpp/README.md
 create mode 100644 examples/vision/faceid/partial_fc/python/README.md

diff --git a/examples/vision/faceid/partial_fc/README.md b/examples/vision/faceid/partial_fc/README.md
new file mode 100644
index 0000000000..ca03ba2e79
--- /dev/null
+++ b/examples/vision/faceid/partial_fc/README.md
@@ -0,0 +1,37 @@
+# PartialFC准备部署模型
+
+
+
+
+
+## 下载预训练ONNX模型
+
+为了方便开发者的测试,下面提供了PartialFC导出的各系列模型,开发者可直接下载使用。
+
+| 模型                                                               | 大小   | 精度   |
+|:---------------------------------------------------------------- |:----- |:----- |
+| [partial_fc_glint360k_r50](https://bj.bcebos.com/paddlehub/fastdeploy/partial_fc_glint360k_r50.onnx) | 167MB | - |
+| [partial_fc_glint360k_r100](https://bj.bcebos.com/paddlehub/fastdeploy/partial_fc_glint360k_r100.onnx) | 249MB | -|
+
+
+
+## 详细部署文档
+
+- [Python部署](python)
+- [C++部署](cpp)
diff --git a/examples/vision/faceid/partial_fc/cpp/CMakeLists.txt b/examples/vision/faceid/partial_fc/cpp/CMakeLists.txt
new file mode 100644
index 0000000000..fea1a2888b
--- /dev/null
+++ b/examples/vision/faceid/partial_fc/cpp/CMakeLists.txt
@@ -0,0 +1,14 @@
+PROJECT(infer_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.12)
+
+# 指定下载解压后的fastdeploy库路径
+option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# 添加FastDeploy依赖头文件
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc)
+# 添加FastDeploy库依赖
+target_link_libraries(infer_demo ${FASTDEPLOY_LIBS})
diff --git a/examples/vision/faceid/partial_fc/cpp/README.md b/examples/vision/faceid/partial_fc/cpp/README.md
new file mode 100644
index 0000000000..20a2f0eb6e
--- /dev/null
+++ b/examples/vision/faceid/partial_fc/cpp/README.md
@@ -0,0 +1,85 @@
+# PartialFC C++部署示例
+
+本目录下提供`infer.cc`快速完成PartialFC在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。
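+
+在正式编译运行前,先给出一段最小化的C++调用示意(仅为帮助理解的草图:类名`fastdeploy::vision::faceid::PartialFC`与`Predict`签名按本文档后述接口说明书写,`face.jpg`为示意用文件名,输出方式依具体版本的结果结构体而定,实际请以本目录`infer.cc`及头文件定义为准):
+
+```
+#include <iostream>
+#include "fastdeploy/vision.h"
+
+int main() {
+  // 通过RuntimeOption选择推理设备,用法与本仓库其他示例一致
+  auto option = fastdeploy::RuntimeOption();
+  option.UseGpu();  // 无GPU环境可删去此行,默认在CPU上推理
+  auto model = fastdeploy::vision::faceid::PartialFC(
+      "partial_fc_glint360k_r50.onnx", "", option);
+  if (!model.Initialized()) {
+    std::cerr << "Failed to initialize." << std::endl;
+    return -1;
+  }
+  cv::Mat im = cv::imread("face.jpg");  // 输入需为HWC、BGR格式
+  fastdeploy::vision::DetectionResult res;
+  if (!model.Predict(&im, &res)) {  // 置信度与NMS阈值使用默认值
+    std::cerr << "Failed to predict." << std::endl;
+    return -1;
+  }
+  std::cout << res.Str() << std::endl;  // 打印结果概要
+  return 0;
+}
+```
+
+在部署前,需确认以下两个步骤
+
+- 1.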
软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuild_libraries.md) + +以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试 + +``` +mkdir build +cd build +wget https://xxx.tgz +tar xvf fastdeploy-linux-x64-0.2.0.tgz +cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0 +make -j + +#下载官方转换好的PartialFC模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/partial_fc_glint360k_r50.onnx +wget todo + + +# CPU推理 +./infer_demo partial_fc_glint360k_r50.onnx todo 0 +# GPU推理 +./infer_demo partial_fc_glint360k_r50.onnx todo 1 +# GPU上TensorRT推理 +./infer_demo partial_fc_glint360k_r50.onnx todo 2 +``` + +运行完成可视化结果如下图所示 + + + +## PartialFC C++接口 + +### PartialFC类 + +``` +fastdeploy::vision::faceid::PartialFC( + const string& model_file, + const string& params_file = "", + const RuntimeOption& runtime_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX) +``` + +PartialFC模型加载和初始化,其中model_file为导出的ONNX模型格式。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径,当模型格式为ONNX时,此参数传入空字符串即可 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式,默认为ONNX格式 + +#### Predict函数 + +> ``` +> PartialFC::Predict(cv::Mat* im, DetectionResult* result, +> float conf_threshold = 0.25, +> float nms_iou_threshold = 0.5) +> ``` +> +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式 +> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/) +> > * **conf_threshold**: 检测框置信度过滤阈值 +> > * **nms_iou_threshold**: NMS处理过程中iou阈值 + +### 类成员变量 + +> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640] +> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114] +> > * **is_no_pad**(bool): 通过此参数让图片是否通过填充的方式进行resize, `is_no_pad=ture` 表示不使用填充的方式,默认值为`is_no_pad=false` +> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高这是为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false` +> > * **stride**(int): 配合`stris_mini_pad`成员变量使用, 默认值为`stride=32` + +- [模型介绍](../../) +- [Python部署](../python) +- [视觉模型预测结果](../../../../../docs/api/vision_results/) diff --git a/examples/vision/faceid/partial_fc/python/README.md b/examples/vision/faceid/partial_fc/python/README.md new file mode 100644 index 0000000000..6189e99c47 --- /dev/null +++ b/examples/vision/faceid/partial_fc/python/README.md @@ -0,0 +1,79 @@ +# PartialFC Python部署示例 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 
+
+
+## 其它文档
+
+- [PartialFC 模型介绍](..)
+- [PartialFC C++部署](../cpp) +- [模型预测结果说明](../../../../../docs/api/vision_results/) From d2a12d18ccc7b2053448ffe1460677b7687e2344 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 14:58:08 +0000 Subject: [PATCH 55/58] first commit for partial_fc --- examples/vision/faceid/arcface/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/vision/faceid/arcface/README.md b/examples/vision/faceid/arcface/README.md index 6a122ec400..cb93054029 100644 --- a/examples/vision/faceid/arcface/README.md +++ b/examples/vision/faceid/arcface/README.md @@ -1,10 +1,10 @@ -# RetinaFace准备部署模型 +# ArcFace准备部署模型 ## 模型版本说明 - [ArcFace CommitID:babb9a5](https://github.com/deepinsight/insightface/commit/babb9a5) - (1)[链接中](https://github.com/deepinsight/insightface/commit/babb9a5)的*.pt通过[导出ONNX模型](#导出ONNX模型)操作后,可进行部署; - - (2)开发者基于自己数据训练的RetinaFace CommitID:b984b4b模型,可按照[导出ONNX模型](#%E5%AF%BC%E5%87%BAONNX%E6%A8%A1%E5%9E%8B)后,完成部署。 + - (2)开发者基于自己数据训练的ArcFace CommitID:babb9a5模型,可按照[导出ONNX模型](#%E5%AF%BC%E5%87%BAONNX%E6%A8%A1%E5%9E%8B)后,完成部署。 ## 导出ONNX模型 From 2aecb9665f061cab924e23cf97aea4f5595e63e9 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 15:10:49 +0000 Subject: [PATCH 56/58] first commit for yolox --- examples/vision/detection/yolox/README.md | 23 +++++ .../vision/detection/yolox/cpp/CMakeLists.txt | 14 +++ examples/vision/detection/yolox/cpp/README.md | 85 +++++++++++++++++++ .../vision/detection/yolox/python/README.md | 79 +++++++++++++++++ 4 files changed, 201 insertions(+) create mode 100644 examples/vision/detection/yolox/README.md create mode 100644 examples/vision/detection/yolox/cpp/CMakeLists.txt create mode 100644 examples/vision/detection/yolox/cpp/README.md create mode 100644 examples/vision/detection/yolox/python/README.md diff --git a/examples/vision/detection/yolox/README.md b/examples/vision/detection/yolox/README.md new file mode 100644 index 0000000000..2a0d10d8ac --- /dev/null +++ b/examples/vision/detection/yolox/README.md @@ -0,0 +1,23 @@ +# YOLOv7准备部署模型 + +## 模型版本说明 + +- [YOLOX v0.1.1](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0) + - (1)[链接中](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0)的*.onnx可直接进行部署; + + +## 下载预训练ONNX模型 + +为了方便开发者的测试,下面提供了YOLOv7导出的各系列模型,开发者可直接下载使用。 + +| 模型 | 大小 | 精度 | +|:---------------------------------------------------------------- |:----- |:----- | +| [YOLOX-s](https://bj.bcebos.com/paddlehub/fastdeploy/yolox_s.onnx) | 35MB | 40.5% | + + + + +## 详细部署文档 + +- [Python部署](python) +- [C++部署](cpp) diff --git a/examples/vision/detection/yolox/cpp/CMakeLists.txt b/examples/vision/detection/yolox/cpp/CMakeLists.txt new file mode 100644 index 0000000000..fea1a2888b --- /dev/null +++ b/examples/vision/detection/yolox/cpp/CMakeLists.txt @@ -0,0 +1,14 @@ +PROJECT(infer_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.12) + +# 指定下载解压后的fastdeploy库路径 +option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc) +# 添加FastDeploy库依赖 +target_link_libraries(infer_demo ${FASTDEPLOY_LIBS}) diff --git a/examples/vision/detection/yolox/cpp/README.md b/examples/vision/detection/yolox/cpp/README.md new file mode 100644 index 0000000000..abe7611266 --- /dev/null +++ b/examples/vision/detection/yolox/cpp/README.md @@ -0,0 +1,85 @@ +# YOLOX C++部署示例 + 
+本目录下提供`infer.cc`快速完成YOLOX在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。
+
+在部署前,需确认以下两个步骤
+
+- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md)
+- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuild_libraries.md)
+
+以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试
+
+```
+mkdir build
+cd build
+wget https://xxx.tgz
+tar xvf fastdeploy-linux-x64-0.2.0.tgz
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0
+make -j
+
+#下载官方转换好的YOLOX模型文件和测试图片
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolox_s.onnx
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+
+# CPU推理
+./infer_demo yolox_s.onnx 000000014439.jpg 0
+# GPU推理
+./infer_demo yolox_s.onnx 000000014439.jpg 1
+# GPU上TensorRT推理
+./infer_demo yolox_s.onnx 000000014439.jpg 2
+```
+
+运行完成可视化结果如下图所示
+
+
+## YOLOX C++接口
+
+### YOLOX类
+
+```
+fastdeploy::vision::detection::YOLOX(
+        const string& model_file,
+        const string& params_file = "",
+        const RuntimeOption& runtime_option = RuntimeOption(),
+        const Frontend& model_format = Frontend::ONNX)
+```
+
+YOLOX模型加载和初始化,其中model_file为导出的ONNX模型格式。
+
+**参数**
+
+> * **model_file**(str): 模型文件路径
+> * **params_file**(str): 参数文件路径,当模型格式为ONNX时,此参数传入空字符串即可
+> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
+> * **model_format**(Frontend): 模型格式,默认为ONNX格式
+
+#### Predict函数
+
+> ```
+> YOLOX::Predict(cv::Mat* im, DetectionResult* result,
+>                float conf_threshold = 0.25,
+>                float nms_iou_threshold = 0.5)
+> ```
+>
+> 模型预测接口,输入图像直接输出检测结果。
+>
+> **参数**
+>
+> > * **im**: 输入图像,注意需为HWC,BGR格式
+> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
+> > * **conf_threshold**: 检测框置信度过滤阈值
+> > * **nms_iou_threshold**: NMS处理过程中iou阈值
+
+### 类成员变量
+
+> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640]
+> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114]
+> > * **is_no_pad**(bool): 通过此参数控制图片是否通过填充的方式进行resize, `is_no_pad=true` 表示不使用填充的方式,默认值为`is_no_pad=false`
+> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高调整为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false`
+> > * **stride**(int): 配合`is_mini_pad`成员变量使用, 默认值为`stride=32`
+
+- [模型介绍](../../)
+- [Python部署](../python)
+- [视觉模型预测结果](../../../../../docs/api/vision_results/)

diff --git a/examples/vision/detection/yolox/python/README.md b/examples/vision/detection/yolox/python/README.md
new file mode 100644
index 0000000000..7a73132a26
--- /dev/null
+++ b/examples/vision/detection/yolox/python/README.md
@@ -0,0 +1,96 @@
+# YOLOX Python部署示例
+
+在部署前,需确认以下两个步骤
+
+- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md)
+- 2. FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md)
+
+本目录下提供`infer.py`快速完成YOLOX在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成
+
+```
+#下载YOLOX模型文件和测试图片
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolox_s.onnx
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+
+#下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/yolox/python/
+
+# CPU推理
+python infer.py --model yolox_s.onnx --image 000000014439.jpg --device cpu
+# GPU推理
+python infer.py --model yolox_s.onnx --image 000000014439.jpg --device gpu
+# GPU上使用TensorRT推理
+python infer.py --model yolox_s.onnx --image 000000014439.jpg --device gpu --use_trt True
+```
+
+运行完成可视化结果如下图所示
+
+
+## YOLOX Python接口
+
+```
+fastdeploy.vision.detection.YOLOX(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
+```
+
+YOLOX模型加载和初始化,其中model_file为导出的ONNX模型格式
+
+**参数**
+
+> * **model_file**(str): 模型文件路径
+> * **params_file**(str): 参数文件路径,当模型格式为ONNX格式时,此参数无需设定
+> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
+> * **model_format**(Frontend): 模型格式,默认为ONNX
+
+### predict函数
+
+> ```
+> YOLOX.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
+> ```
+>
+> 模型预测接口,输入图像直接输出检测结果。
+>
+> **参数**
+>
+> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式
+> > * **conf_threshold**(float): 检测框置信度过滤阈值
+> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值
+
+> **返回**
+>
+> > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/)
+
+### 类成员属性
+
+> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640]
+> > * **padding_value**(list[float]): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114]
+> > * **is_no_pad**(bool): 通过此参数控制图片是否通过填充的方式进行resize, `is_no_pad=True` 表示不使用填充的方式,默认值为`is_no_pad=False`
+> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高调整为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=False`
+> > * **stride**(int): 配合`is_mini_pad`成员变量使用, 默认值为`stride=32`
+
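+## 使用示例(示意)
+
+结合上述接口,下面给出一段最小调用示意;其中可视化接口此处以`fd.vision.visualize.vis_detection`为假设,名称请以实际安装的FastDeploy版本为准。
+
+```
+import cv2
+import fastdeploy as fd
+
+model = fd.vision.detection.YOLOX("yolox_s.onnx")
+
+im = cv2.imread("000000014439.jpg")  # 输入需为HWC、BGR格式
+result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)
+
+# 可视化检测框并保存到本地
+vis_im = fd.vision.visualize.vis_detection(im, result)
+cv2.imwrite("visualized_result.jpg", vis_im)
+```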
+
+
+## 其它文档
+
+- [YOLOX 模型介绍](..)
+- [YOLOX C++部署](../cpp) +- [模型预测结果说明](../../../../../docs/api/vision_results/) From 7165e0e668b7cbbcb557afb37092fae6de9a4282 Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 15:18:19 +0000 Subject: [PATCH 57/58] first commit for yolov6 --- .../vision/detection/nanodet_plus/README.md | 23 +++++ .../detection/nanodet_plus/cpp/CMakeLists.txt | 14 +++ .../detection/nanodet_plus/cpp/README.md | 85 +++++++++++++++++++ .../detection/nanodet_plus/python/README.md | 79 +++++++++++++++++ examples/vision/detection/yolov6/README.md | 23 +++++ .../detection/yolov6/cpp/CMakeLists.txt | 14 +++ .../vision/detection/yolov6/cpp/README.md | 85 +++++++++++++++++++ .../vision/detection/yolov6/python/README.md | 79 +++++++++++++++++ examples/vision/detection/yolox/README.md | 4 +- 9 files changed, 404 insertions(+), 2 deletions(-) create mode 100644 examples/vision/detection/nanodet_plus/README.md create mode 100644 examples/vision/detection/nanodet_plus/cpp/CMakeLists.txt create mode 100644 examples/vision/detection/nanodet_plus/cpp/README.md create mode 100644 examples/vision/detection/nanodet_plus/python/README.md create mode 100644 examples/vision/detection/yolov6/README.md create mode 100644 examples/vision/detection/yolov6/cpp/CMakeLists.txt create mode 100644 examples/vision/detection/yolov6/cpp/README.md create mode 100644 examples/vision/detection/yolov6/python/README.md diff --git a/examples/vision/detection/nanodet_plus/README.md b/examples/vision/detection/nanodet_plus/README.md new file mode 100644 index 0000000000..7f52f30310 --- /dev/null +++ b/examples/vision/detection/nanodet_plus/README.md @@ -0,0 +1,23 @@ +# YOLOv6准备部署模型 + +## 模型版本说明 + +- [YOLOv6 v0.1.0](https://github.com/meituan/YOLOv6/releases/download/0.1.0) + - (1)[链接中](https://github.com/meituan/YOLOv6/releases/download/0.1.0)的*.onnx可直接进行部署; + + +## 下载预训练ONNX模型 + +为了方便开发者的测试,下面提供了YOLOv6导出的各系列模型,开发者可直接下载使用。 + +| 模型 | 大小 | 精度 | +|:---------------------------------------------------------------- |:----- |:----- | +| [YOLOv6s](https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s.onnx) | 66MB | 43.1% | +| [YOLOv6s_640](https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s-640x640.onnx) | 66MB | 43.1% | + +nanodet-plus-m_320.onnx nanodet-plus-m_320-sim.onnx + +## 详细部署文档 + +- [Python部署](python) +- [C++部署](cpp) diff --git a/examples/vision/detection/nanodet_plus/cpp/CMakeLists.txt b/examples/vision/detection/nanodet_plus/cpp/CMakeLists.txt new file mode 100644 index 0000000000..fea1a2888b --- /dev/null +++ b/examples/vision/detection/nanodet_plus/cpp/CMakeLists.txt @@ -0,0 +1,14 @@ +PROJECT(infer_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.12) + +# 指定下载解压后的fastdeploy库路径 +option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc) +# 添加FastDeploy库依赖 +target_link_libraries(infer_demo ${FASTDEPLOY_LIBS}) diff --git a/examples/vision/detection/nanodet_plus/cpp/README.md b/examples/vision/detection/nanodet_plus/cpp/README.md new file mode 100644 index 0000000000..5a73f8b55e --- /dev/null +++ b/examples/vision/detection/nanodet_plus/cpp/README.md @@ -0,0 +1,85 @@ +# YOLOv6 C++部署示例 + +本目录下提供`infer.cc`快速完成YOLOv6在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 
根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuild_libraries.md)
+
+以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试
+
+```
+mkdir build
+cd build
+wget https://xxx.tgz
+tar xvf fastdeploy-linux-x64-0.2.0.tgz
+cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0
+make -j
+
+#下载官方转换好的YOLOv6模型文件和测试图片
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s.onnx
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+
+# CPU推理
+./infer_demo yolov6s.onnx 000000014439.jpg 0
+# GPU推理
+./infer_demo yolov6s.onnx 000000014439.jpg 1
+# GPU上TensorRT推理
+./infer_demo yolov6s.onnx 000000014439.jpg 2
+```
+
+运行完成可视化结果如下图所示
+
+
+## YOLOv6 C++接口
+
+### YOLOv6类
+
+```
+fastdeploy::vision::detection::YOLOv6(
+        const string& model_file,
+        const string& params_file = "",
+        const RuntimeOption& runtime_option = RuntimeOption(),
+        const Frontend& model_format = Frontend::ONNX)
+```
+
+YOLOv6模型加载和初始化,其中model_file为导出的ONNX模型格式。
+
+**参数**
+
+> * **model_file**(str): 模型文件路径
+> * **params_file**(str): 参数文件路径,当模型格式为ONNX时,此参数传入空字符串即可
+> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
+> * **model_format**(Frontend): 模型格式,默认为ONNX格式
+
+#### Predict函数
+
+> ```
+> YOLOv6::Predict(cv::Mat* im, DetectionResult* result,
+>                 float conf_threshold = 0.25,
+>                 float nms_iou_threshold = 0.5)
+> ```
+>
+> 模型预测接口,输入图像直接输出检测结果。
+>
+> **参数**
+>
+> > * **im**: 输入图像,注意需为HWC,BGR格式
+> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
+> > * **conf_threshold**: 检测框置信度过滤阈值
+> > * **nms_iou_threshold**: NMS处理过程中iou阈值
+
+### 类成员变量
+
+> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640]
+> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114]
+> > * **is_no_pad**(bool): 通过此参数控制图片是否通过填充的方式进行resize, `is_no_pad=true` 表示不使用填充的方式,默认值为`is_no_pad=false`
+> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高调整为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false`
+> > * **stride**(int): 配合`is_mini_pad`成员变量使用, 默认值为`stride=32`
+
+- [模型介绍](../../)
+- [Python部署](../python)
+- [视觉模型预测结果](../../../../../docs/api/vision_results/)

diff --git a/examples/vision/detection/nanodet_plus/python/README.md b/examples/vision/detection/nanodet_plus/python/README.md
new file mode 100644
index 0000000000..35c35b2084
--- /dev/null
+++ b/examples/vision/detection/nanodet_plus/python/README.md
@@ -0,0 +1,79 @@
+# YOLOv6 Python部署示例
+
+在部署前,需确认以下两个步骤
+
+- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md)
+- 2. 
FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md)
+
+本目录下提供`infer.py`快速完成YOLOv6在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成
+
+```
+#下载YOLOv6模型文件和测试图片
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s.onnx
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+
+#下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/yolov6/python/
+
+# CPU推理
+python infer.py --model yolov6s.onnx --image 000000014439.jpg --device cpu
+# GPU推理
+python infer.py --model yolov6s.onnx --image 000000014439.jpg --device gpu
+# GPU上使用TensorRT推理
+python infer.py --model yolov6s.onnx --image 000000014439.jpg --device gpu --use_trt True
+```
+
+运行完成可视化结果如下图所示
+
+
+## YOLOv6 Python接口
+
+```
+fastdeploy.vision.detection.YOLOv6(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
+```
+
+YOLOv6模型加载和初始化,其中model_file为导出的ONNX模型格式
+
+**参数**
+
+> * **model_file**(str): 模型文件路径
+> * **params_file**(str): 参数文件路径,当模型格式为ONNX格式时,此参数无需设定
+> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
+> * **model_format**(Frontend): 模型格式,默认为ONNX
+
+### predict函数
+
+> ```
+> YOLOv6.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
+> ```
+>
+> 模型预测接口,输入图像直接输出检测结果。
+>
+> **参数**
+>
+> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式
+> > * **conf_threshold**(float): 检测框置信度过滤阈值
+> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值
+
+> **返回**
+>
+> > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/)
+
+### 类成员属性
+
+> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640]
+> > * **padding_value**(list[float]): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114]
+> > * **is_no_pad**(bool): 通过此参数控制图片是否通过填充的方式进行resize, `is_no_pad=True` 表示不使用填充的方式,默认值为`is_no_pad=False`
+> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高调整为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=False`
+> > * **stride**(int): 配合`is_mini_pad`成员变量使用, 默认值为`stride=32`
+
+
+
+## 其它文档
+
+- [YOLOv6 模型介绍](..)
+- [YOLOv6 C++部署](../cpp) +- [模型预测结果说明](../../../../../docs/api/vision_results/) diff --git a/examples/vision/detection/yolov6/README.md b/examples/vision/detection/yolov6/README.md new file mode 100644 index 0000000000..878e530bda --- /dev/null +++ b/examples/vision/detection/yolov6/README.md @@ -0,0 +1,23 @@ +# YOLOv6准备部署模型 + +## 模型版本说明 + +- [YOLOv6 v0.1.0](https://github.com/meituan/YOLOv6/releases/download/0.1.0) + - (1)[链接中](https://github.com/meituan/YOLOv6/releases/download/0.1.0)的*.onnx可直接进行部署; + + +## 下载预训练ONNX模型 + +为了方便开发者的测试,下面提供了YOLOv6导出的各系列模型,开发者可直接下载使用。 + +| 模型 | 大小 | 精度 | +|:---------------------------------------------------------------- |:----- |:----- | +| [YOLOv6s](https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s.onnx) | 66MB | 43.1% | +| [YOLOv6s_640](https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s-640x640.onnx) | 66MB | 43.1% | + + + +## 详细部署文档 + +- [Python部署](python) +- [C++部署](cpp) diff --git a/examples/vision/detection/yolov6/cpp/CMakeLists.txt b/examples/vision/detection/yolov6/cpp/CMakeLists.txt new file mode 100644 index 0000000000..fea1a2888b --- /dev/null +++ b/examples/vision/detection/yolov6/cpp/CMakeLists.txt @@ -0,0 +1,14 @@ +PROJECT(infer_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.12) + +# 指定下载解压后的fastdeploy库路径 +option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.") + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc) +# 添加FastDeploy库依赖 +target_link_libraries(infer_demo ${FASTDEPLOY_LIBS}) diff --git a/examples/vision/detection/yolov6/cpp/README.md b/examples/vision/detection/yolov6/cpp/README.md new file mode 100644 index 0000000000..5a73f8b55e --- /dev/null +++ b/examples/vision/detection/yolov6/cpp/README.md @@ -0,0 +1,85 @@ +# YOLOv6 C++部署示例 + +本目录下提供`infer.cc`快速完成YOLOv6在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) +- 2. 根据开发环境,下载预编译部署库和samples代码,参考[FastDeploy预编译库](../../../../../docs/compile/prebuild_libraries.md) + +以Linux上CPU推理为例,在本目录执行如下命令即可完成编译测试 + +``` +mkdir build +cd build +wget https://xxx.tgz +tar xvf fastdeploy-linux-x64-0.2.0.tgz +cmake .. 
-DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0
+make -j
+
+#下载官方转换好的YOLOv6模型文件和测试图片
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s.onnx
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+
+# CPU推理
+./infer_demo yolov6s.onnx 000000014439.jpg 0
+# GPU推理
+./infer_demo yolov6s.onnx 000000014439.jpg 1
+# GPU上TensorRT推理
+./infer_demo yolov6s.onnx 000000014439.jpg 2
+```
+
+运行完成可视化结果如下图所示
+
+
+## YOLOv6 C++接口
+
+### YOLOv6类
+
+```
+fastdeploy::vision::detection::YOLOv6(
+        const string& model_file,
+        const string& params_file = "",
+        const RuntimeOption& runtime_option = RuntimeOption(),
+        const Frontend& model_format = Frontend::ONNX)
+```
+
+YOLOv6模型加载和初始化,其中model_file为导出的ONNX模型格式。
+
+**参数**
+
+> * **model_file**(str): 模型文件路径
+> * **params_file**(str): 参数文件路径,当模型格式为ONNX时,此参数传入空字符串即可
+> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
+> * **model_format**(Frontend): 模型格式,默认为ONNX格式
+
+#### Predict函数
+
+> ```
+> YOLOv6::Predict(cv::Mat* im, DetectionResult* result,
+>                 float conf_threshold = 0.25,
+>                 float nms_iou_threshold = 0.5)
+> ```
+>
+> 模型预测接口,输入图像直接输出检测结果。
+>
+> **参数**
+>
+> > * **im**: 输入图像,注意需为HWC,BGR格式
+> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
+> > * **conf_threshold**: 检测框置信度过滤阈值
+> > * **nms_iou_threshold**: NMS处理过程中iou阈值
+
+### 类成员变量
+
+> > * **size**(vector<int>): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640]
+> > * **padding_value**(vector<float>): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114]
+> > * **is_no_pad**(bool): 通过此参数控制图片是否通过填充的方式进行resize, `is_no_pad=true` 表示不使用填充的方式,默认值为`is_no_pad=false`
+> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高调整为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=false`
+> > * **stride**(int): 配合`is_mini_pad`成员变量使用, 默认值为`stride=32`
+
+- [模型介绍](../../)
+- [Python部署](../python)
+- [视觉模型预测结果](../../../../../docs/api/vision_results/)

diff --git a/examples/vision/detection/yolov6/python/README.md b/examples/vision/detection/yolov6/python/README.md
new file mode 100644
index 0000000000..35c35b2084
--- /dev/null
+++ b/examples/vision/detection/yolov6/python/README.md
@@ -0,0 +1,95 @@
+# YOLOv6 Python部署示例
+
+在部署前,需确认以下两个步骤
+
+- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md)
+- 2. FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md)
+
+本目录下提供`infer.py`快速完成YOLOv6在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成
+
+```
+#下载YOLOv6模型文件和测试图片
+wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s.onnx
+wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
+
+
+#下载部署示例代码
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/detection/yolov6/python/
+
+# CPU推理
+python infer.py --model yolov6s.onnx --image 000000014439.jpg --device cpu
+# GPU推理
+python infer.py --model yolov6s.onnx --image 000000014439.jpg --device gpu
+# GPU上使用TensorRT推理
+python infer.py --model yolov6s.onnx --image 000000014439.jpg --device gpu --use_trt True
+```
+
+运行完成可视化结果如下图所示
+
+
+## YOLOv6 Python接口
+
+```
+fastdeploy.vision.detection.YOLOv6(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
+```
+
+YOLOv6模型加载和初始化,其中model_file为导出的ONNX模型格式
+
+**参数**
+
+> * **model_file**(str): 模型文件路径
+> * **params_file**(str): 参数文件路径,当模型格式为ONNX格式时,此参数无需设定
+> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
+> * **model_format**(Frontend): 模型格式,默认为ONNX
+
+### predict函数
+
+> ```
+> YOLOv6.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
+> ```
+>
+> 模型预测接口,输入图像直接输出检测结果。
+>
+> **参数**
+>
+> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式
+> > * **conf_threshold**(float): 检测框置信度过滤阈值
+> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值
+
+> **返回**
+>
+> > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/)
+
+### 类成员属性
+
+> > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640]
+> > * **padding_value**(list[float]): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114]
+> > * **is_no_pad**(bool): 通过此参数控制图片是否通过填充的方式进行resize, `is_no_pad=True` 表示不使用填充的方式,默认值为`is_no_pad=False`
+> > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高调整为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=False`
+> > * **stride**(int): 配合`is_mini_pad`成员变量使用, 默认值为`stride=32`
+
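+## 使用示例(示意)
+
+下面给出一段启用GPU与TensorRT后端的最小示意代码,与上文`--use_trt`的效果对应;`use_gpu`、`use_trt_backend`等RuntimeOption方法名以实际安装的FastDeploy版本为准。
+
+```
+import cv2
+import fastdeploy as fd
+
+option = fd.RuntimeOption()
+option.use_gpu(0)         # 使用0号GPU
+option.use_trt_backend()  # 切换为TensorRT后端
+
+model = fd.vision.detection.YOLOv6("yolov6s.onnx", runtime_option=option)
+result = model.predict(cv2.imread("000000014439.jpg"))
+print(result)
+```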
+
+
+## 其它文档
+
+- [YOLOv6 模型介绍](..)
+- [YOLOv6 C++部署](../cpp) +- [模型预测结果说明](../../../../../docs/api/vision_results/) diff --git a/examples/vision/detection/yolox/README.md b/examples/vision/detection/yolox/README.md index 2a0d10d8ac..72dc51be1d 100644 --- a/examples/vision/detection/yolox/README.md +++ b/examples/vision/detection/yolox/README.md @@ -1,4 +1,4 @@ -# YOLOv7准备部署模型 +# YOLOX准备部署模型 ## 模型版本说明 @@ -8,7 +8,7 @@ ## 下载预训练ONNX模型 -为了方便开发者的测试,下面提供了YOLOv7导出的各系列模型,开发者可直接下载使用。 +为了方便开发者的测试,下面提供了YOLOX导出的各系列模型,开发者可直接下载使用。 | 模型 | 大小 | 精度 | |:---------------------------------------------------------------- |:----- |:----- | From 1c7a5786c1e6cfeffabea97222cbb4b99735f65b Mon Sep 17 00:00:00 2001 From: ziqi-jin Date: Wed, 10 Aug 2022 15:27:23 +0000 Subject: [PATCH 58/58] first commit for nano_det --- .../vision/detection/nanodet_plus/README.md | 13 ++++----- .../detection/nanodet_plus/cpp/README.md | 24 ++++++++-------- .../detection/nanodet_plus/python/README.md | 28 +++++++++---------- 3 files changed, 32 insertions(+), 33 deletions(-) diff --git a/examples/vision/detection/nanodet_plus/README.md b/examples/vision/detection/nanodet_plus/README.md index 7f52f30310..b3fd574631 100644 --- a/examples/vision/detection/nanodet_plus/README.md +++ b/examples/vision/detection/nanodet_plus/README.md @@ -1,21 +1,20 @@ -# YOLOv6准备部署模型 +# NanoDetPlus准备部署模型 ## 模型版本说明 -- [YOLOv6 v0.1.0](https://github.com/meituan/YOLOv6/releases/download/0.1.0) - - (1)[链接中](https://github.com/meituan/YOLOv6/releases/download/0.1.0)的*.onnx可直接进行部署; +- [NanoDetPlus v1.0.0-alpha-1](https://github.com/RangiLyu/nanodet/releases/tag/v1.0.0-alpha-1) + - (1)[链接中](https://github.com/RangiLyu/nanodet/releases/tag/v1.0.0-alpha-1)的*.onnx可直接进行部署 ## 下载预训练ONNX模型 -为了方便开发者的测试,下面提供了YOLOv6导出的各系列模型,开发者可直接下载使用。 +为了方便开发者的测试,下面提供了NanoDetPlus导出的各系列模型,开发者可直接下载使用。 | 模型 | 大小 | 精度 | |:---------------------------------------------------------------- |:----- |:----- | -| [YOLOv6s](https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s.onnx) | 66MB | 43.1% | -| [YOLOv6s_640](https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s-640x640.onnx) | 66MB | 43.1% | +| [NanoDetPlus_320](https://bj.bcebos.com/paddlehub/fastdeploy/nanodet-plus-m_320.onnx ) | 4.6MB | 27.0% | +| [NanoDetPlus_320_sim](https://bj.bcebos.com/paddlehub/fastdeploy/nanodet-plus-m_320-sim.onnx) | 4.6MB | 27.0% | -nanodet-plus-m_320.onnx nanodet-plus-m_320-sim.onnx ## 详细部署文档 diff --git a/examples/vision/detection/nanodet_plus/cpp/README.md b/examples/vision/detection/nanodet_plus/cpp/README.md index 5a73f8b55e..2dbee5e31d 100644 --- a/examples/vision/detection/nanodet_plus/cpp/README.md +++ b/examples/vision/detection/nanodet_plus/cpp/README.md @@ -1,6 +1,6 @@ -# YOLOv6 C++部署示例 +# NanoDetPlus C++部署示例 -本目录下提供`infer.cc`快速完成YOLOv6在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 +本目录下提供`infer.cc`快速完成NanoDetPlus在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。 在部署前,需确认以下两个步骤 @@ -17,36 +17,36 @@ tar xvf fastdeploy-linux-x64-0.2.0.tgz cmake .. 
-DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-0.2.0 make -j -#下载官方转换好的YOLOv6模型文件和测试图片 -wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s.onnx +#下载官方转换好的NanoDetPlus模型文件和测试图片 +wget https://bj.bcebos.com/paddlehub/fastdeploy/nanodet-plus-m_320.onnx wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg # CPU推理 -./infer_demo yolov6s.onnx 000000014439.jpg 0 +./infer_demo nanodet-plus-m_320.onnx 000000014439.jpg 0 # GPU推理 -./infer_demo yolov6s.onnx 000000014439.jpg 1 +./infer_demo nanodet-plus-m_320.onnx 000000014439.jpg 1 # GPU上TensorRT推理 -./infer_demo yolov6s.onnx 000000014439.jpg 2 +./infer_demo nanodet-plus-m_320.onnx 000000014439.jpg 2 ``` 运行完成可视化结果如下图所示 -## YOLOv6 C++接口 +## NanoDetPlus C++接口 -### YOLOv6类 +### NanoDetPlus类 ``` -fastdeploy::vision::detection::YOLOv6( +fastdeploy::vision::detection::NanoDetPlus( const string& model_file, const string& params_file = "", const RuntimeOption& runtime_option = RuntimeOption(), const Frontend& model_format = Frontend::ONNX) ``` -YOLOv6模型加载和初始化,其中model_file为导出的ONNX模型格式。 +NanoDetPlus模型加载和初始化,其中model_file为导出的ONNX模型格式。 **参数** @@ -58,7 +58,7 @@ YOLOv6模型加载和初始化,其中model_file为导出的ONNX模型格式。 #### Predict函数 > ``` -> YOLOv6::Predict(cv::Mat* im, DetectionResult* result, +> NanoDetPlus::Predict(cv::Mat* im, DetectionResult* result, > float conf_threshold = 0.25, > float nms_iou_threshold = 0.5) > ``` diff --git a/examples/vision/detection/nanodet_plus/python/README.md b/examples/vision/detection/nanodet_plus/python/README.md index 35c35b2084..7a60a31c8f 100644 --- a/examples/vision/detection/nanodet_plus/python/README.md +++ b/examples/vision/detection/nanodet_plus/python/README.md @@ -1,41 +1,41 @@ -# YOLOv6 Python部署示例 +# NanoDetPlus Python部署示例 在部署前,需确认以下两个步骤 - 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../docs/quick_start/requirements.md) - 2. 
FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../docs/quick_start/install.md)
 
-本目录下提供`infer.py`快速完成YOLOv6在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成
+本目录下提供`infer.py`快速完成NanoDetPlus在CPU/GPU,以及GPU上通过TensorRT加速部署的示例。执行如下脚本即可完成
 
 ```
-#下载YOLOv6模型文件和测试图片
-wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s.onnx
+#下载NanoDetPlus模型文件和测试图片
+wget https://bj.bcebos.com/paddlehub/fastdeploy/nanodet-plus-m_320.onnx
 wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
 
 
 #下载部署示例代码
 git clone https://github.com/PaddlePaddle/FastDeploy.git
-cd FastDeploy/examples/vision/detection/yolov6/python/
+cd FastDeploy/examples/vision/detection/nanodet_plus/python/
 
 # CPU推理
-python infer.py --model yolov6s.onnx --image 000000014439.jpg --device cpu
+python infer.py --model nanodet-plus-m_320.onnx --image 000000014439.jpg --device cpu
 # GPU推理
-python infer.py --model yolov6s.onnx --image 000000014439.jpg --device gpu
+python infer.py --model nanodet-plus-m_320.onnx --image 000000014439.jpg --device gpu
 # GPU上使用TensorRT推理
-python infer.py --model yolov6s.onnx --image 000000014439.jpg --device gpu --use_trt True
+python infer.py --model nanodet-plus-m_320.onnx --image 000000014439.jpg --device gpu --use_trt True
 ```
 
 运行完成可视化结果如下图所示
 
 
-## YOLOv6 Python接口
+## NanoDetPlus Python接口
 
 ```
-fastdeploy.vision.detection.YOLOv6(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
+fastdeploy.vision.detection.NanoDetPlus(model_file, params_file=None, runtime_option=None, model_format=Frontend.ONNX)
 ```
 
-YOLOv6模型加载和初始化,其中model_file为导出的ONNX模型格式
+NanoDetPlus模型加载和初始化,其中model_file为导出的ONNX模型格式
 
 **参数**
 
 > * **model_file**(str): 模型文件路径
 > * **params_file**(str): 参数文件路径,当模型格式为ONNX格式时,此参数无需设定
 > * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
 > * **model_format**(Frontend): 模型格式,默认为ONNX
 
 ### predict函数
 
 > ```
-> YOLOv6.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
+> NanoDetPlus.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
 > ```
 >
 > 模型预测接口,输入图像直接输出检测结果。
 >
 > **参数**
 >
 > > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式
 > > * **conf_threshold**(float): 检测框置信度过滤阈值
 > > * **nms_iou_threshold**(float): NMS处理过程中iou阈值
 
 > **返回**
 >
 > > 返回`fastdeploy.vision.DetectionResult`结构体,结构体说明参考文档[视觉模型预测结果](../../../../../docs/api/vision_results/)
 
 ### 类成员属性
 
 > > * **size**(list[int]): 通过此参数修改预处理过程中resize的大小,包含两个整型元素,表示[width, height], 默认值为[640, 640]
 > > * **padding_value**(list[float]): 通过此参数可以修改图片在resize时候做填充(padding)的值, 包含三个浮点型元素, 分别表示三个通道的值, 默认值为[114, 114, 114]
 > > * **is_no_pad**(bool): 通过此参数控制图片是否通过填充的方式进行resize, `is_no_pad=True` 表示不使用填充的方式,默认值为`is_no_pad=False`
 > > * **is_mini_pad**(bool): 通过此参数可以将resize之后图像的宽高调整为最接近`size`成员变量的值, 并且满足填充的像素大小是可以被`stride`成员变量整除的。默认值为`is_mini_pad=False`
 > > * **stride**(int): 配合`is_mini_pad`成员变量使用, 默认值为`stride=32`
 
 
 
 ## 其它文档
 
-- [YOLOv6 模型介绍](..)
-- [YOLOv6 C++部署](../cpp)
+- [NanoDetPlus 模型介绍](..)
+- [NanoDetPlus C++部署](../cpp)
 - [模型预测结果说明](../../../../../docs/api/vision_results/)
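+
+## 使用示例(示意)
+
+以下为一段最小调用示意,便于快速自测;`size`属性的用法见上文类成员属性说明,预处理尺寸请与所用模型保持一致(此处取值为假设)。
+
+```
+import cv2
+import fastdeploy as fd
+
+model = fd.vision.detection.NanoDetPlus("nanodet-plus-m_320.onnx")
+model.size = [320, 320]  # 与320x320输入的模型对应
+
+result = model.predict(cv2.imread("000000014439.jpg"))
+print(result)  # DetectionResult,含检测框与置信度
+```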