diff --git a/csrcs/fastdeploy/vision.h b/csrcs/fastdeploy/vision.h
index 3b765c387e..2c0bdd1fa8 100644
--- a/csrcs/fastdeploy/vision.h
+++ b/csrcs/fastdeploy/vision.h
@@ -28,6 +28,7 @@
 #include "fastdeploy/vision/wongkinyiu/scaledyolov4.h"
 #include "fastdeploy/vision/wongkinyiu/yolor.h"
 #include "fastdeploy/vision/wongkinyiu/yolov7.h"
+#include "fastdeploy/vision/ppogg/yolov5lite.h"
 #endif
 
 #include "fastdeploy/vision/visualize/visualize.h"
diff --git a/csrcs/fastdeploy/vision/ppogg/ppogg_pybind.cc b/csrcs/fastdeploy/vision/ppogg/ppogg_pybind.cc
new file mode 100644
index 0000000000..606737ae11
--- /dev/null
+++ b/csrcs/fastdeploy/vision/ppogg/ppogg_pybind.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/pybind/main.h"
+
+namespace fastdeploy {
+void BindPpogg(pybind11::module& m) {
+  auto ppogg_module =
+      m.def_submodule("ppogg", "https://github.com/ppogg/YOLOv5-Lite");
+  pybind11::class_<vision::ppogg::YOLOv5Lite, FastDeployModel>(ppogg_module,
+                                                               "YOLOv5Lite")
+      .def(pybind11::init<std::string, std::string, RuntimeOption, Frontend>())
+      .def("predict",
+           [](vision::ppogg::YOLOv5Lite& self, pybind11::array& data,
+              float conf_threshold, float nms_iou_threshold) {
+             auto mat = PyArrayToCvMat(data);
+             vision::DetectionResult res;
+             self.Predict(&mat, &res, conf_threshold, nms_iou_threshold);
+             return res;
+           })
+      .def_readwrite("size", &vision::ppogg::YOLOv5Lite::size)
+      .def_readwrite("padding_value", &vision::ppogg::YOLOv5Lite::padding_value)
+      .def_readwrite("is_mini_pad", &vision::ppogg::YOLOv5Lite::is_mini_pad)
+      .def_readwrite("is_no_pad", &vision::ppogg::YOLOv5Lite::is_no_pad)
+      .def_readwrite("is_scale_up", &vision::ppogg::YOLOv5Lite::is_scale_up)
+      .def_readwrite("stride", &vision::ppogg::YOLOv5Lite::stride)
+      .def_readwrite("max_wh", &vision::ppogg::YOLOv5Lite::max_wh)
+      .def_readwrite("anchor_config", &vision::ppogg::YOLOv5Lite::anchor_config)
+      .def_readwrite("is_decode_exported",
+                     &vision::ppogg::YOLOv5Lite::is_decode_exported);
+}
+}  // namespace fastdeploy
diff --git a/csrcs/fastdeploy/vision/ppogg/yolov5lite.cc b/csrcs/fastdeploy/vision/ppogg/yolov5lite.cc
new file mode 100644
index 0000000000..320867f581
--- /dev/null
+++ b/csrcs/fastdeploy/vision/ppogg/yolov5lite.cc
@@ -0,0 +1,386 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/ppogg/yolov5lite.h"
+#include "fastdeploy/utils/perf.h"
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace ppogg {
+
+void YOLOv5Lite::LetterBox(Mat* mat, const std::vector<int>& size,
+                           const std::vector<float>& color, bool _auto,
+                           bool scale_fill, bool scale_up, int stride) {
+  float scale =
+      std::min(size[1] * 1.0 / mat->Height(), size[0] * 1.0 / mat->Width());
+  if (!scale_up) {
+    scale = std::min(scale, 1.0f);
+  }
+
+  int resize_h = int(round(mat->Height() * scale));
+  int resize_w = int(round(mat->Width() * scale));
+
+  int pad_w = size[0] - resize_w;
+  int pad_h = size[1] - resize_h;
+  if (_auto) {
+    pad_h = pad_h % stride;
+    pad_w = pad_w % stride;
+  } else if (scale_fill) {
+    pad_h = 0;
+    pad_w = 0;
+    resize_h = size[1];
+    resize_w = size[0];
+  }
+  if (resize_h != mat->Height() || resize_w != mat->Width()) {
+    Resize::Run(mat, resize_w, resize_h);
+  }
+  if (pad_h > 0 || pad_w > 0) {
+    float half_h = pad_h * 1.0 / 2;
+    int top = int(round(half_h - 0.1));
+    int bottom = int(round(half_h + 0.1));
+    float half_w = pad_w * 1.0 / 2;
+    int left = int(round(half_w - 0.1));
+    int right = int(round(half_w + 0.1));
+    Pad::Run(mat, top, bottom, left, right, color);
+  }
+}
+
+void YOLOv5Lite::GenerateAnchors(const std::vector<int>& size,
+                                 const std::vector<int>& downsample_strides,
+                                 std::vector<Anchor>* anchors,
+                                 int num_anchors) {
+  // size: tuple of input (width, height)
+  // downsample_strides: downsample strides in YOLOv5Lite, e.g. (8, 16, 32)
+  const int width = size[0];
+  const int height = size[1];
+  for (size_t i = 0; i < downsample_strides.size(); ++i) {
+    const int ds = downsample_strides[i];
+    int num_grid_w = width / ds;
+    int num_grid_h = height / ds;
+    for (int an = 0; an < num_anchors; ++an) {
+      float anchor_w = anchor_config[i][an * 2];
+      float anchor_h = anchor_config[i][an * 2 + 1];
+      for (int g1 = 0; g1 < num_grid_h; ++g1) {
+        for (int g0 = 0; g0 < num_grid_w; ++g0) {
+          (*anchors).emplace_back(Anchor{g0, g1, ds, anchor_w, anchor_h});
+        }
+      }
+    }
+  }
+}
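+
+// For reference: with the default 640x640 input, downsample strides
+// (8, 16, 32) and 3 anchors per stride, the loops above emit
+// 3 * (80*80 + 40*40 + 20*20) = 25200 anchors. This is meant to line up,
+// row for row, with the prediction count of a decode-free ONNX export
+// (e.g. a (1, 25200, 85) output for an 80-class model), so that
+// PostprocessWithDecode can look anchors up by the flat row index.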
+
+YOLOv5Lite::YOLOv5Lite(const std::string& model_file,
+                       const std::string& params_file,
+                       const RuntimeOption& custom_option,
+                       const Frontend& model_format) {
+  if (model_format == Frontend::ONNX) {
+    valid_cpu_backends = {Backend::ORT};  // the usable CPU backends
+    valid_gpu_backends = {Backend::ORT, Backend::TRT};  // the usable GPU backends
+  } else {
+    valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
+    valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+  }
+  runtime_option = custom_option;
+  runtime_option.model_format = model_format;
+  runtime_option.model_file = model_file;
+  runtime_option.params_file = params_file;
+  initialized = Initialize();
+}
+
+bool YOLOv5Lite::Initialize() {
+  // parameters for preprocess
+  size = {640, 640};
+  padding_value = {114.0, 114.0, 114.0};
+  downsample_strides = {8, 16, 32};
+  is_mini_pad = false;
+  is_no_pad = false;
+  is_scale_up = false;
+  stride = 32;
+  max_wh = 7680.0;
+  is_decode_exported = false;
+  anchor_config = {{10.0, 13.0, 16.0, 30.0, 33.0, 23.0},
+                   {30.0, 61.0, 62.0, 45.0, 59.0, 119.0},
+                   {116.0, 90.0, 156.0, 198.0, 373.0, 326.0}};
+
+  if (!InitRuntime()) {
+    FDERROR << "Failed to initialize fastdeploy backend." << std::endl;
+    return false;
+  }
+  return true;
+}
+
+bool YOLOv5Lite::Preprocess(
+    Mat* mat, FDTensor* output,
+    std::map<std::string, std::array<float, 2>>* im_info) {
+  // process after image load
+  float ratio = std::min(size[1] * 1.0f / static_cast<float>(mat->Height()),
+                         size[0] * 1.0f / static_cast<float>(mat->Width()));
+  if (ratio != 1.0) {
+    int interp = cv::INTER_AREA;
+    if (ratio > 1.0) {
+      interp = cv::INTER_LINEAR;
+    }
+    int resize_h = int(mat->Height() * ratio);
+    int resize_w = int(mat->Width() * ratio);
+    Resize::Run(mat, resize_w, resize_h, -1, -1, interp);
+  }
+  // yolov5lite's preprocess steps
+  // 1. letterbox
+  // 2. BGR->RGB
+  // 3. HWC->CHW
+  YOLOv5Lite::LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad,
+                        is_scale_up, stride);
+  BGR2RGB::Run(mat);
+  // Normalize::Run(mat, std::vector<float>(mat->Channels(), 0.0),
+  //                std::vector<float>(mat->Channels(), 1.0));
+  // Compute `result = mat * alpha + beta` directly by channel
+  std::vector<float> alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f};
+  std::vector<float> beta = {0.0f, 0.0f, 0.0f};
+  Convert::Run(mat, alpha, beta);
+
+  // Record output shape of preprocessed image
+  (*im_info)["output_shape"] = {static_cast<float>(mat->Height()),
+                                static_cast<float>(mat->Width())};
+
+  HWC2CHW::Run(mat);
+  Cast::Run(mat, "float");
+  mat->ShareWithTensor(output);
+  output->shape.insert(output->shape.begin(), 1);  // reshape to n, c, h, w
+  return true;
+}
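+
+// Note on the Convert step above: it computes `mat * alpha + beta` per
+// channel, so with alpha = 1/255 and beta = 0 it simply rescales pixel
+// values from [0, 255] to the [0, 1] range the model expects.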
+
+bool YOLOv5Lite::PostprocessWithDecode(
+    FDTensor& infer_result, DetectionResult* result,
+    const std::map<std::string, std::array<float, 2>>& im_info,
+    float conf_threshold, float nms_iou_threshold) {
+  FDASSERT(infer_result.shape[0] == 1, "Only support batch = 1 now.");
+  result->Clear();
+  result->Reserve(infer_result.shape[1]);
+  if (infer_result.dtype != FDDataType::FP32) {
+    FDERROR << "Only support post process with float32 data." << std::endl;
+    return false;
+  }
+  // generate anchors with downsample strides
+  std::vector<Anchor> anchors;
+  int num_anchors = anchor_config[0].size() / 2;
+  GenerateAnchors(size, downsample_strides, &anchors, num_anchors);
+
+  // infer_result shape might look like (1, n, 85 = 5 + 80)
+  float* data = static_cast<float*>(infer_result.Data());
+  for (size_t i = 0; i < infer_result.shape[1]; ++i) {
+    int s = i * infer_result.shape[2];
+    float confidence = data[s + 4];
+    float* max_class_score =
+        std::max_element(data + s + 5, data + s + infer_result.shape[2]);
+    confidence *= (*max_class_score);
+    // filter boxes by conf_threshold
+    if (confidence <= conf_threshold) {
+      continue;
+    }
+    int32_t label_id = std::distance(data + s + 5, max_class_score);
+    // fetch the i-th anchor
+    float grid0 = static_cast<float>(anchors.at(i).grid0);
+    float grid1 = static_cast<float>(anchors.at(i).grid1);
+    float downsample_stride = static_cast<float>(anchors.at(i).stride);
+    float anchor_w = anchors.at(i).anchor_w;
+    float anchor_h = anchors.at(i).anchor_h;
+    // convert from offsets to [x, y, w, h]
+    float dx = data[s];
+    float dy = data[s + 1];
+    float dw = data[s + 2];
+    float dh = data[s + 3];
+
+    float x = (dx * 2.0f - 0.5f + grid0) * downsample_stride;
+    float y = (dy * 2.0f - 0.5f + grid1) * downsample_stride;
+    float w = std::pow(dw * 2.0f, 2.0f) * anchor_w;
+    float h = std::pow(dh * 2.0f, 2.0f) * anchor_h;
+
+    // convert from [x, y, w, h] to [x1, y1, x2, y2]
+    result->boxes.emplace_back(std::array<float, 4>{
+        x - w / 2.0f + label_id * max_wh, y - h / 2.0f + label_id * max_wh,
+        x + w / 2.0f + label_id * max_wh, y + h / 2.0f + label_id * max_wh});
+    // label_id * max_wh for multi-class NMS
+    result->label_ids.push_back(label_id);
+    result->scores.push_back(confidence);
+  }
+  utils::NMS(result, nms_iou_threshold);
+
+  // scale the boxes to the origin image shape
+  auto iter_out = im_info.find("output_shape");
+  auto iter_ipt = im_info.find("input_shape");
+  FDASSERT(iter_out != im_info.end() && iter_ipt != im_info.end(),
+           "Cannot find input_shape or output_shape from im_info.");
+  float out_h = iter_out->second[0];
+  float out_w = iter_out->second[1];
+  float ipt_h = iter_ipt->second[0];
+  float ipt_w = iter_ipt->second[1];
+  float scale = std::min(out_h / ipt_h, out_w / ipt_w);
+  float pad_h = (out_h - ipt_h * scale) / 2.0f;
+  float pad_w = (out_w - ipt_w * scale) / 2.0f;
+  if (is_mini_pad) {
+    // corresponds to the _auto = true branch in LetterBox
+    pad_h = static_cast<float>(static_cast<int>(pad_h) % stride);
+    pad_w = static_cast<float>(static_cast<int>(pad_w) % stride);
+  }
+  for (size_t i = 0; i < result->boxes.size(); ++i) {
+    int32_t label_id = (result->label_ids)[i];
+    // remove the class offset and clip the box
+    result->boxes[i][0] = result->boxes[i][0] - max_wh * label_id;
+    result->boxes[i][1] = result->boxes[i][1] - max_wh * label_id;
+    result->boxes[i][2] = result->boxes[i][2] - max_wh * label_id;
+    result->boxes[i][3] = result->boxes[i][3] - max_wh * label_id;
+    result->boxes[i][0] = std::max((result->boxes[i][0] - pad_w) / scale, 0.0f);
+    result->boxes[i][1] = std::max((result->boxes[i][1] - pad_h) / scale, 0.0f);
+    result->boxes[i][2] = std::max((result->boxes[i][2] - pad_w) / scale, 0.0f);
+    result->boxes[i][3] = std::max((result->boxes[i][3] - pad_h) / scale, 0.0f);
+    result->boxes[i][0] = std::min(result->boxes[i][0], ipt_w - 1.0f);
+    result->boxes[i][1] = std::min(result->boxes[i][1], ipt_h - 1.0f);
+    result->boxes[i][2] = std::min(result->boxes[i][2], ipt_w - 1.0f);
+    result->boxes[i][3] = std::min(result->boxes[i][3], ipt_h - 1.0f);
+  }
+  return true;
+}
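+
+// Both post-process paths add `label_id * max_wh` to every box before NMS
+// and subtract it again afterwards. Because max_wh (7680 by default) is far
+// larger than the letterboxed input, boxes of different classes can never
+// overlap during NMS (two identical boxes with labels 0 and 1 end up 7680
+// pixels apart), so a single NMS pass behaves like per-class NMS.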
+
+bool YOLOv5Lite::Postprocess(
+    FDTensor& infer_result, DetectionResult* result,
+    const std::map<std::string, std::array<float, 2>>& im_info,
+    float conf_threshold, float nms_iou_threshold) {
+  FDASSERT(infer_result.shape[0] == 1, "Only support batch = 1 now.");
+  result->Clear();
+  result->Reserve(infer_result.shape[1]);
+  if (infer_result.dtype != FDDataType::FP32) {
+    FDERROR << "Only support post process with float32 data." << std::endl;
+    return false;
+  }
+  float* data = static_cast<float*>(infer_result.Data());
+  for (size_t i = 0; i < infer_result.shape[1]; ++i) {
+    int s = i * infer_result.shape[2];
+    float confidence = data[s + 4];
+    float* max_class_score =
+        std::max_element(data + s + 5, data + s + infer_result.shape[2]);
+    confidence *= (*max_class_score);
+    // filter boxes by conf_threshold
+    if (confidence <= conf_threshold) {
+      continue;
+    }
+    int32_t label_id = std::distance(data + s + 5, max_class_score);
+    // convert from [x, y, w, h] to [x1, y1, x2, y2]
+    result->boxes.emplace_back(std::array<float, 4>{
+        data[s] - data[s + 2] / 2.0f + label_id * max_wh,
+        data[s + 1] - data[s + 3] / 2.0f + label_id * max_wh,
+        data[s + 0] + data[s + 2] / 2.0f + label_id * max_wh,
+        data[s + 1] + data[s + 3] / 2.0f + label_id * max_wh});
+    result->label_ids.push_back(label_id);
+    result->scores.push_back(confidence);
+  }
+  utils::NMS(result, nms_iou_threshold);
+
+  // scale the boxes to the origin image shape
+  auto iter_out = im_info.find("output_shape");
+  auto iter_ipt = im_info.find("input_shape");
+  FDASSERT(iter_out != im_info.end() && iter_ipt != im_info.end(),
+           "Cannot find input_shape or output_shape from im_info.");
+  float out_h = iter_out->second[0];
+  float out_w = iter_out->second[1];
+  float ipt_h = iter_ipt->second[0];
+  float ipt_w = iter_ipt->second[1];
+  float scale = std::min(out_h / ipt_h, out_w / ipt_w);
+  float pad_h = (out_h - ipt_h * scale) / 2.0f;
+  float pad_w = (out_w - ipt_w * scale) / 2.0f;
+  if (is_mini_pad) {
+    // corresponds to the _auto = true branch in LetterBox
+    pad_h = static_cast<float>(static_cast<int>(pad_h) % stride);
+    pad_w = static_cast<float>(static_cast<int>(pad_w) % stride);
+  }
+  for (size_t i = 0; i < result->boxes.size(); ++i) {
+    int32_t label_id = (result->label_ids)[i];
+    // remove the class offset and clip the box
+    result->boxes[i][0] = result->boxes[i][0] - max_wh * label_id;
+    result->boxes[i][1] = result->boxes[i][1] - max_wh * label_id;
+    result->boxes[i][2] = result->boxes[i][2] - max_wh * label_id;
+    result->boxes[i][3] = result->boxes[i][3] - max_wh * label_id;
+    result->boxes[i][0] = std::max((result->boxes[i][0] - pad_w) / scale, 0.0f);
+    result->boxes[i][1] = std::max((result->boxes[i][1] - pad_h) / scale, 0.0f);
+    result->boxes[i][2] = std::max((result->boxes[i][2] - pad_w) / scale, 0.0f);
+    result->boxes[i][3] = std::max((result->boxes[i][3] - pad_h) / scale, 0.0f);
+    result->boxes[i][0] = std::min(result->boxes[i][0], ipt_w - 1.0f);
+    result->boxes[i][1] = std::min(result->boxes[i][1], ipt_h - 1.0f);
+    result->boxes[i][2] = std::min(result->boxes[i][2], ipt_w - 1.0f);
+    result->boxes[i][3] = std::min(result->boxes[i][3], ipt_h - 1.0f);
+  }
+  return true;
+}
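+
+// The rescaling above inverts the LetterBox transform: with
+// scale = min(out_h / ipt_h, out_w / ipt_w) and (pad_w, pad_h) the letterbox
+// borders, a coordinate x in network-input space maps back to
+// (x - pad_w) / scale in the original image. For example, a 1280x720 image
+// letterboxed to 640x640 gives scale = 0.5, pad_w = 0 and
+// pad_h = (640 - 720 * 0.5) / 2 = 140, so a predicted y of 300 maps back
+// to (300 - 140) / 0.5 = 320.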
+
+bool YOLOv5Lite::Predict(cv::Mat* im, DetectionResult* result,
+                         float conf_threshold, float nms_iou_threshold) {
+#ifdef FASTDEPLOY_DEBUG
+  TIMERECORD_START(0)
+#endif
+  Mat mat(*im);
+  std::vector<FDTensor> input_tensors(1);
+
+  std::map<std::string, std::array<float, 2>> im_info;
+
+  // Record the shape of image and the shape of preprocessed image
+  im_info["input_shape"] = {static_cast<float>(mat.Height()),
+                            static_cast<float>(mat.Width())};
+  im_info["output_shape"] = {static_cast<float>(mat.Height()),
+                             static_cast<float>(mat.Width())};
+
+  if (!Preprocess(&mat, &input_tensors[0], &im_info)) {
+    FDERROR << "Failed to preprocess input image." << std::endl;
+    return false;
+  }
+
+#ifdef FASTDEPLOY_DEBUG
+  TIMERECORD_END(0, "Preprocess")
+  TIMERECORD_START(1)
+#endif
+
+  input_tensors[0].name = InputInfoOfRuntime(0).name;
+  std::vector<FDTensor> output_tensors;
+  if (!Infer(input_tensors, &output_tensors)) {
+    FDERROR << "Failed to inference." << std::endl;
+    return false;
+  }
+#ifdef FASTDEPLOY_DEBUG
+  TIMERECORD_END(1, "Inference")
+  TIMERECORD_START(2)
+#endif
+
+  if (is_decode_exported) {
+    if (!Postprocess(output_tensors[0], result, im_info, conf_threshold,
+                     nms_iou_threshold)) {
+      FDERROR << "Failed to post process." << std::endl;
+      return false;
+    }
+  } else {
+    if (!PostprocessWithDecode(output_tensors[0], result, im_info,
+                               conf_threshold, nms_iou_threshold)) {
+      FDERROR << "Failed to post process." << std::endl;
+      return false;
+    }
+  }
+
+#ifdef FASTDEPLOY_DEBUG
+  TIMERECORD_END(2, "Postprocess")
+#endif
+  return true;
+}
+
+}  // namespace ppogg
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/csrcs/fastdeploy/vision/ppogg/yolov5lite.h b/csrcs/fastdeploy/vision/ppogg/yolov5lite.h
new file mode 100644
index 0000000000..3eb556cfa3
--- /dev/null
+++ b/csrcs/fastdeploy/vision/ppogg/yolov5lite.h
@@ -0,0 +1,132 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include "fastdeploy/fastdeploy_model.h"
+#include "fastdeploy/vision/common/processors/transform.h"
+#include "fastdeploy/vision/common/result.h"
+
+namespace fastdeploy {
+namespace vision {
+namespace ppogg {
+
+class FASTDEPLOY_DECL YOLOv5Lite : public FastDeployModel {
+ public:
+  // When model_format is ONNX, params_file is not required.
+  // When model_format is Paddle, both model_file and params_file are required.
+  YOLOv5Lite(const std::string& model_file, const std::string& params_file = "",
+             const RuntimeOption& custom_option = RuntimeOption(),
+             const Frontend& model_format = Frontend::ONNX);
+
+  // Name of the model
+  virtual std::string ModelName() const { return "ppogg/YOLOv5-Lite"; }
+
+  // The prediction interface called by users
+  // im is the input image; for CV models it is currently a cv::Mat
+  // result is the output structure holding the predictions
+  // conf_threshold and nms_iou_threshold are post-processing parameters
+  virtual bool Predict(cv::Mat* im, DetectionResult* result,
+                       float conf_threshold = 0.25,
+                       float nms_iou_threshold = 0.5);
+
+  // The following are parameters used at prediction time, mostly for pre-
+  // and post-processing. After creating the model, users may modify them
+  // according to the model's requirements and their own needs.
+  // tuple of (width, height)
+  std::vector<int> size;
+  // padding value; its size should match the number of channels
+  std::vector<float> padding_value;
+  // only pad to the minimum rectangle whose height and width are multiples
+  // of stride
+  bool is_mini_pad;
+  // when is_mini_pad = false and is_no_pad = true, resize the image to the
+  // target size directly
+  bool is_no_pad;
+  // if is_scale_up is false, the input image can only be scaled down; the
+  // resize scale cannot exceed 1.0
+  bool is_scale_up;
+  // padding stride, used with is_mini_pad
+  int stride;
+  // per-class offset added to the boxes during NMS
+  float max_wh;
+  // downsample strides for YOLOv5Lite to generate anchors; (8, 16, 32) by
+  // default, and some models may also use a stride of 64
+  std::vector<int> downsample_strides;
+  // anchor parameters; for the default downsample strides (8, 16, 32), each
+  // stride has three anchors given as (width, height) pairs
+  std::vector<std::vector<float>> anchor_config;
+  // whether the model_file was exported with the decode module. The official
+  // YOLOv5Lite/export.py script exports ONNX files without the decode
+  // module. Please set this to 'true' manually if the model file was
+  // exported with the decode module.
+  // false : ONNX file without the decode module.
+  // true  : ONNX file with the decode module.
+  bool is_decode_exported;
+
+ private:
+  // parameters required by GenerateAnchors to generate anchors when the
+  // ONNX file has no decode module
+  struct Anchor {
+    int grid0;
+    int grid1;
+    int stride;
+    float anchor_w;
+    float anchor_h;
+  };
+
+  // Initialization, including initializing the backend and any other
+  // operations needed for inference
+  bool Initialize();
+
+  // Preprocessing of the input image
+  // Mat is the data structure defined by FastDeploy
+  // FDTensor holds the preprocessed tensor passed to the backend
+  // im_info stores data recorded during preprocessing, needed later by
+  // post-processing
+  bool Preprocess(Mat* mat, FDTensor* output,
+                  std::map<std::string, std::array<float, 2>>* im_info);
+
+  // Post-processing of the backend inference result, returned to users
+  // infer_result is the output tensor of the backend
+  // result is the model's prediction
+  // im_info holds the information recorded during preprocessing, used to
+  // restore the boxes
+  // conf_threshold is the confidence threshold for filtering boxes
+  // nms_iou_threshold is the IoU threshold used by NMS
+  bool Postprocess(FDTensor& infer_result, DetectionResult* result,
+                   const std::map<std::string, std::array<float, 2>>& im_info,
+                   float conf_threshold, float nms_iou_threshold);
+
+  // The official YOLOv5Lite/export.py script exports ONNX files without the
+  // decode module by default, so decoding has to happen in post-processing.
+  // This function handles ONNX files without the decode module; it is used
+  // when `is_decode_exported = false`.
+  bool PostprocessWithDecode(
+      FDTensor& infer_result, DetectionResult* result,
+      const std::map<std::string, std::array<float, 2>>& im_info,
+      float conf_threshold, float nms_iou_threshold);
+
+  // Apply LetterBox to the image
+  // mat is the original image as read
+  // size is the input size of the model
+  void LetterBox(Mat* mat, const std::vector<int>& size,
+                 const std::vector<float>& color, bool _auto,
+                 bool scale_fill = false, bool scale_up = true,
+                 int stride = 32);
+  // generate anchors for decoding when the ONNX file has no decode module
+  void GenerateAnchors(const std::vector<int>& size,
+                       const std::vector<int>& downsample_strides,
+                       std::vector<Anchor>* anchors, const int num_anchors = 3);
+};
+}  // namespace ppogg
+}  // namespace vision
+}  // namespace fastdeploy
diff --git a/csrcs/fastdeploy/vision/vision_pybind.cc b/csrcs/fastdeploy/vision/vision_pybind.cc
index 18b662e68e..79aa876351 100644
--- a/csrcs/fastdeploy/vision/vision_pybind.cc
+++ b/csrcs/fastdeploy/vision/vision_pybind.cc
@@ -27,6 +27,7 @@ void BindDeepCam(pybind11::module& m);
 void BindRangiLyu(pybind11::module& m);
 void BindLinzaer(pybind11::module& m);
 void BindBiubug6(pybind11::module& m);
+void BindPpogg(pybind11::module& m);
 #ifdef ENABLE_VISION_VISUALIZE
 void BindVisualize(pybind11::module& m);
 #endif
@@ -73,6 +74,7 @@ void BindVision(pybind11::module& m) {
   BindRangiLyu(m);
   BindLinzaer(m);
   BindBiubug6(m);
+  BindPpogg(m);
 #ifdef ENABLE_VISION_VISUALIZE
   BindVisualize(m);
 #endif
diff --git a/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.h b/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.h
index 788b57474f..39066a29ec 100644
--- a/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.h
+++ b/csrcs/fastdeploy/vision/wongkinyiu/scaledyolov4.h
@@ -70,7 +70,7 @@ class FASTDEPLOY_DECL ScaledYOLOv4 : public FastDeployModel {
   // Mat is the data structure defined by FastDeploy
   // FDTensor holds the preprocessed tensor passed to the backend
   // im_info stores data recorded during preprocessing, used in post-processing
-  bool Preprocess(Mat* mat, FDTensor* outputs,
+  bool Preprocess(Mat* mat, FDTensor* output,
                   std::map<std::string, std::array<float, 2>>* im_info);
 
   // Post-processing of the backend inference result, returned to users
diff --git a/csrcs/fastdeploy/vision/wongkinyiu/yolor.h b/csrcs/fastdeploy/vision/wongkinyiu/yolor.h
index 69f5ea8760..7597f42d32 100644
--- a/csrcs/fastdeploy/vision/wongkinyiu/yolor.h
+++ b/csrcs/fastdeploy/vision/wongkinyiu/yolor.h
@@ -69,7 +69,7 @@ class FASTDEPLOY_DECL YOLOR : public FastDeployModel {
   // Mat is the data structure defined by FastDeploy
   // FDTensor holds the preprocessed tensor passed to the backend
   // im_info stores data recorded during preprocessing, used in post-processing
-  bool Preprocess(Mat* mat, FDTensor* outputs,
+  bool Preprocess(Mat* mat, FDTensor* output,
                   std::map<std::string, std::array<float, 2>>* im_info);
 
   // Post-processing of the backend inference result, returned to users
diff --git a/csrcs/fastdeploy/vision/wongkinyiu/yolov7.h b/csrcs/fastdeploy/vision/wongkinyiu/yolov7.h
index c494754f0e..64e18ad47b 100644
--- a/csrcs/fastdeploy/vision/wongkinyiu/yolov7.h
+++ b/csrcs/fastdeploy/vision/wongkinyiu/yolov7.h
@@ -69,7 +69,7 @@ class FASTDEPLOY_DECL YOLOv7 : public FastDeployModel {
   // Mat is the data structure defined by FastDeploy
   // FDTensor holds the preprocessed tensor passed to the backend
   // im_info stores data recorded during preprocessing, used in post-processing
-  bool Preprocess(Mat* mat, FDTensor* outputs,
+  bool Preprocess(Mat* mat, FDTensor* output,
                   std::map<std::string, std::array<float, 2>>* im_info);
 
   // Post-processing of the backend inference result, returned to users
diff --git a/examples/vision/ppogg_yolov5lite.cc b/examples/vision/ppogg_yolov5lite.cc
new file mode 100644
index 0000000000..577543b375
--- /dev/null
+++ b/examples/vision/ppogg_yolov5lite.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+int main() {
+  namespace vis = fastdeploy::vision;
+
+  std::string model_file = "../resources/models/yolov5lite.onnx";
+  std::string img_path = "../resources/images/test.jpg";
+  std::string vis_path = "../resources/outputs/ppogg_yolov5lite_vis_result.jpg";
+
+  auto model = vis::ppogg::YOLOv5Lite(model_file);
+  if (!model.Initialized()) {
+    std::cerr << "Init Failed! Model: " << model_file << std::endl;
+    return -1;
+  } else {
+    std::cout << "Init Done! Model: " << model_file << std::endl;
+  }
+  model.EnableDebug();
+
+  cv::Mat im = cv::imread(img_path);
+  cv::Mat vis_im = im.clone();
+
+  vis::DetectionResult res;
+  if (!model.Predict(&im, &res)) {
+    std::cerr << "Prediction Failed." << std::endl;
+    return -1;
+  } else {
+    std::cout << "Prediction Done!" << std::endl;
+  }
+
+  // Print the detection results
+  std::cout << res.Str() << std::endl;
+
+  // Visualize the prediction results
+  vis::Visualize::VisDetection(&vis_im, res);
+  cv::imwrite(vis_path, vis_im);
+  std::cout << "Detect Done! Saved: " << vis_path << std::endl;
+  return 0;
+}
diff --git a/fastdeploy/vision/__init__.py b/fastdeploy/vision/__init__.py
index a362029832..067659570e 100644
--- a/fastdeploy/vision/__init__.py
+++ b/fastdeploy/vision/__init__.py
@@ -26,3 +26,4 @@
 from . import rangilyu
 from . import linzaer
 from . import biubug6
+from . import ppogg
diff --git a/fastdeploy/vision/ppogg/__init__.py b/fastdeploy/vision/ppogg/__init__.py
new file mode 100644
index 0000000000..3acd1edb2d
--- /dev/null
+++ b/fastdeploy/vision/ppogg/__init__.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from ... import FastDeployModel, Frontend
+from ... import fastdeploy_main as C
+
+
+class YOLOv5Lite(FastDeployModel):
+    def __init__(self,
+                 model_file,
+                 params_file="",
+                 runtime_option=None,
+                 model_format=Frontend.ONNX):
+        # Initialize the backend option through the base class; the
+        # initialized option is stored in self._runtime_option
+        super(YOLOv5Lite, self).__init__(runtime_option)
+
+        self._model = C.vision.ppogg.YOLOv5Lite(
+            model_file, params_file, self._runtime_option, model_format)
+        # self.initialized indicates whether the whole model was
+        # initialized successfully
+        assert self.initialized, "YOLOv5Lite initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        return self._model.predict(input_image, conf_threshold,
+                                   nms_iou_threshold)
+
+    # Properties wrapping the YOLOv5Lite model's parameters, mostly related
+    # to preprocessing. For example, model.size = [1280, 1280] changes the
+    # resize target used during preprocessing (provided the model supports it)
+    @property
+    def size(self):
+        return self._model.size
+
+    @property
+    def padding_value(self):
+        return self._model.padding_value
+
+    @property
+    def is_no_pad(self):
+        return self._model.is_no_pad
+
+    @property
+    def is_mini_pad(self):
+        return self._model.is_mini_pad
+
+    @property
+    def is_scale_up(self):
+        return self._model.is_scale_up
+
+    @property
+    def stride(self):
+        return self._model.stride
+
+    @property
+    def max_wh(self):
+        return self._model.max_wh
+
+    @property
+    def is_decode_exported(self):
+        return self._model.is_decode_exported
+
+    @property
+    def anchor_config(self):
+        return self._model.anchor_config
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(wh, (list, tuple)),\
+            "The value to set `size` must be type of tuple or list."
+        assert len(wh) == 2,\
+            "The value to set `size` must contain 2 elements meaning [width, height], but now it contains {} elements.".format(
+                len(wh))
+        self._model.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value,
+            list), "The value to set `padding_value` must be type of list."
+        self._model.padding_value = value
+
+    @is_no_pad.setter
+    def is_no_pad(self, value):
+        assert isinstance(
+            value, bool), "The value to set `is_no_pad` must be type of bool."
+        self._model.is_no_pad = value
+
+    @is_mini_pad.setter
+    def is_mini_pad(self, value):
+        assert isinstance(
+            value,
+            bool), "The value to set `is_mini_pad` must be type of bool."
+        self._model.is_mini_pad = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value,
+            bool), "The value to set `is_scale_up` must be type of bool."
+        self._model.is_scale_up = value
+
+    @stride.setter
+    def stride(self, value):
+        assert isinstance(
+            value, int), "The value to set `stride` must be type of int."
+        self._model.stride = value
+
+    @max_wh.setter
+    def max_wh(self, value):
+        assert isinstance(
+            value, float), "The value to set `max_wh` must be type of float."
+        self._model.max_wh = value
+
+    @is_decode_exported.setter
+    def is_decode_exported(self, value):
+        assert isinstance(
+            value,
+            bool), "The value to set `is_decode_exported` must be type of bool."
+        self._model.is_decode_exported = value
+
+    @anchor_config.setter
+    def anchor_config(self, anchor_config_val):
+        assert isinstance(anchor_config_val, list),\
+            "The value to set `anchor_config` must be type of list."
+        assert isinstance(anchor_config_val[0], list),\
+            "The value to set `anchor_config` must be a 2-dimensional list."
+        self._model.anchor_config = anchor_config_val
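+
+# A minimal usage sketch of the setters above; the values are illustrative
+# (the anchors shown are the library defaults) and only make sense if the
+# loaded model actually matches them:
+#
+#   model = YOLOv5Lite("yolov5lite.onnx")
+#   model.size = [320, 320]   # letterbox target, [width, height]
+#   model.anchor_config = [[10.0, 13.0, 16.0, 30.0, 33.0, 23.0],
+#                          [30.0, 61.0, 62.0, 45.0, 59.0, 119.0],
+#                          [116.0, 90.0, 156.0, 198.0, 373.0, 326.0]]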
diff --git a/model_zoo/vision/yolov5lite/README.md b/model_zoo/vision/yolov5lite/README.md
new file mode 100644
index 0000000000..22c726e85d
--- /dev/null
+++ b/model_zoo/vision/yolov5lite/README.md
@@ -0,0 +1,130 @@
+# YOLOv5Lite Deployment Example
+
+The currently supported model version is [YOLOv5-Lite-v1.4](https://github.com/ppogg/YOLOv5-Lite/releases/tag/v1.4).
+
+This document describes how to run fast deployment inference with [YOLOv5Lite](https://github.com/ppogg/YOLOv5-Lite). The directory is laid out as follows
+
+```
+.
+├── cpp
+│   ├── CMakeLists.txt
+│   ├── README.md
+│   └── yolov5lite.cc
+├── README.md
+└── yolov5lite.py
+```
+
+## Getting the ONNX file
+- Automatically
+  Visit the official [YOLOv5Lite](https://github.com/ppogg/YOLOv5-Lite) GitHub repository, follow its instructions to download and install, then download a `yolov5-lite-xx.onnx` model (tip: the officially provided ONNX files currently do not contain the decode module)
+  ```
+  # Download a yolov5-lite model file (.onnx)
+  Download from https://drive.google.com/file/d/1bJByk9eoS6pv8Z3N4bcLRCV3i7uk24aU/view
+  The official repo also offers Baidu Cloud downloads
+  ```
+
+- Manually
+
+  Visit the official [YOLOv5Lite](https://github.com/ppogg/YOLOv5-Lite) GitHub repository, follow its instructions to download and install, download a `yolov5-lite-xx.pt` model, and use `export.py` to produce an `onnx` file.
+
+  - Exporting an ONNX file with the decode module
+
+    First, modify the code following the fix in [YOLOv5-Lite#189](https://github.com/ppogg/YOLOv5-Lite/pull/189).
+
+    ```
+    # Download a yolov5-lite model file (.pt)
+    Download from https://drive.google.com/file/d/1oftzqOREGqDCerf7DtD5BZp9YWELlkMe/view
+    The official repo also offers Baidu Cloud downloads
+
+    # Export the onnx file
+    python export.py --grid --dynamic --concat --weights PATH/TO/yolov5-lite-xx.pt
+
+    # Move the onnx file to the demo directory
+    cp PATH/TO/yolov5lite.onnx PATH/TO/model_zoo/vision/yolov5lite/
+    ```
+  - Exporting an ONNX file without the decode module (no code changes needed)
+
+    ```
+    # Download a yolov5-lite model file
+    Download from https://drive.google.com/file/d/1oftzqOREGqDCerf7DtD5BZp9YWELlkMe/view
+    The official repo also offers Baidu Cloud downloads
+
+    # Export the onnx file
+    python export.py --grid --dynamic --weights PATH/TO/yolov5-lite-xx.pt
+
+    # Move the onnx file to the demo directory
+    cp PATH/TO/yolov5lite.onnx PATH/TO/model_zoo/vision/yolov5lite/
+    ```
+## Installing FastDeploy
+
+Install FastDeploy with the commands below. Note that this installs `vision-cpu`; `vision-gpu` can be installed instead if needed
+
+```
+# Install the fastdeploy-python tool
+pip install fastdeploy-python
+
+# Install the vision-cpu module
+fastdeploy install vision-cpu
+```
+
+## Choosing how the ONNX file is handled
+
+If the ONNX file contains the decode module, set `model.is_decode_exported = True` (uncomment line 12 of yolov5lite.py)
+
+If the ONNX file does not contain the decode module, nothing needs to be done; the default is `model.is_decode_exported = False`
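+
+As a minimal sketch of the toggle (assuming `yolov5lite.onnx` is in the current directory):
+
+```
+import fastdeploy as fd
+
+model = fd.vision.ppogg.YOLOv5Lite("yolov5lite.onnx")
+# only needed for an ONNX file exported WITH the decode module
+model.is_decode_exported = True
+```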
+
+## Python deployment
+
+Run the following command; the test image is downloaded automatically
+```
+python yolov5lite.py
+```
+
+Once it finishes, the visualized result is saved locally as `vis_result.jpg` and the detection results are printed as follows
+```
+DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
+1289.729126,698.414612, 1404.110229, 1023.949524, 0.893141, 0
+300.958649,1027.166992, 449.921753, 1299.823608, 0.887509, 0
+627.481201,823.830750, 718.942078, 1133.402344, 0.885308, 0
+152.969437,1147.352905, 257.228424, 1301.652710, 0.877009, 0
+512.867188,773.371094, 649.768494, 1123.529785, 0.870583, 0
+906.801147,508.160278, 997.325867, 825.934509, 0.867381, 0
+307.480988,87.785973, 408.681732, 387.337463, 0.860646, 0
+783.116821,492.420319, 871.741028, 774.283691, 0.851574, 0
+1347.626343,190.911758, 1452.582031, 459.044617, 0.837095, 0
+3.035009,3.509769, 97.237442, 257.884094, 0.835671, 0
+261.199738,303.971527, 371.036041, 569.222595, 0.834187, 0
+1170.358032,722.587219, 1284.564087, 1036.034302, 0.833685, 0
+660.728333,476.764618, 760.990723, 783.636414, 0.823469, 0
+777.628906,815.975098, 886.895935, 1115.206421, 0.820669, 0
+415.902740,983.790283, 543.582764, 1300.361206, 0.791539, 0
+132.273209,40.751694, 210.614563, 285.128174, 0.790815, 0
+1331.930664,370.903687, 1446.262573, 638.119202, 0.773755, 0
+1254.425293,31.073910, 1352.297241, 312.583282, 0.743923, 0
+915.965088,310.556458, 1031.921265, 624.672302, 0.696823, 0
+499.573517,362.165588, 595.503296, 624.872070, 0.678821, 0
+956.890747,76.389160, 1068.599609, 340.183533, 0.656648, 0
+452.388977,320.288269, 532.330688, 593.987915, 0.652459, 0
+488.305664,1028.187012, 565.136719, 1179.688477, 0.629574, 24
+855.175781,868.482422, 916.516113, 988.196777, 0.555574, 26
+1321.689453,1.638852, 1384.584961, 99.413322, 0.504122, 0
+845.324707,531.824768, 875.939941, 614.515198, 0.472173, 26
+1342.546387,2.096432, 1420.351929, 98.888016, 0.463313, 0
+990.747070,635.389221, 1018.249512, 695.264709, 0.444000, 26
+956.799316,120.643112, 1015.100098, 242.920944, 0.442043, 26
+560.449219,401.270538, 607.763672, 522.486389, 0.434484, 26
+1329.199219,372.522980, 1443.054199, 635.315979, 0.399014, 26
+956.140137,88.526413, 1047.509766, 305.213409, 0.367863, 26
+1379.296875,852.808594, 1406.909180, 916.456055, 0.366000, 26
+1331.909180,468.433624, 1369.299316, 532.044495, 0.352329, 26
+864.880371,915.723633, 916.223145, 990.979980, 0.325205, 26
+260.778809,341.724640, 322.229004, 442.432648, 0.320724, 24
+1271.154785,77.393600, 1336.230469, 186.194870, 0.307823, 26
+```
+
+## Other documents
+
+- [C++ deployment](./cpp/README.md)
+- [YOLOv5Lite API documentation](./api.md)
diff --git a/model_zoo/vision/yolov5lite/api.md b/model_zoo/vision/yolov5lite/api.md
new file mode 100644
index 0000000000..38cd87725a
--- /dev/null
+++ b/model_zoo/vision/yolov5lite/api.md
@@ -0,0 +1,71 @@
+# YOLOv5Lite API Documentation
+
+## Python API
+
+### YOLOv5Lite class
+```
+fastdeploy.vision.ppogg.YOLOv5Lite(model_file, params_file="", runtime_option=None, model_format=fd.Frontend.ONNX)
+```
+Loads and initializes a YOLOv5Lite model. When model_format is `fd.Frontend.ONNX`, only model_file is required, e.g. `yolov5lite.onnx`; when model_format is `fd.Frontend.PADDLE`, both model_file and params_file are required.
+
+**Parameters**
+
+> * **model_file**(str): path to the model file
+> * **params_file**(str): path to the parameters file
+> * **runtime_option**(RuntimeOption): backend inference configuration; None uses the default configuration
+> * **model_format**(Frontend): model format
+
+#### predict function
+> ```
+> YOLOv5Lite.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
+> ```
+> Prediction interface: takes an image as input and returns the detection results.
+>
+> **Parameters**
+>
+> > * **image_data**(np.ndarray): input data, in HWC, BGR format
+> > * **conf_threshold**(float): confidence threshold for filtering detection boxes
+> > * **nms_iou_threshold**(float): IoU threshold used during NMS
+
+See [yolov5lite.py](./yolov5lite.py) for example code.
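+
+A short usage sketch of the Python API (file paths are placeholders):
+
+```
+import fastdeploy as fd
+import cv2
+
+model = fd.vision.ppogg.YOLOv5Lite("yolov5lite.onnx")
+im = cv2.imread("test.jpg")  # HWC, BGR
+result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)
+print(result)
+```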
+
+
+## C++ API
+
+### YOLOv5Lite class
+```
+fastdeploy::vision::ppogg::YOLOv5Lite(
+        const string& model_file,
+        const string& params_file = "",
+        const RuntimeOption& runtime_option = RuntimeOption(),
+        const Frontend& model_format = Frontend::ONNX)
+```
+Loads and initializes a YOLOv5Lite model. When model_format is `Frontend::ONNX`, only model_file is required, e.g. `yolov5lite.onnx`; when model_format is `Frontend::PADDLE`, both model_file and params_file are required.
+
+**Parameters**
+
+> * **model_file**(str): path to the model file
+> * **params_file**(str): path to the parameters file
+> * **runtime_option**(RuntimeOption): backend inference configuration; the default configuration is used if not set
+> * **model_format**(Frontend): model format
+
+#### Predict function
+> ```
+> YOLOv5Lite::Predict(cv::Mat* im, DetectionResult* result,
+>                     float conf_threshold = 0.25,
+>                     float nms_iou_threshold = 0.5)
+> ```
+> Prediction interface: takes an image as input and returns the detection results.
+>
+> **Parameters**
+>
+> > * **im**: input image, in HWC, BGR format
+> > * **result**: detection results, including the boxes and each box's confidence
+> > * **conf_threshold**: confidence threshold for filtering detection boxes
+> > * **nms_iou_threshold**: IoU threshold used during NMS
+
+See [cpp/yolov5lite.cc](cpp/yolov5lite.cc) for example code.
+
+## Other APIs
+
+- [RuntimeOption configuration for model deployment](../../../docs/api/runtime_option.md)
diff --git a/model_zoo/vision/yolov5lite/cpp/CMakeLists.txt b/model_zoo/vision/yolov5lite/cpp/CMakeLists.txt
new file mode 100644
index 0000000000..855076a089
--- /dev/null
+++ b/model_zoo/vision/yolov5lite/cpp/CMakeLists.txt
@@ -0,0 +1,17 @@
+PROJECT(yolov5lite_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.16)
+
+# For older ABI environments, build compatibly with the following line
+# add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+# Path of the downloaded and extracted fastdeploy library
+set(FASTDEPLOY_INSTALL_DIR ${PROJECT_SOURCE_DIR}/fastdeploy-linux-x64-0.3.0/)
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Add the FastDeploy dependency headers
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(yolov5lite_demo ${PROJECT_SOURCE_DIR}/yolov5lite.cc)
+# Link the FastDeploy libraries
+target_link_libraries(yolov5lite_demo ${FASTDEPLOY_LIBS})
diff --git a/model_zoo/vision/yolov5lite/cpp/README.md b/model_zoo/vision/yolov5lite/cpp/README.md
new file mode 100644
index 0000000000..495fb81455
--- /dev/null
+++ b/model_zoo/vision/yolov5lite/cpp/README.md
@@ -0,0 +1,117 @@
+# Compiling the YOLOv5Lite Example
+
+The currently supported model version is [YOLOv5-Lite-v1.4](https://github.com/ppogg/YOLOv5-Lite/releases/tag/v1.4).
+
+## Getting the ONNX file
+- Automatically
+  Visit the official [YOLOv5Lite](https://github.com/ppogg/YOLOv5-Lite) GitHub repository, follow its instructions to download and install, then download a `yolov5-lite-xx.onnx` model (tip: the officially provided ONNX files currently do not contain the decode module)
+  ```
+  # Download a yolov5-lite model file (.onnx)
+  Download from https://drive.google.com/file/d/1bJByk9eoS6pv8Z3N4bcLRCV3i7uk24aU/view
+  The official repo also offers Baidu Cloud downloads
+  ```
+
+- Manually
+
+  Visit the official [YOLOv5Lite](https://github.com/ppogg/YOLOv5-Lite) GitHub repository, follow its instructions to download and install, download a `yolov5-lite-xx.pt` model, and use `export.py` to produce an `onnx` file.
+
+  - Exporting an ONNX file with the decode module
+
+    First, modify the code following the fix in [YOLOv5-Lite#189](https://github.com/ppogg/YOLOv5-Lite/pull/189).
+
+    ```
+    # Download a yolov5-lite model file (.pt)
+    Download from https://drive.google.com/file/d/1oftzqOREGqDCerf7DtD5BZp9YWELlkMe/view
+    The official repo also offers Baidu Cloud downloads
+
+    # Export the onnx file
+    python export.py --grid --dynamic --concat --weights PATH/TO/yolov5-lite-xx.pt
+
+    # Move the onnx file to the demo directory
+    cp PATH/TO/yolov5lite.onnx PATH/TO/model_zoo/vision/yolov5lite/
+    ```
+  - Exporting an ONNX file without the decode module (no code changes needed)
+
+    ```
+    # Download a yolov5-lite model file
+    Download from https://drive.google.com/file/d/1oftzqOREGqDCerf7DtD5BZp9YWELlkMe/view
+    The official repo also offers Baidu Cloud downloads
+
+    # Export the onnx file
+    python export.py --grid --dynamic --weights PATH/TO/yolov5-lite-xx.pt
+
+    # Move the onnx file to the demo directory
+    cp PATH/TO/yolov5lite.onnx PATH/TO/model_zoo/vision/yolov5lite/
+    ```
+
+## Choosing how the ONNX file is handled
+
+If the ONNX file contains the decode module, set `model.is_decode_exported = true` (uncomment line 27 of yolov5lite.cc)
+
+If the ONNX file does not contain the decode module, nothing needs to be done; the default is `model.is_decode_exported = false`
+
+## Running the demo
+
+```
+# Download and extract the prebuilt library
+wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz
+tar xvf fastdeploy-linux-x64-0.0.3.tgz
+
+# Build the example
+mkdir build && cd build
+cmake ..
+make -j
+
+# Move the onnx file to the build directory
+cp PATH/TO/yolov5lite.onnx PATH/TO/model_zoo/vision/yolov5lite/cpp/build/
+
+# Download the test image
+wget https://raw.githubusercontent.com/ppogg/YOLOv5-Lite/master/cpp_demo/mnn/test.jpg
+
+# Run
+./yolov5lite_demo
+```
+
+After it finishes, the visualized result is saved locally as `vis_result.jpg` and the detection boxes are printed to the terminal as follows
+```
+DetectionResult: [xmin, ymin, xmax, ymax, score, label_id]
+1289.729126,698.414612, 1404.110229, 1023.949524, 0.893141, 0
+300.958649,1027.166992, 449.921753, 1299.823608, 0.887509, 0
+627.481201,823.830750, 718.942078, 1133.402344, 0.885308, 0
+152.969437,1147.352905, 257.228424, 1301.652710, 0.877009, 0
+512.867188,773.371094, 649.768494, 1123.529785, 0.870583, 0
+906.801147,508.160278, 997.325867, 825.934509, 0.867381, 0
+307.480988,87.785973, 408.681732, 387.337463, 0.860646, 0
+783.116821,492.420319, 871.741028, 774.283691, 0.851574, 0
+1347.626343,190.911758, 1452.582031, 459.044617, 0.837095, 0
+3.035009,3.509769, 97.237442, 257.884094, 0.835671, 0
+261.199738,303.971527, 371.036041, 569.222595, 0.834187, 0
+1170.358032,722.587219, 1284.564087, 1036.034302, 0.833685, 0
+660.728333,476.764618, 760.990723, 783.636414, 0.823469, 0
+777.628906,815.975098, 886.895935, 1115.206421, 0.820669, 0
+415.902740,983.790283, 543.582764, 1300.361206, 0.791539, 0
+132.273209,40.751694, 210.614563, 285.128174, 0.790815, 0
+1331.930664,370.903687, 1446.262573, 638.119202, 0.773755, 0
+1254.425293,31.073910, 1352.297241, 312.583282, 0.743923, 0
+915.965088,310.556458, 1031.921265, 624.672302, 0.696823, 0
+499.573517,362.165588, 595.503296, 624.872070, 0.678821, 0
+956.890747,76.389160, 1068.599609, 340.183533, 0.656648, 0
+452.388977,320.288269, 532.330688, 593.987915, 0.652459, 0
+488.305664,1028.187012, 565.136719, 1179.688477, 0.629574, 24
+855.175781,868.482422, 916.516113, 988.196777, 0.555574, 26
+1321.689453,1.638852, 1384.584961, 99.413322, 0.504122, 0
+845.324707,531.824768, 875.939941, 614.515198, 0.472173, 26
+1342.546387,2.096432, 1420.351929, 98.888016, 0.463313, 0
+990.747070,635.389221, 1018.249512, 695.264709, 0.444000, 26
+956.799316,120.643112, 1015.100098, 242.920944, 0.442043, 26
+560.449219,401.270538, 607.763672, 522.486389, 0.434484, 26
+1329.199219,372.522980, 1443.054199, 635.315979, 0.399014, 26
+956.140137,88.526413, 1047.509766, 305.213409, 0.367863, 26
+1379.296875,852.808594, 1406.909180, 916.456055, 0.366000, 26
+1331.909180,468.433624, 1369.299316, 532.044495, 0.352329, 26
+864.880371,915.723633, 916.223145, 990.979980, 0.325205, 26
+260.778809,341.724640, 322.229004, 442.432648, 0.320724, 24
+1271.154785,77.393600, 1336.230469, 186.194870, 0.307823, 26
+```
diff --git a/model_zoo/vision/yolov5lite/cpp/yolov5lite.cc b/model_zoo/vision/yolov5lite/cpp/yolov5lite.cc
new file mode 100644
index 0000000000..206143f52b
--- /dev/null
+++ b/model_zoo/vision/yolov5lite/cpp/yolov5lite.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+int main() {
+  namespace vis = fastdeploy::vision;
+  auto model = vis::ppogg::YOLOv5Lite("yolov5lite.onnx");
+  if (!model.Initialized()) {
+    std::cerr << "Init Failed." << std::endl;
+    return -1;
+  }
+  cv::Mat im = cv::imread("test.jpg");
+  cv::Mat vis_im = im.clone();
+  // If the ONNX file contains the decode module, change this parameter
+  // model.is_decode_exported = true;
+
+  vis::DetectionResult res;
+  if (!model.Predict(&im, &res)) {
+    std::cerr << "Prediction Failed." << std::endl;
+    return -1;
+  }
+
+  // Print the detection results
+  std::cout << res.Str() << std::endl;
+
+  // Visualize the prediction results
+  vis::Visualize::VisDetection(&vis_im, res);
+  cv::imwrite("vis_result.jpg", vis_im);
+  return 0;
+}
diff --git a/model_zoo/vision/yolov5lite/yolov5lite.py b/model_zoo/vision/yolov5lite/yolov5lite.py
new file mode 100644
index 0000000000..683db22f92
--- /dev/null
+++ b/model_zoo/vision/yolov5lite/yolov5lite.py
@@ -0,0 +1,24 @@
+import fastdeploy as fd
+import cv2
+
+# Download the test image
+test_jpg_url = "https://raw.githubusercontent.com/ppogg/YOLOv5-Lite/master/cpp_demo/mnn/test.jpg"
+fd.download(test_jpg_url, ".", show_progress=True)
+
+# Load the model
+model = fd.vision.ppogg.YOLOv5Lite("yolov5lite.onnx")
+
+# If the ONNX file contains the decode module, change this parameter
+# model.is_decode_exported = True
+
+# Predict on the image
+im = cv2.imread("test.jpg")
+result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)
+
+# Visualize the results
+fd.vision.visualize.vis_detection(im, result)
+cv2.imwrite("vis_result.jpg", im)
+
+# Print the prediction results
+print(result)
+print(model.runtime_option)