Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix missing trt_backend option & remove unneeded data layout check in Cast #14

Merged
merged 8 commits into from
Jul 12, 2022
11 changes: 11 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1 +1,12 @@
fastdeploy/libs/lib*
build
cmake-build-debug
cmake-build-release
.vscode
FastDeploy.cmake
fastdeploy/core/config.h
build-debug.sh
*dist
fastdeploy.egg-info
.setuptools-cmake-build
fastdeploy/version.py
3 changes: 2 additions & 1 deletion fastdeploy/backends/tensorrt/trt_backend.cc
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,8 @@ std::vector<int> toVec(const nvinfer1::Dims& dim) {
return out;
}

bool TrtBackend::InitFromTrt(const std::string& trt_engine_file) {
bool TrtBackend::InitFromTrt(const std::string& trt_engine_file,
const TrtBackendOption& option) {
if (initialized_) {
FDERROR << "TrtBackend is already initlized, cannot initialize again."
<< std::endl;
Expand Down
3 changes: 2 additions & 1 deletion fastdeploy/backends/tensorrt/trt_backend.h
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,8 @@ class TrtBackend : public BaseBackend {
bool InitFromOnnx(const std::string& model_file,
const TrtBackendOption& option = TrtBackendOption(),
bool from_memory_buffer = false);
bool InitFromTrt(const std::string& trt_engine_file);
bool InitFromTrt(const std::string& trt_engine_file,
const TrtBackendOption& option = TrtBackendOption());

bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs);

Expand Down
34 changes: 22 additions & 12 deletions fastdeploy/vision/common/processors/cast.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,30 +18,40 @@ namespace fastdeploy {
namespace vision {

// Cast the CPU image to the requested element type (dtype_).
//
// Supported dtype_ values are "float" (CV_32F) and "double" (CV_64F);
// any other value logs a warning and leaves the data untouched.
// The conversion is skipped when the matrix already has the target type,
// avoiding a redundant pass over the data. No layout restriction is
// imposed: convertTo() changes only the element type, so it is valid for
// both HWC and CHW data.
//
// Returns true always (unsupported dtypes are treated as a no-op, not an
// error).
bool Cast::CpuRun(Mat* mat) {
  cv::Mat* im = mat->GetCpuMat();
  int c = im->channels();
  if (dtype_ == "float") {
    // Only convert when the type actually differs.
    if (im->type() != CV_32FC(c)) {
      im->convertTo(*im, CV_32FC(c));
    }
  } else if (dtype_ == "double") {
    if (im->type() != CV_64FC(c)) {
      im->convertTo(*im, CV_64FC(c));
    }
  } else {
    // Best-effort: unknown target types are skipped rather than failing.
    FDLogger() << "[WARN] Cast not support for " << dtype_
               << " now! will skip this operation."
               << std::endl;
  }
  return true;
}

#ifdef ENABLE_OPENCV_CUDA
// Cast the GPU (CUDA) image to the requested element type (dtype_).
//
// Mirrors Cast::CpuRun but operates on cv::cuda::GpuMat. Supported
// dtype_ values are "float" (CV_32F) and "double" (CV_64F); any other
// value logs a warning and leaves the data untouched. The conversion is
// skipped when the matrix already has the target type. No layout
// restriction is imposed, since convertTo() only changes the element
// type.
//
// Returns true always (unsupported dtypes are a no-op, not an error).
bool Cast::GpuRun(Mat* mat) {
  cv::cuda::GpuMat* im = mat->GetGpuMat();
  int c = im->channels();
  if (dtype_ == "float") {
    // Only convert when the type actually differs.
    if (im->type() != CV_32FC(c)) {
      im->convertTo(*im, CV_32FC(c));
    }
  } else if (dtype_ == "double") {
    if (im->type() != CV_64FC(c)) {
      im->convertTo(*im, CV_64FC(c));
    }
  } else {
    // Best-effort: unknown target types are skipped rather than failing.
    FDLogger() << "[WARN] Cast not support for " << dtype_
               << " now! will skip this operation."
               << std::endl;
  }
  return true;
}
Expand Down
2 changes: 1 addition & 1 deletion fastdeploy/vision/ppcls/model.cc
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ bool Model::BuildPreprocessPipelineFromConfig() {
return false;
}
auto preprocess_cfg = cfg["PreProcess"]["transform_ops"];
processors_.push_back(std::make_shared<RGB2BGR>());
processors_.push_back(std::make_shared<BGR2RGB>());
for (const auto& op : preprocess_cfg) {
FDASSERT(op.IsMap(),
"Require the transform information in yaml be Map type.");
Expand Down