Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

disable trt conv if stride >1 and trt 6.x #32997

Merged
merged 11 commits into from
May 25, 2021
5 changes: 0 additions & 5 deletions paddle/fluid/inference/tensorrt/convert/activation_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -52,11 +52,6 @@ class ActivationOpConverter : public OpConverter {
engine_->GetITensor(op_desc.Input("X")[0]);

auto op_pair = ops.find(op_type_);
if (op_pair == ops.end()) {
PADDLE_THROW(platform::errors::Fatal(
"Wrong activation op type, the trt do not support the %s act type.",
op_type_));
}

nvinfer1::IActivationLayer* layer = TRT_ENGINE_ADD_LAYER(
engine_, Activation, *const_cast<nvinfer1::ITensor*>(input_tensor),
Expand Down
10 changes: 0 additions & 10 deletions paddle/fluid/inference/tensorrt/convert/affine_channel_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -55,16 +55,6 @@ class AffineChannelOpConverter : public OpConverter {
auto* bias_t = bias_v->GetMutable<framework::LoDTensor>();
float* bias_ptr = engine_->GetWeightCPUData(bias_name, bias_t, false);

auto data_layout = framework::StringToDataLayout(
BOOST_GET_CONST(std::string, op_desc.GetAttr("data_layout")));

PADDLE_ENFORCE_EQ(
data_layout, framework::DataLayout::kNCHW,
platform::errors::InvalidArgument(
"TensorRT affine channel converter can only convert NCHW format. "
"Other format should be run in fluid mode. Report a bug on github "
"issue if you see this line."));

// TensorRT's scale layer only supports spatial dims >= 2,
// so NHWC is not available (its spatial dims == 0)
const int channel_axis = engine_->with_dynamic_shape();
Expand Down
4 changes: 0 additions & 4 deletions paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,6 @@ static bool CheckDims(const nvinfer1::Dims& dims_x,
return false;
}
for (int i = 0; i < dims_x.nbDims; i++) {
// conservative judgment
if (dims_x.d[i] == -1 || dims_y.d[i] == -1) {
return false;
}
if (dims_x.d[i] != dims_y.d[i]) {
return false;
}
Expand Down
27 changes: 27 additions & 0 deletions paddle/fluid/inference/tensorrt/op_teller.cc
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,19 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
BOOST_GET_CONST(std::vector<int>, desc.GetAttr("paddings"));

if (paddings.size() > 2) return false;
// strides > 1 is only supported by TRT 7.0 and above
#if !IS_TRT_VERSION_GE(7000)
if (desc.HasAttr("strides")) {
const std::vector<int> strides =
BOOST_GET_CONST(std::vector<int>, desc.GetAttr("strides"));
// there is no issue if strides.size() is less than 2
if (strides.size() > 1) {
for (size_t i = 0; i < strides.size(); i++) {
if (strides[i] > 1) return false;
}
}
}
#endif
}

if (op_type == "pool2d") {
Expand Down Expand Up @@ -225,6 +238,20 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
<< desc.Output("Output").size() << " output.";
return false;
}

// strides > 1 is only supported by TRT 7.0 and above
#if !IS_TRT_VERSION_GE(7000)
if (desc.HasAttr("strides")) {
const std::vector<int> strides =
BOOST_GET_CONST(std::vector<int>, desc.GetAttr("strides"));
// there is no issue if strides.size() is less than 2
if (strides.size() > 1) {
for (size_t i = 0; i < strides.size(); i++) {
if (strides[i] > 1) return false;
}
}
}
#endif
}

if (op_type == "matmul") {
Expand Down