Commit

fix conv2d_transpose trt bugs (PaddlePaddle#33242)

cryoco committed Jun 7, 2021
1 parent f17d643 commit ac0289d

Showing 2 changed files with 36 additions and 7 deletions.
19 changes: 12 additions & 7 deletions paddle/fluid/inference/tensorrt/convert/conv2d_op.cc
@@ -103,11 +103,18 @@ void ConvertConv2d(TensorRTEngine* engine, const framework::proto::OpDesc& op,

TensorRTEngine::Weight bias{nvinfer1::DataType::kFLOAT,
static_cast<void*>(bias_data), bias_size};
auto* layer = fadd_layer(const_cast<nvinfer1::ITensor*>(X), n_output, n_input,
nv_ksize, weight, bias);
PADDLE_ENFORCE_NOT_NULL(layer,
platform::errors::Fatal("TensorRT create conv2d"
" layer error."));
// In conv2d_transpose and depthwise_conv2d_transpose,
// output channels = filter_dims[1] * groups
auto* layer = (op_desc.Type() == "conv2d_transpose" ||
op_desc.Type() == "depthwise_conv2d_transpose")
? fadd_layer(const_cast<nvinfer1::ITensor*>(X),
n_input * groups, nv_ksize, weight, bias)
: fadd_layer(const_cast<nvinfer1::ITensor*>(X), n_output,
nv_ksize, weight, bias);

PADDLE_ENFORCE_NOT_NULL(
layer, platform::errors::Fatal("TensorRT create conv2d/conv2d_transpose"
" layer failed."));
layer->setStride(nv_strides);
layer->setPadding(nv_paddings);
layer->setNbGroups(groups);
@@ -134,7 +141,6 @@ class Conv2dOpConverter : public OpConverter {
ConvertConv2d(
engine_, op, scope, test_mode,
[&](nvinfer1::ITensor* inputs, int n_output, /* Conv output maps */
int n_input, /* Conv input maps */
nvinfer1::DimsHW& ksize, TensorRTEngine::Weight& weight,
TensorRTEngine::Weight& bias) -> nvinfer1::IConvolutionLayer* {
auto* layer =
@@ -156,7 +162,6 @@ class Deconv2dOpConverter : public OpConverter {
ConvertConv2d(
engine_, op, scope, test_mode,
[&](nvinfer1::ITensor* inputs, int n_output, /* Deconv input maps */
int n_input, /* Deconv output maps */
nvinfer1::DimsHW& ksize, TensorRTEngine::Weight& weight,
TensorRTEngine::Weight& bias) -> nvinfer1::IDeconvolutionLayer* {
auto* layer =
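
The core of the fix is which filter dimension supplies the layer's output map count: the fadd_layer callbacks no longer take a separate n_input argument, and for conv2d_transpose / depthwise_conv2d_transpose the converter now passes n_input * groups, which per the new comment equals filter_dims[1] * groups, instead of n_output. A minimal sketch of that channel arithmetic (plain Python, not part of the commit; the helper name and example shapes are illustrative, the filter layouts are Paddle's conventional ones):

# conv2d filter layout:           [C_out, C_in // groups, kH, kW]
# conv2d_transpose filter layout: [C_in,  C_out // groups, kH, kW]
def output_channels(op_type, filter_dims, groups):
    if op_type in ("conv2d_transpose", "depthwise_conv2d_transpose"):
        # what the fixed converter now hands to the TensorRT deconvolution layer
        return filter_dims[1] * groups
    # unchanged conv2d / depthwise_conv2d path
    return filter_dims[0]

# Grouped transposed conv as in the new test below: 6 input channels,
# 6 groups, 12 output channels -> filter shape [6, 2, 4, 4].
print(output_channels("conv2d_transpose", [6, 2, 4, 4], groups=6))  # 12
print(output_channels("conv2d", [12, 1, 6, 6], groups=6))           # 12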
24 changes: 24 additions & 0 deletions (second changed file: Python unit test for the TensorRT conv2d/conv2d_transpose subgraph pass)
@@ -36,6 +36,7 @@ def setUp(self):
groups=self.conv_groups,
padding=self.conv_padding,
bias_attr=False,
use_cudnn=self.use_cudnn,
act=None)
self.feeds = {
"data": np.random.random([1, 6, 64, 64]).astype("float32"),
@@ -50,6 +51,7 @@ def set_params(self):
self.conv_filter_size = 6
self.conv_groups = 3
self.conv_padding = [1, 1]
self.use_cudnn = True

def test_check_output(self):
if core.is_compiled_with_cuda():
@@ -65,6 +67,7 @@ def set_params(self):
self.conv_filter_size = 6
self.conv_groups = 3
self.conv_padding = 'VALID'
self.use_cudnn = True


class TensorRTSubgraphPassConvSamePaddingTest(InferencePassTest):
@@ -73,6 +76,7 @@ def set_params(self):
self.conv_filter_size = 6
self.conv_groups = 3
self.conv_padding = 'SAME'
self.use_cudnn = True


class TensorRTSubgraphPassDepthwiseConvTest(TensorRTSubgraphPassConvTest):
@@ -81,6 +85,16 @@ def set_params(self):
self.conv_filter_size = 6
self.conv_groups = 6
self.conv_padding = [1, 1]
self.use_cudnn = False


class TensorRTSubgraphPassDepthwiseConv2Test(TensorRTSubgraphPassConvTest):
def set_params(self):
self.conv_num_filters = 12
self.conv_filter_size = 6
self.conv_groups = 6
self.conv_padding = [1, 1]
self.use_cudnn = False
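
The new DepthwiseConv2 case differs from the depthwise case above only in that num_filters (12) is twice groups (6), i.e. a grouped rather than strictly depthwise convolution, run with use_cudnn=False. A quick shape check (plain Python, not part of the commit; the helper name is illustrative and the layout is Paddle's conventional conv2d filter layout):

def conv_filter_shape(in_channels, num_filters, groups, ksize):
    # Paddle conv2d filter layout: [num_filters, in_channels // groups, kH, kW]
    return [num_filters, in_channels // groups, ksize, ksize]

print(conv_filter_shape(6, 6, 6, 6))   # DepthwiseConvTest:  [6, 1, 6, 6]
print(conv_filter_shape(6, 12, 6, 6))  # DepthwiseConv2Test: [12, 1, 6, 6]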


class TensorRTSubgraphPassConvTransposeTest(InferencePassTest):
@@ -151,6 +165,16 @@ def set_params(self):
self.use_cudnn = True


class TensorRTSubgraphPassConvTranspose2Test(
TensorRTSubgraphPassConvTransposeTest):
def set_params(self):
self.conv_num_filters = 12
self.conv_filter_size = 4
self.conv_groups = 6
self.conv_padding = [1, 1]
self.use_cudnn = False


class TensorRTSubgraphPassDepthwiseConvTransposeTest(
TensorRTSubgraphPassConvTransposeTest):
def set_params(self):
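
As a standalone usage sketch (not part of the commit; it mirrors the parameters of the new TensorRTSubgraphPassConvTranspose2Test, assumes a [1, 6, 64, 64] input like the conv tests above, and assumes a Paddle build of this era where the fluid static API is available), the grouped, non-cuDNN transposed convolution that the new test exercises can be built like this:

import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    data = fluid.data(name="data", shape=[1, 6, 64, 64], dtype="float32")
    # 6 input channels, 6 groups, 12 output channels: the filter is [6, 2, 4, 4],
    # so the true output channel count is filter_dims[1] * groups = 12.
    out = fluid.layers.conv2d_transpose(
        input=data,
        num_filters=12,
        filter_size=4,
        groups=6,
        padding=[1, 1],
        bias_attr=False,
        use_cudnn=False,
        act=None)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)
feed = {"data": np.random.random([1, 6, 64, 64]).astype("float32")}
result = exe.run(main_prog, feed=feed, fetch_list=[out])[0]
print(result.shape)  # channel dimension should be 12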
