From cd74b20759d9f41c5a0064d6c9ecdaf160594ff0 Mon Sep 17 00:00:00 2001
From: cc <52520497+juncaipeng@users.noreply.github.com>
Date: Fri, 2 Apr 2021 14:46:14 +0800
Subject: [PATCH] Add more ops to calculate output scales (#32036)

---
 .../slim/quantization/imperative/qat.py       |  2 +-
 .../slim/quantization/imperative/utils.py     | 34 +++++++------------
 2 files changed, 13 insertions(+), 23 deletions(-)

diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
index f4620ff00013c..66b11d1f17ad4 100644
--- a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
+++ b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
@@ -468,7 +468,7 @@ def _is_target_layer(self, layer):
         """
         Whether the layer needs to calculate output scales.
         """
-        return isinstance(layer, tuple(utils.quant_output_layers_map.values())) \
+        return isinstance(layer, utils.quant_output_layers) \
             or ('quantized' in layer.full_name() and \
                 'quantized_noweight' not in layer.full_name())
 
diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py b/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py
index f45eb8c97f419..004e1c1aa9bc5 100644
--- a/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py
+++ b/python/paddle/fluid/contrib/slim/quantization/imperative/utils.py
@@ -43,28 +43,18 @@
     "fake_quantize_dequantize_moving_average_abs_max"
 ]
 
-quant_output_layers_map = {
-    'Conv2D': paddle.nn.Conv2D,
-    'Conv2DTranspose': paddle.nn.Conv2DTranspose,
-    'Linear': paddle.nn.Linear,
-    'AdaptiveAvgPool2D': paddle.nn.AdaptiveAvgPool2D,
-    'AdaptiveMaxPool2D': paddle.nn.AdaptiveMaxPool2D,
-    'AvgPool2D': paddle.nn.AvgPool2D,
-    'MaxPool2D': paddle.nn.MaxPool2D,
-    'BatchNorm': paddle.nn.BatchNorm,
-    'BatchNorm2D': paddle.nn.BatchNorm2D,
-    'SyncBatchNorm': paddle.nn.SyncBatchNorm,
-    'ELU': paddle.nn.ELU,
-    'GELU': paddle.nn.GELU,
-    'LeakyReLU': paddle.nn.LeakyReLU,
-    'PReLU': paddle.nn.PReLU,
-    'ReLU': paddle.nn.ReLU,
-    'ReLU6': paddle.nn.ReLU6,
-    'Sigmoid': paddle.nn.Sigmoid,
-    'Softmax': paddle.nn.Softmax,
-    'Tanh': paddle.nn.Tanh,
-    'Swish': paddle.nn.Swish,
-}
+quant_output_layers = (
+    paddle.nn.Conv2D, paddle.nn.Conv2DTranspose, paddle.nn.Linear,
+    paddle.nn.AdaptiveAvgPool2D, paddle.nn.AdaptiveMaxPool2D,
+    paddle.nn.AvgPool2D, paddle.nn.MaxPool2D, paddle.nn.BatchNorm,
+    paddle.nn.BatchNorm2D, paddle.nn.LayerNorm, paddle.nn.SyncBatchNorm,
+    paddle.nn.ELU, paddle.nn.GELU, paddle.nn.Hardshrink, paddle.nn.Hardsigmoid,
+    paddle.nn.Hardswish, paddle.nn.Hardtanh, paddle.nn.LeakyReLU,
+    paddle.nn.LogSigmoid, paddle.nn.LogSoftmax, paddle.nn.Maxout,
+    paddle.nn.PReLU, paddle.nn.ReLU, paddle.nn.ReLU6, paddle.nn.SELU,
+    paddle.nn.Sigmoid, paddle.nn.Softmax, paddle.nn.Softplus,
+    paddle.nn.Softshrink, paddle.nn.Softsign, paddle.nn.Swish, paddle.nn.Tanh,
+    paddle.nn.Tanhshrink, paddle.nn.ThresholdedReLU, paddle.nn.Upsample)
 
 weight_op_types = [
     "conv2d", "depthwise_conv2d", "matmul", "conv2d_transpose",
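
Note (editorial sketch, not part of the patch): the dict-to-tuple refactor works
because Python's isinstance() accepts a tuple of classes directly, so call sites
such as _is_target_layer no longer need to build
tuple(quant_output_layers_map.values()) on every call. A minimal illustration,
assuming paddlepaddle is installed; the tuple below is abbreviated for brevity:

    import paddle

    # Abbreviated stand-in for utils.quant_output_layers; the real tuple
    # lists every layer type whose output scale should be recorded.
    quant_output_layers = (paddle.nn.Conv2D, paddle.nn.ReLU, paddle.nn.Softmax)

    # isinstance() tests against all classes in the tuple in one call,
    # which is why a plain tuple of classes can replace the name->class dict.
    print(isinstance(paddle.nn.ReLU(), quant_output_layers))     # True
    print(isinstance(paddle.nn.Dropout(), quant_output_layers))  # False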