diff --git a/paddle/fluid/framework/new_executor/instruction/onednn/onednn_instruction.cc b/paddle/fluid/framework/new_executor/instruction/onednn/onednn_instruction.cc index 3d61054ad2620..478db30919052 100644 --- a/paddle/fluid/framework/new_executor/instruction/onednn/onednn_instruction.cc +++ b/paddle/fluid/framework/new_executor/instruction/onednn/onednn_instruction.cc @@ -86,6 +86,14 @@ static phi::Attribute ConvertPirAttribute2RuntimeAttribute( } } return vec_res; + } else if (attr_type_name == "paddle::dialect::IntArrayAttribute") { + std::vector<int64_t> int_array = + attr.dyn_cast<paddle::dialect::IntArrayAttribute>().data().GetData(); + return int_array; + } else if (attr_type_name == "paddle::dialect::DataTypeAttribute") { + phi::DataType dtype = + attr.dyn_cast<paddle::dialect::DataTypeAttribute>().data(); + return dtype; } else { PADDLE_THROW(phi::errors::Unimplemented( "ConvertPirAttribute2RuntimeAttribute not support [%s] ", diff --git a/paddle/fluid/pir/dialect/operator/ir/ops_onednn_extra.yaml b/paddle/fluid/pir/dialect/operator/ir/ops_onednn_extra.yaml index ec967b1d23ce5..bac190236306a 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops_onednn_extra.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops_onednn_extra.yaml @@ -17,8 +17,6 @@ # - op : add_n -# - op : add_raw - - op : batch_norm extra_args : bool fuse_with_relu=false data_format_tensors : x @@ -27,13 +25,15 @@ extra_args : bool fuse_with_relu=false data_format_tensors : x, out_grad -# - op : bilinear_interp +- op : bilinear_interp # - op : cast -# - op : clip +- op : clip + extra_args : str mkldnn_data_type="float32" -# - op : clip_grad +- op : clip_grad + extra_args : str mkldnn_data_type="float32" # - op : concat @@ -59,19 +59,17 @@ # - op : depthwise_conv2d_grad -# - op : divide - -# - op : divide_grad +- op : divide -# - op : divide_raw +- op : divide_grad -# - op : elu +- op : elu -# - op : elu_grad +- op : elu_grad -# - op : exp +- op : exp -# - op : exp_grad +- op : exp_grad # - op : expand @@ -87,7 +85,7 @@ # - op : flatten2_grad -# - op : full +- op : full - op : fused_conv2d extra_args : float fuse_alpha = 0.0, float fuse_beta = 0.0, float scale_in=1.0, float scale_out=1.0, float scale_in_eltwise=1.0, float[] scale_weights={1.0f} @@ -115,23 +113,25 @@ # - op : fusion_lstm -# - op : gaussian +- op : gaussian -# - op : gelu +- op : gelu + extra_args : str mkldnn_data_type="float32" -# - op : gelu_grad +- op : gelu_grad + extra_args : str mkldnn_data_type="float32" -# - op : hardswish +- op : hardswish -# - op : hardswish_grad +- op : hardswish_grad # - op : layer_norm -# - op : leaky_relu +- op : leaky_relu -# - op : leaky_relu_grad +- op : leaky_relu_grad -# - op : log_softmax +- op : log_softmax - op : lrn extra_args : bool is_test=false @@ -153,33 +153,25 @@ # - op : matmul_with_flatten_grad -# - op : max - -# - op : max_raw - -# - op : mean - -# - op : mean_grad +- op : max -# - op : mean_raw +- op : mean -# - op : min +- op : mean_grad -# - op : min_raw +- op : min -# - op : mish +- op : mish -# - op : mish_grad +- op : mish_grad -# - op : multi_gru +- op : multi_gru -# - op : multiply +- op : multiply -# - op : multiply_grad +- op : multiply_grad -# - op : multiply_raw - -# - op : nearest_interp +- op : nearest_interp # - op : pad @@ -215,21 +207,22 @@ # - op : reshape2_grad -# - op : round +- op : round -# - op : scale +- op : scale -# - op : sgd +- op : sgd # - op : sgd_dense_param_sparse_grad # - op : shape +# extra_args : str mkldnn_data_type="float32" -# - op : shuffle_channel +- op : shuffle_channel -# - op : sigmoid +- op : sigmoid -# - op : sigmoid_grad +- op : 
sigmoid_grad # - op : slice @@ -239,15 +232,15 @@ # - op : softmax_grad -# - op : softplus +- op : softplus # - op : split # - op : split_with_num -# - op : sqrt +- op : sqrt -# - op : sqrt_grad +- op : sqrt_grad # - op : squeeze @@ -257,25 +250,23 @@ # - op : stack -# - op : subtract - -# - op : subtract_grad - -# - op : subtract_raw +- op : subtract -# - op : sum +- op : subtract_grad -# - op : sum_grad +- op : sum + extra_args : str mkldnn_data_type="float32" -# - op : sum_raw +- op : sum_grad + extra_args : str mkldnn_data_type="float32" # - op : swish # - op : swish_grad -# - op : tanh +- op : tanh -# - op : tanh_grad +- op : tanh_grad # - op : transpose diff --git a/test/legacy_test/test_activation_op.py b/test/legacy_test/test_activation_op.py index 2be625c84e2a3..0044a02941841 100644 --- a/test/legacy_test/test_activation_op.py +++ b/test/legacy_test/test_activation_op.py @@ -78,7 +78,7 @@ def setUp(self): self.convert_input_output() def test_check_output(self): - self.check_output() + self.check_output(check_pir_onednn=self.check_pir_onednn) def test_check_grad(self): if self.dtype == np.float16: @@ -128,7 +128,9 @@ def setUp(self): self.convert_input_output() def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): self.check_grad( @@ -137,6 +139,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def init_dtype(self): @@ -181,10 +184,18 @@ def setUp(self): self.convert_input_output() def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): - self.check_grad(['X'], 'Out', max_relative_error=0.006, check_pir=True) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.006, + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def init_dtype(self): self.dtype = np.complex64 @@ -255,10 +266,14 @@ def setUp(self): self.convert_input_output() def test_check_grad(self): - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestExpm1_Complex64(TestExpm1): @@ -266,10 +281,14 @@ def init_dtype(self): self.dtype = np.complex64 def test_check_grad(self): - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestExpm1_Complex128(TestExpm1_Complex64): @@ -390,7 +409,11 @@ def if_enable_cinn(self): pass def test_check_output(self): - self.check_output(check_pir=True, check_prim_pir=True) + self.check_output( + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad(self): if self.dtype == np.float16: @@ -402,6 +425,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) @@ -411,7 +435,11 @@ def init_dtype(self): def test_check_output(self): with paddle.static.scope_guard(paddle.static.Scope()): - self.check_output(check_prim=False, check_prim_pir=False) + self.check_output( + 
check_prim=False, + check_prim_pir=False, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad(self): self.check_grad( @@ -421,6 +449,7 @@ def test_check_grad(self): check_prim=False, check_pir=True, check_prim_pir=False, + check_pir_onednn=self.check_pir_onednn, ) @@ -430,7 +459,12 @@ def init_dtype(self): def test_check_grad(self): self.check_grad( - ['X'], 'Out', check_prim=False, check_pir=True, check_prim_pir=False + ['X'], + 'Out', + check_prim=False, + check_pir=True, + check_prim_pir=False, + check_pir_onednn=self.check_pir_onednn, ) @@ -473,7 +507,11 @@ def if_enable_cinn(self): def test_check_output(self): place = core.CUDAPlace(0) self.check_output_with_place( - place, check_prim=True, check_pir=True, check_prim_pir=True + place, + check_prim=True, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad(self): @@ -527,16 +565,26 @@ def if_enable_cinn(self): def test_check_output(self): if self.dtype == np.complex64 or self.dtype == np.complex128: - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) else: self.check_output( - check_prim=True, check_pir=True, check_prim_pir=True + check_prim=True, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad(self): # TODO(BeingGod): set `check_prim=True` when `fill_constant` supports `complex` dtype if self.dtype == np.complex64 or self.dtype == np.complex128: - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], + 'Out', + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) else: self.check_grad( ['X'], @@ -544,6 +592,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) @@ -638,7 +687,13 @@ def setUp(self): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', max_relative_error=0.008, check_pir=True) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.008, + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) class TestLogSigmoidComplex64(TestLogSigmoid): @@ -733,7 +788,9 @@ def setUp(self): self.convert_input_output() def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: @@ -746,6 +803,7 @@ def test_check_grad(self): check_prim=False, check_prim_pir=False, check_pir=True, + check_pir_onednn=self.check_pir_onednn, ) else: self.check_grad( @@ -754,6 +812,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def init_dtype(self): @@ -772,7 +831,9 @@ def init_dtype(self): def test_check_output(self): with paddle.static.scope_guard(paddle.static.Scope()): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestTanh_Complex128(TestTanh): @@ -781,7 +842,9 @@ def init_dtype(self): def test_check_output(self): with paddle.static.scope_guard(paddle.static.Scope()): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestTanh_ZeroDim(TestTanh): @@ -874,12 +937,16 @@ def setUp(self): self.convert_input_output() def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, 
check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) @test_with_pir_api def test_out_name(self): @@ -941,12 +1008,16 @@ def setUp(self): self.convert_input_output() def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestSinh_Complex64(TestSinh): @@ -1056,7 +1127,9 @@ def setUp(self): self.convert_input_output() def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: @@ -1064,10 +1137,19 @@ def test_check_grad(self): if self.dtype == np.complex64 or self.dtype == np.complex128: # Complex64 [CPU]: AssertionError: 0.006845869 not less than or equal to 0.005 self.check_grad( - ['X'], 'Out', max_relative_error=0.007, check_pir=True + ['X'], + 'Out', + max_relative_error=0.007, + check_pir=True, + check_pir_onednn=self.check_pir_onednn, ) else: - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], + 'Out', + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) class TestCosh_Complex64(TestCosh): @@ -1177,10 +1259,14 @@ def setUp(self): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestTanhshrink_ZeroDim(TestTanhshrink): @@ -1274,12 +1360,16 @@ def set_attrs(self): pass def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestHardShrink_threshold_negative(TestHardShrink): @@ -1450,12 +1540,16 @@ def setUp(self): self.attrs = {"lambda": threshold} def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestSoftshrink_ZeroDim(TestSoftshrink): @@ -1554,10 +1648,16 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def test_check_output(self): - self.check_output(check_prim=True, check_pir=True, check_prim_pir=True) + self.check_output( + check_prim=True, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) class TestSqrtPrimFp32(TestActivation): @@ -1585,10 +1685,15 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def test_check_output(self): - 
self.check_output(check_pir=True, check_prim_pir=True) + self.check_output( + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def init_dtype(self): self.dtype = np.float32 @@ -1636,7 +1741,12 @@ def if_enable_cinn(self): def test_check_output(self): place = core.CUDAPlace(0) - self.check_output_with_place(place, check_pir=True, check_prim_pir=True) + self.check_output_with_place( + place, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad(self): place = core.CUDAPlace(0) @@ -1681,6 +1791,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def test_check_output(self): @@ -1689,6 +1800,7 @@ def test_check_output(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) @@ -1721,6 +1833,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def test_check_output(self): @@ -1729,6 +1842,7 @@ def test_check_output(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def init_dtype(self): @@ -1765,7 +1879,12 @@ def if_enable_cinn(self): pass def test_check_output(self): - self.check_output(check_prim=True, check_pir=True, check_prim_pir=True) + self.check_output( + check_prim=True, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad(self): if self.dtype == np.float16: @@ -1777,6 +1896,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) @@ -1818,13 +1938,20 @@ def if_enable_cinn(self): pass def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return self.check_grad( - ['X'], 'Out', check_prim=True, check_pir=True, check_prim_pir=True + ['X'], + 'Out', + check_prim=True, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) @@ -1852,7 +1979,9 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) # The same reason with TestFloor def test_check_grad(self): @@ -1889,7 +2018,9 @@ def if_enable_cinn(self): pass def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) # the gradient on floor, ceil, round is undefined. 
# we return zero as gradient, but the numpy return nan @@ -1945,7 +2076,9 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: @@ -1959,6 +2092,7 @@ def test_check_grad(self): check_prim=False, max_relative_error=0.006, check_pir=True, + check_pir_onednn=self.check_pir_onednn, ) else: self.check_grad( @@ -1967,6 +2101,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def if_enable_cinn(self): @@ -2018,12 +2153,16 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestTan_float32(TestTan): @@ -2113,12 +2252,16 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestAcos_Complex64(TestAcos): @@ -2167,14 +2310,22 @@ def test_out_name(self): super().test_out_name() def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return # TODO(ScottWong98): set `check_prim=False` when `fill_any_like` supports `complex` dtype if self.dtype == np.complex64 or self.dtype == np.complex128: - self.check_grad(['X'], 'Out', check_prim=False, check_pir=True) + self.check_grad( + ['X'], + 'Out', + check_prim=False, + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) else: self.check_grad( ['X'], @@ -2182,6 +2333,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def if_enable_cinn(self): @@ -2227,12 +2379,16 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestAsin_Complex64(TestAsin): @@ -2274,7 +2430,9 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: @@ -2282,10 +2440,19 @@ def test_check_grad(self): if self.dtype == np.complex64: # Complex64[CPU]: AssertionError: 0.012431525 not less than or equal to 0.005 self.check_grad( - ['X'], 'Out', max_relative_error=0.02, check_pir=True + ['X'], + 'Out', + max_relative_error=0.02, + check_pir=True, + check_pir_onednn=self.check_pir_onednn, ) else: - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], + 
'Out', + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) class TestAcosh_Complex64(TestAcosh): @@ -2327,7 +2494,9 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: @@ -2335,10 +2504,19 @@ def test_check_grad(self): if self.dtype == np.complex64 or self.dtype == np.complex128: # Complex64 [CPU]: AssertionError: 0.006898686 not less than or equal to 0.005 self.check_grad( - ['X'], 'Out', max_relative_error=0.007, check_pir=True + ['X'], + 'Out', + max_relative_error=0.007, + check_pir=True, + check_pir_onednn=self.check_pir_onednn, ) else: - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], + 'Out', + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) class TestAsinh_Complex64(TestAsinh): @@ -2380,12 +2558,16 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestAtanh_Complex64(TestAtanh): @@ -2422,7 +2604,9 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): pass @@ -2457,11 +2641,21 @@ def test_check_grad(self): if self.dtype == np.float16: return self.check_grad( - ['X'], 'Out', check_prim=True, check_pir=True, check_prim_pir=True + ['X'], + 'Out', + check_prim=True, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def test_check_output(self): - self.check_output(check_prim=True, check_pir=True, check_prim_pir=True) + self.check_output( + check_prim=True, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def if_enable_cinn(self): pass @@ -2570,13 +2764,23 @@ def if_enable_cinn(self): pass def test_check_output(self): - self.check_output(check_prim=True, check_pir=True, check_prim_pir=True) + self.check_output( + check_prim=True, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad(self): if self.dtype == np.float16: return self.check_grad( - ['X'], 'Out', check_prim=True, check_pir=True, check_prim_pir=True + ['X'], + 'Out', + check_prim=True, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) @@ -2707,6 +2911,7 @@ def test_check_output(self): check_prim=True, check_pir=True, check_prim_pir=False, + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad(self): @@ -2718,6 +2923,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) @@ -2753,7 +2959,12 @@ def if_enable_cinn(self): pass def test_check_output(self): - self.check_output(check_prim=True, check_pir=True, check_prim_pir=True) + self.check_output( + check_prim=True, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad(self): if self.dtype == np.float16: @@ -2764,6 +2975,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + 
check_pir_onednn=self.check_pir_onednn, ) @@ -2860,12 +3072,16 @@ def setUp(self): self.attrs = {'t_min': t_min, 't_max': t_max} def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def ref_relu6(x, threshold=6.0): @@ -2899,7 +3115,9 @@ def init_shape(self): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestRelu6_ZeroDim(TestRelu6): @@ -3058,6 +3276,7 @@ def test_check_grad(self): check_prim_pir=True if self.dtype not in [np.complex64, np.complex128] else False, + check_pir_onednn=self.check_pir_onednn, ) def test_check_output(self): @@ -3069,6 +3288,7 @@ def test_check_output(self): check_prim_pir=True if self.dtype not in [np.complex64, np.complex128] else False, + check_pir_onednn=self.check_pir_onednn, ) @@ -3177,13 +3397,19 @@ def setUp(self): self.attrs = {'threshold': threshold} def test_check_output(self): - self.check_output(check_dygraph=False) + self.check_output( + check_dygraph=False, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return self.check_grad( - ['X'], 'Out', max_relative_error=0.02, check_dygraph=False + ['X'], + 'Out', + max_relative_error=0.02, + check_dygraph=False, + check_pir_onednn=self.check_pir_onednn, ) @@ -3217,7 +3443,9 @@ def init_shape(self): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def get_alpha(self): return 1.0 @@ -3338,7 +3566,9 @@ def init_shape(self): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestCELU_ZeroDim(TestCELU): @@ -3442,15 +3672,25 @@ def test_check_grad(self): return if self.dtype == np.complex64 or self.dtype == np.complex128: self.check_grad( - ['X'], 'Out', max_relative_error=0.03, check_pir=True + ['X'], + 'Out', + max_relative_error=0.03, + check_pir=True, + check_pir_onednn=self.check_pir_onednn, ) else: self.check_grad( - ['X'], 'Out', max_relative_error=0.01, check_pir=True + ['X'], + 'Out', + max_relative_error=0.01, + check_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestReciprocal_Complex64(TestReciprocal): @@ -3490,7 +3730,9 @@ def if_enable_cinn(self): pass def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: @@ -3501,6 +3743,7 @@ def test_check_grad(self): check_prim=True, check_pir=True, check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, ) @@ -3566,7 +3809,9 @@ def setUp(self): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, 
check_pir_onednn=self.check_pir_onednn + ) @test_with_pir_api def test_api(self): @@ -3646,12 +3891,16 @@ def setUp(self): self.convert_input_output() def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestLog10_ZeroDim(TestLog10): @@ -3734,12 +3983,16 @@ def setUp(self): self.convert_input_output() def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class Test_Log1p_Op_Fp16(unittest.TestCase): @@ -3849,10 +4102,20 @@ def setUp(self): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', max_relative_error=0.007, check_pir=True) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.007, + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_output(self): - self.check_output(check_pir=True, check_prim_pir=True) + self.check_output( + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) class TestSquare_Complex64(TestSquare): @@ -3902,7 +4165,12 @@ def init_dtype(self): def test_check_output(self): place = core.CUDAPlace(0) - self.check_output_with_place(place, check_pir=True, check_prim_pir=True) + self.check_output_with_place( + place, + check_pir=True, + check_prim_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad(self): place = core.CUDAPlace(0) @@ -3934,7 +4202,12 @@ def if_enable_cinn(self): pass def test_check_output(self): - self.check_output(check_prim=True, check_prim_pir=True, check_pir=True) + self.check_output( + check_prim=True, + check_prim_pir=True, + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad(self): if self.dtype == np.float16: @@ -3945,6 +4218,7 @@ def test_check_grad(self): check_prim=True, check_prim_pir=True, check_pir=True, + check_pir_onednn=self.check_pir_onednn, ) @@ -4028,10 +4302,14 @@ def setUp(self): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestSTanhScaleA(TestSTanh): @@ -4173,12 +4451,16 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestSoftplus_Complex64(TestSoftplus): @@ -4186,7 +4468,13 @@ def init_dtype(self): self.dtype = np.complex64 def test_check_grad(self): - self.check_grad(['X'], 'Out', max_relative_error=0.06, check_pir=True) + self.check_grad( + ['X'], + 'Out', + max_relative_error=0.06, + check_pir=True, + 
check_pir_onednn=self.check_pir_onednn, + ) class TestSoftplus_Complex128(TestSoftplus): @@ -4224,7 +4512,9 @@ def init_dtype(self): def test_check_output(self): place = core.CUDAPlace(0) - self.check_output_with_place(place, check_pir=True) + self.check_output_with_place( + place, check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): place = core.CUDAPlace(0) @@ -4319,12 +4609,16 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestSoftsign_Complex64(TestSoftsign): @@ -4426,10 +4720,14 @@ def init_shape(self): def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestThresholdedRelu_ZeroDim(TestThresholdedRelu): @@ -4531,12 +4829,16 @@ def set_attrs(self): pass def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestHardSigmoidFP32(TestHardSigmoid): @@ -4651,9 +4953,7 @@ def test_check_grad(self): if self.dtype == np.float16: return self.check_grad( - ['X'], - 'Out', - check_pir=True, + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn ) @@ -4753,12 +5053,16 @@ def init_shape(self): self.shape = [10, 12] def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad( + ['X'], 'Out', check_pir=True, check_pir_onednn=self.check_pir_onednn + ) class TestMish_ZeroDim(TestMish): @@ -4889,6 +5193,7 @@ def test_check_output(self): check_prim=check_prim, check_prim_pir=check_prim_pir, check_pir=check_pir, + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad(self): @@ -5089,6 +5394,7 @@ def test_check_output(self): check_prim=check_prim, check_pir=check_pir, check_prim_pir=check_prim_pir, + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad(self): diff --git a/test/legacy_test/test_elementwise_mul_op.py b/test/legacy_test/test_elementwise_mul_op.py index 0787bf4f5104a..e22544f4298a8 100644 --- a/test/legacy_test/test_elementwise_mul_op.py +++ b/test/legacy_test/test_elementwise_mul_op.py @@ -50,6 +50,7 @@ def test_check_output(self): self.check_output( check_dygraph=(not self.use_mkldnn), check_pir=(not self.use_mkldnn), + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad_normal(self): @@ -61,6 +62,7 @@ def test_check_grad_normal(self): check_prim=True, check_prim_pir=(not self.use_mkldnn), check_pir=(not self.use_mkldnn), + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad_ingore_x(self): @@ -73,6 +75,7 @@ def test_check_grad_ingore_x(self): 
check_prim=True, check_prim_pir=(not self.use_mkldnn), check_pir=(not self.use_mkldnn), + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad_ingore_y(self): @@ -85,6 +88,7 @@ def test_check_grad_ingore_y(self): check_prim=True, check_prim_pir=(not self.use_mkldnn), check_pir=(not self.use_mkldnn), + check_pir_onednn=self.check_pir_onednn, ) def init_input_output(self): @@ -132,13 +136,30 @@ def if_enable_cinn(self): self.enable_cinn = False def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], 'Out', check_pir=True) + self.check_grad( + ['X', 'Y'], + 'Out', + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], 'Out', no_grad_set=set("X"), check_pir=True) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_pir=True) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) class TestElementwiseMulOp_ZeroDim1(ElementwiseMulOp): @@ -189,7 +210,9 @@ def setUp(self): self.if_enable_cinn() def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad_normal(self): self.check_grad( @@ -198,6 +221,7 @@ def test_check_grad_normal(self): check_prim=True, check_prim_pir=True, check_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad_ingore_x(self): @@ -208,6 +232,7 @@ def test_check_grad_ingore_x(self): check_prim=True, check_prim_pir=True, check_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad_ingore_y(self): @@ -218,6 +243,7 @@ def test_check_grad_ingore_y(self): check_prim=True, check_prim_pir=True, check_pir=True, + check_pir_onednn=self.check_pir_onednn, ) def if_enable_cinn(self): @@ -275,6 +301,7 @@ def test_check_output(self): self.check_output( check_dygraph=self.check_dygraph, check_pir=self.check_dygraph, + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad_normal(self): @@ -284,6 +311,7 @@ def test_check_grad_normal(self): check_dygraph=self.check_dygraph, check_prim=self.check_prim, check_pir=self.check_dygraph, + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad_ingore_x(self): @@ -294,6 +322,7 @@ def test_check_grad_ingore_x(self): check_dygraph=self.check_dygraph, check_prim=self.check_prim, check_pir=self.check_dygraph, + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad_ingore_y(self): @@ -304,6 +333,7 @@ def test_check_grad_ingore_y(self): check_dygraph=self.check_dygraph, check_prim=self.check_prim, check_pir=self.check_dygraph, + check_pir_onednn=self.check_pir_onednn, ) def init_input_attr_output(self): @@ -422,7 +452,10 @@ def if_enable_cinn(self): def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_output(check_dygraph=(not self.use_mkldnn)) + self.check_output( + check_dygraph=(not self.use_mkldnn), + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad_normal(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode @@ -433,6 +466,7 @@ def test_check_grad_normal(self): check_prim=True, check_prim_pir=(not self.use_mkldnn), check_pir=(not self.use_mkldnn), + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad_ingore_x(self): @@ -445,6 +479,7 @@ def test_check_grad_ingore_x(self): check_prim=True, 
check_prim_pir=(not self.use_mkldnn), check_pir=(not self.use_mkldnn), + check_pir_onednn=self.check_pir_onednn, ) def test_check_grad_ingore_y(self): @@ -457,6 +492,7 @@ def test_check_grad_ingore_y(self): check_prim=True, check_prim_pir=(not self.use_mkldnn), check_pir=(not self.use_mkldnn), + check_pir_onednn=self.check_pir_onednn, ) @@ -535,16 +571,35 @@ def init_input_output(self): self.out = self.x * self.y def test_check_output(self): - self.check_output(check_pir=True) + self.check_output( + check_pir=True, check_pir_onednn=self.check_pir_onednn + ) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], 'Out', check_pir=True) + self.check_grad( + ['X', 'Y'], + 'Out', + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad_ingore_x(self): - self.check_grad(['Y'], 'Out', no_grad_set=set("X"), check_pir=True) + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) def test_check_grad_ingore_y(self): - self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_pir=True) + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + check_pir=True, + check_pir_onednn=self.check_pir_onednn, + ) class TestRealComplexElementwiseMulOp(TestComplexElementwiseMulOp): diff --git a/test/legacy_test/test_sgd_op_bf16.py b/test/legacy_test/test_sgd_op_bf16.py index e7d0497fd7876..3baf0a490cbf5 100644 --- a/test/legacy_test/test_sgd_op_bf16.py +++ b/test/legacy_test/test_sgd_op_bf16.py @@ -23,6 +23,7 @@ convert_float_to_uint16, convert_uint16_to_float, ) +from utils import compare_legacy_with_pt import paddle from paddle import base @@ -55,7 +56,9 @@ def conf(self): self.w = 105 def test_check_output(self): - self.check_output_with_place(core.CPUPlace(), check_dygraph=False) + self.check_output_with_place( + core.CPUPlace(), check_dygraph=False, check_pir_onednn=True + ) @unittest.skipIf( @@ -330,6 +333,7 @@ def _data_reader(self): data = np.random.randint(0, 9, self.ids_shape).astype("int64") yield data, label + @compare_legacy_with_pt def test_sgd(self): place = base.CPUPlace() main = base.Program() diff --git a/test/mkldnn/test_activation_bf16_mkldnn_op.py b/test/mkldnn/test_activation_bf16_mkldnn_op.py index c97aa65c432f6..05db15d30d018 100644 --- a/test/mkldnn/test_activation_bf16_mkldnn_op.py +++ b/test/mkldnn/test_activation_bf16_mkldnn_op.py @@ -77,6 +77,7 @@ def test_check_grad(self): class TestMKLDNNSigmoidBF16Op(MKLDNNBF16ActivationOp, TestActivation): def config(self): self.op_type = "sigmoid" + self.check_pir_onednn = True def op_forward(self, x): return 1 / (1 + np.exp(-x)) @@ -88,6 +89,7 @@ def op_grad(self, dout, x): class TestMKLDNNSqrtBF16Op(MKLDNNBF16ActivationOp, TestActivation): def config(self): self.op_type = "sqrt" + self.check_pir_onednn = True def init_data(self): self.x = np.random.uniform(1, 2, [2, 4, 3, 5]).astype(np.float32) @@ -102,6 +104,7 @@ def op_grad(self, dout, x): class TestMKLDNNGeluErfBF16Op(MKLDNNBF16ActivationOp, TestActivation): def config(self): self.op_type = "gelu" + self.check_pir_onednn = True def op_forward(self, x): return gelu(x, False) @@ -122,6 +125,7 @@ def init_data(self): class TestMKLDNNGeluTanhBF16Op(MKLDNNBF16ActivationOp, TestActivation): def config(self): self.op_type = "gelu" + self.check_pir_onednn = True def op_forward(self, x): return gelu(x, True) @@ -166,6 +170,7 @@ def op_grad(self, dout, x): class TestMKLDNNMishBF16Op(MKLDNNBF16ActivationOp, TestActivation): def config(self): self.op_type = "mish" + self.check_pir_onednn = True 
def op_forward(self, x): return x * np.tanh(np.log(1 + np.exp(x))) @@ -195,6 +200,7 @@ def op_grad(self, dout, x): class TestMKLDNNLeakyReluBF16Op(MKLDNNBF16ActivationOp, TestActivation): def config(self): self.op_type = "leaky_relu" + self.check_pir_onednn = True def op_forward(self, x): return np.where(x > 0, x, self.alpha * x) @@ -228,6 +234,7 @@ def set_attrs(self): class TestMKLDNNHardSwishBF16Op(MKLDNNBF16ActivationOp, TestActivation): def config(self): self.op_type = "hard_swish" + self.check_pir_onednn = True def op_forward(self, x): result = np.where(x < -3, 0, x) @@ -241,6 +248,7 @@ def op_grad(self, dout, x): class TestMKLDNNTanhBF16Op(MKLDNNBF16ActivationOp, TestActivation): def config(self): self.op_type = "tanh" + self.check_pir_onednn = True def op_forward(self, x): return np.tanh(x) @@ -264,6 +272,7 @@ def op_grad(self, dout, x): class TestMKLDNNEluBF16Op(MKLDNNBF16ActivationOp, TestActivation): def config(self): self.op_type = "elu" + self.check_pir_onednn = True def op_forward(self, x): return np.where(x > 0, x, self.alpha * (np.exp(x) - 1)) @@ -279,6 +288,7 @@ def set_attrs(self): class TestMKLDNNExpBF16Op(MKLDNNBF16ActivationOp, TestActivation): def config(self): self.op_type = "exp" + self.check_pir_onednn = True def op_forward(self, x): return np.exp(x) diff --git a/test/mkldnn/test_activation_mkldnn_op.py b/test/mkldnn/test_activation_mkldnn_op.py index 765464bdb698a..bd8c2e7b2fbd4 100644 --- a/test/mkldnn/test_activation_mkldnn_op.py +++ b/test/mkldnn/test_activation_mkldnn_op.py @@ -92,17 +92,20 @@ def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True def init_dtype(self): self.dtype = np.float32 def test_check_output(self): - self.check_output(check_dygraph=False) + self.check_output(check_dygraph=False, check_pir_onednn=True) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_dygraph=False) + self.check_grad( + ['X'], 'Out', check_dygraph=False, check_pir_onednn=True + ) class TestMKLDNNLeakyRelu_ZeroDim(TestLeakyRelu_ZeroDim): @@ -110,6 +113,7 @@ def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True def init_dtype(self): self.dtype = np.float32 @@ -127,6 +131,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True class TestMKLDNNGelu_ZeroDim(TestActivation_ZeroDim): @@ -141,6 +146,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True class TestMKLDNNGeluDim2Approx(TestActivation): @@ -155,6 +161,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True, "approximate": True} + self.check_pir_onednn = True class TestMKLDNNTanhDim2(TestTanh): @@ -162,6 +169,7 @@ def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True def init_dtype(self): self.dtype = np.float32 @@ -172,6 +180,7 @@ def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True def init_dtype(self): self.dtype = np.float32 @@ -182,6 +191,7 @@ def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True def init_dtype(self): self.dtype = np.float32 @@ -192,6 +202,7 @@ def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + 
self.check_pir_onednn = True def init_dtype(self): self.dtype = np.float32 @@ -240,12 +251,14 @@ class TestMKLDNNHardSwishDim2(TestHardSwish): def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True class TestMKLDNNHardSwish_ZeroDim(TestHardSwish_ZeroDim): def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True class TestMKLDNNSigmoidDim2(TestSigmoid): @@ -289,17 +302,20 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True def init_dtype(self): self.dtype = np.float32 def test_check_output(self): - self.check_output(check_dygraph=False) + self.check_output(check_dygraph=False, check_pir_onednn=True) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_dygraph=False) + self.check_grad( + ['X'], 'Out', check_dygraph=False, check_pir_onednn=True + ) class TestMKLDNNGeluDim4(TestActivation): @@ -314,6 +330,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True class TestMKLDNNGeluDim4Approx(TestActivation): @@ -328,6 +345,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True, "approximate": True} + self.check_pir_onednn = True @unittest.skipIf( @@ -345,9 +363,10 @@ def setUp(self): self.inputs = {'X': convert_float_to_uint16(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True def test_check_output(self): - self.check_output_with_place(core.CPUPlace()) + self.check_output_with_place(core.CPUPlace(), check_pir_onednn=True) def test_check_grad(self): pass @@ -368,9 +387,10 @@ def setUp(self): self.inputs = {'X': convert_float_to_uint16(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True, "approximate": True} + self.check_pir_onednn = True def test_check_output(self): - self.check_output_with_place(core.CPUPlace()) + self.check_output_with_place(core.CPUPlace(), check_pir_onednn=True) def test_check_grad(self): pass @@ -385,6 +405,7 @@ def setUp(self): } self.outputs = {'Out': np.tanh(self.inputs['X'])} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True class TestMKLDNNSqrtDim4(TestSqrt): @@ -396,6 +417,7 @@ def setUp(self): } self.outputs = {'Out': np.sqrt(self.inputs['X'])} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True class TestMKLDNNAbsDim4(TestAbs): @@ -439,6 +461,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True def init_dtype(self): self.dtype = np.float32 @@ -456,6 +479,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True class TestMKLDNNMish_ZeroDim(TestActivation_ZeroDim): @@ -470,6 +494,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True class TestMKLDNNRound(TestActivation): @@ -482,14 +507,15 @@ def setUp(self): self.inputs = {'X': x} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True def test_check_output(self): - self.check_output(check_pir=True) + 
self.check_output(check_pir=True, check_pir_onednn=True) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad(['X'], 'Out', check_pir=True, check_pir_onednn=True) class TestMKLDNNRound_ZeroDim(TestActivation_ZeroDim): @@ -502,14 +528,15 @@ def setUp(self): self.inputs = {'X': x} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + self.check_pir_onednn = True def test_check_output(self): - self.check_output(check_pir=True) + self.check_output(check_pir=True, check_pir_onednn=True) def test_check_grad(self): if self.dtype == np.float16: return - self.check_grad(['X'], 'Out', check_pir=True) + self.check_grad(['X'], 'Out', check_pir=True, check_pir_onednn=True) class TestMKLDNNSigmoidDim4(TestSigmoid): @@ -537,6 +564,7 @@ def setUp(self): 'Out': np.maximum(0, x) + np.minimum(0, self.alpha * (np.exp(x) - 1)) } + self.check_pir_onednn = True def set_alpha(self): self.alpha = 1.0 @@ -556,6 +584,7 @@ def setUp(self): 'Out': np.maximum(0, x) + np.minimum(0, self.alpha * (np.exp(x) - 1)) } + self.check_pir_onednn = True def set_alpha(self): self.alpha = 1.0 @@ -575,6 +604,7 @@ def setUp(self): self.inputs = {'X': x} self.attrs = {'use_mkldnn': True} self.outputs = {'Out': np.exp(x)} + self.check_pir_onednn = True class TestMKLDNNExpOp_ZeroDim(TestActivation_ZeroDim): @@ -586,6 +616,7 @@ def setUp(self): self.inputs = {'X': x} self.attrs = {'use_mkldnn': True} self.outputs = {'Out': np.exp(x)} + self.check_pir_onednn = True # Check if primitives already exist in backward @@ -617,6 +648,7 @@ class TestMKLDNNSoftplusDim2(TestSoftplus): def setUp(self): super().setUp() self.attrs.update({"use_mkldnn": True}) + self.check_pir_onednn = True def init_dtype(self): self.dtype = np.float32 diff --git a/test/mkldnn/test_clip_mkldnn_op.py b/test/mkldnn/test_clip_mkldnn_op.py index 752193d35b64b..abb67e439d080 100644 --- a/test/mkldnn/test_clip_mkldnn_op.py +++ b/test/mkldnn/test_clip_mkldnn_op.py @@ -62,10 +62,12 @@ def set_attrs(self): self.attrs = {'min': 7.2, 'max': 9.6, 'use_mkldnn': True} def test_check_output(self): - self.check_output(check_dygraph=False) + self.check_output(check_dygraph=False, check_pir_onednn=True) def test_check_grad(self): - self.check_grad(['X'], 'Out', check_dygraph=False) + self.check_grad( + ['X'], 'Out', check_dygraph=False, check_pir_onednn=True + ) class TestClipOneDNNOp_ZeroDim(TestClipOneDNNOp): @@ -114,7 +116,9 @@ def calculate_grads(self): self.dx[j][i] = self.dout[j][i] def test_check_output(self): - self.check_output_with_place(core.CPUPlace(), check_dygraph=False) + self.check_output_with_place( + core.CPUPlace(), check_dygraph=False, check_pir_onednn=True + ) def test_check_grad(self): self.calculate_grads() @@ -125,6 +129,7 @@ def test_check_grad(self): user_defined_grads=[self.dx], user_defined_grad_outputs=[convert_float_to_uint16(self.dout)], check_dygraph=False, + check_pir_onednn=True, ) cls_name = "{}_{}".format(parent.__name__, "BF16") diff --git a/test/mkldnn/test_elementwise_div_mkldnn_op.py b/test/mkldnn/test_elementwise_div_mkldnn_op.py index bc3d340aba969..1e7f934e56f91 100644 --- a/test/mkldnn/test_elementwise_div_mkldnn_op.py +++ b/test/mkldnn/test_elementwise_div_mkldnn_op.py @@ -46,13 +46,19 @@ def init_input_output(self): self.out = np.divide(self.x, self.y) def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], 'Out', None, 0.005, False, 0.02) + self.check_grad( + ['X', 'Y'], 'Out', None, 0.005, False, 0.02, check_pir_onednn=True + ) def 
test_check_grad_ignore_x(self): - self.check_grad(['Y'], 'Out', set("X"), 0.005, False, 0.02) + self.check_grad( + ['Y'], 'Out', set("X"), 0.005, False, 0.02, check_pir_onednn=True + ) def test_check_grad_ignore_y(self): - self.check_grad(['X'], 'Out', set('Y'), 0.005, False, 0.02) + self.check_grad( + ['X'], 'Out', set('Y'), 0.005, False, 0.02, check_pir_onednn=True + ) def init_axis(self): self.axis = -1 @@ -64,7 +70,7 @@ def init_dtype(self): self.dtype = np.float32 def test_check_output(self): - self.check_output() + self.check_output(check_pir_onednn=True) class TestMKLDNNElementwiseDivOp2(TestMKLDNNElementwiseDivOp): @@ -171,7 +177,7 @@ def init_input_output(self): self.out = np.divide(self.x, self.y) def test_check_output(self): - self.check_output_with_place(core.CPUPlace()) + self.check_output_with_place(core.CPUPlace(), check_pir_onednn=True) def test_check_grad_normal(self): self.check_grad_with_place( diff --git a/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py b/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py index b339faa3093bd..3da3e82f6785a 100644 --- a/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py +++ b/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py @@ -48,7 +48,7 @@ def generate_data(self): self.out = np.multiply(self.x, self.y) def test_check_output(self): - self.check_output_with_place(core.CPUPlace()) + self.check_output_with_place(core.CPUPlace(), check_pir_onednn=True) def test_check_grad_normal(self): self.check_grad_with_place( @@ -61,6 +61,7 @@ def test_check_grad_normal(self): np.multiply(self.x, self.x), ], user_defined_grad_outputs=[self.x_bf16], + check_pir_onednn=True, ) def test_check_grad_ingore_x(self): @@ -71,6 +72,7 @@ def test_check_grad_ingore_x(self): check_dygraph=False, user_defined_grads=[np.multiply(self.y, self.x)], user_defined_grad_outputs=[self.y_bf16], + check_pir_onednn=True, ) def test_check_grad_ingore_y(self): @@ -81,6 +83,7 @@ def test_check_grad_ingore_y(self): check_dygraph=False, user_defined_grads=[np.multiply(self.x, self.y)], user_defined_grad_outputs=[self.x_bf16], + check_pir_onednn=True, ) diff --git a/test/mkldnn/test_elementwise_mul_onednn_op.py b/test/mkldnn/test_elementwise_mul_onednn_op.py index ce5a7b58502f5..ec923a3a4f55f 100644 --- a/test/mkldnn/test_elementwise_mul_onednn_op.py +++ b/test/mkldnn/test_elementwise_mul_onednn_op.py @@ -24,6 +24,7 @@ class TestOneDNNElementwiseMulOp(ElementwiseMulOp): def init_kernel_type(self): self.use_mkldnn = True + self.check_pir_onednn = True def init_dtype(self): self.dtype = np.float32 @@ -132,6 +133,7 @@ class TestInt8(ElementwiseMulOp): def init_kernel_type(self): self.use_mkldnn = True self._cpu_only = True + self.check_pir_onednn = True def init_dtype(self): self.dtype = np.int8 diff --git a/test/mkldnn/test_elementwise_sub_onednn_op.py b/test/mkldnn/test_elementwise_sub_onednn_op.py index 1f9415499efbd..f6932cc177b80 100644 --- a/test/mkldnn/test_elementwise_sub_onednn_op.py +++ b/test/mkldnn/test_elementwise_sub_onednn_op.py @@ -77,7 +77,7 @@ def init_dtype(self): self.dtype = np.float32 def test_check_output(self): - self.check_output(check_pir=True) + self.check_output(check_pir=True, check_pir_onednn=True) def if_check_prim(self): self.check_prim = self.axis == -1 @@ -242,7 +242,7 @@ def init_input_output(self): self.out = np.subtract(self.x, self.y) def test_check_output(self): - self.check_output_with_place(core.CPUPlace()) + self.check_output_with_place(core.CPUPlace(), check_pir_onednn=True) def test_check_grad_normal(self): self.check_grad_with_place( @@ 
-251,6 +251,7 @@ def test_check_grad_normal(self): "Out", user_defined_grads=[self.x, -self.x], user_defined_grad_outputs=[self.x_bf16], + check_pir_onednn=True, ) def test_check_grad_ignore_x(self): @@ -260,6 +261,7 @@ def test_check_grad_ignore_x(self): "Out", user_defined_grads=[-self.y], user_defined_grad_outputs=[self.y_bf16], + check_pir_onednn=True, ) def test_check_grad_ignore_y(self): @@ -269,6 +271,7 @@ def test_check_grad_ignore_y(self): "Out", user_defined_grads=[self.x], user_defined_grad_outputs=[self.x_bf16], + check_pir_onednn=True, ) @@ -291,6 +294,7 @@ def test_check_grad_normal(self): "Out", user_defined_grads=[self.x, self.compute_reduced_gradients(self.x)], user_defined_grad_outputs=[self.x_bf16], + check_pir_onednn=True, ) def test_check_grad_ignore_x(self): @@ -300,6 +304,7 @@ def test_check_grad_ignore_x(self): "Out", user_defined_grads=[self.compute_reduced_gradients(self.x)], user_defined_grad_outputs=[self.x_bf16], + check_pir_onednn=True, ) diff --git a/test/mkldnn/test_fill_constant_mkldnn_op.py b/test/mkldnn/test_fill_constant_mkldnn_op.py index d293110ee7293..562a0dd0ae503 100644 --- a/test/mkldnn/test_fill_constant_mkldnn_op.py +++ b/test/mkldnn/test_fill_constant_mkldnn_op.py @@ -60,7 +60,7 @@ def set_attrs(self): self.attrs = {'shape': (3, 5), 'use_mkldnn': True, 'value': self.value} def test_check_output(self): - self.check_output() + self.check_output(check_pir_onednn=True) class TestFillZerosLike4DShapeTensorPriorityOneDNNOp( diff --git a/test/mkldnn/test_log_softmax_mkldnn_op.py b/test/mkldnn/test_log_softmax_mkldnn_op.py index 8d4dff23a73e0..7c997e1653202 100644 --- a/test/mkldnn/test_log_softmax_mkldnn_op.py +++ b/test/mkldnn/test_log_softmax_mkldnn_op.py @@ -53,7 +53,9 @@ def set_axis(self): self.axis = -1 def test_check_output(self): - self.check_output_with_place(core.CPUPlace(), check_dygraph=False) + self.check_output_with_place( + core.CPUPlace(), check_dygraph=False, check_pir_onednn=True + ) class TestLogSoftmax0DOneDNNOp(TestLogSoftmaxOneDNNOp): diff --git a/test/mkldnn/test_nearest_interp_v2_mkldnn_op.py b/test/mkldnn/test_nearest_interp_v2_mkldnn_op.py index ec3f40dfcd8f6..94b63bc4c0981 100644 --- a/test/mkldnn/test_nearest_interp_v2_mkldnn_op.py +++ b/test/mkldnn/test_nearest_interp_v2_mkldnn_op.py @@ -151,7 +151,7 @@ def setUp(self): self.outputs = {'Out': output_np} def test_check_output(self): - self.check_output(check_dygraph=False) + self.check_output(check_dygraph=False, check_pir_onednn=True) class TestNearestInterpOpV2MKLDNNNHWC(TestNearestInterpV2MKLDNNOp): diff --git a/test/mkldnn/test_reduce_bf16_mkldnn_op.py b/test/mkldnn/test_reduce_bf16_mkldnn_op.py index 1d0e0e596dcb8..50997eebaef4e 100644 --- a/test/mkldnn/test_reduce_bf16_mkldnn_op.py +++ b/test/mkldnn/test_reduce_bf16_mkldnn_op.py @@ -40,7 +40,9 @@ def setUp(self): self.attrs = {'use_mkldnn': self.use_mkldnn} def test_check_output(self): - self.check_output(check_dygraph=False, check_pir=False) + self.check_output( + check_dygraph=False, check_pir=False, check_pir_onednn=True + ) def calculate_grads(self): tmp_tensor = np.zeros(self.x_fp32.shape).astype("float32") @@ -85,6 +87,7 @@ def test_check_grad(self): user_defined_grads=[self.grad_X], user_defined_grad_outputs=[convert_float_to_uint16(self.grad_Out)], check_pir=False, + check_pir_onednn=True, ) diff --git a/test/mkldnn/test_scale_bf16_mkldnn_op.py b/test/mkldnn/test_scale_bf16_mkldnn_op.py index 3f0a2be49290c..45de56b4aba56 100644 --- a/test/mkldnn/test_scale_bf16_mkldnn_op.py +++ 
b/test/mkldnn/test_scale_bf16_mkldnn_op.py @@ -54,7 +54,7 @@ def calculate_grads(self): self.dx = self.out * scale def test_check_output(self): - self.check_output(check_dygraph=False) + self.check_output(check_dygraph=False, check_pir_onednn=True) def test_check_grad(self): self.calculate_grads() @@ -65,6 +65,7 @@ def test_check_grad(self): check_dygraph=False, user_defined_grads=[self.dx], user_defined_grad_outputs=[convert_float_to_uint16(self.out)], + check_pir_onednn=True, ) diff --git a/test/mkldnn/test_scale_mkldnn_op.py b/test/mkldnn/test_scale_mkldnn_op.py index 54b412bf8f155..736f41ec82770 100644 --- a/test/mkldnn/test_scale_mkldnn_op.py +++ b/test/mkldnn/test_scale_mkldnn_op.py @@ -35,10 +35,12 @@ def init_shape(self): self.shape = [10, 10] def test_check_output(self): - self.check_output(check_dygraph=False) + self.check_output(check_dygraph=False, check_pir_onednn=True) def test_check_grad(self): - self.check_grad(['X'], 'Out', check_dygraph=False) + self.check_grad( + ['X'], 'Out', check_dygraph=False, check_pir_onednn=True + ) class TestScaleOp_ZeroDim(TestScaleOp): @@ -62,10 +64,12 @@ def setUp(self): } def test_check_output(self): - self.check_output(check_dygraph=False) + self.check_output(check_dygraph=False, check_pir_onednn=True) def test_check_grad(self): - self.check_grad(['X'], 'Out', check_dygraph=False) + self.check_grad( + ['X'], 'Out', check_dygraph=False, check_pir_onednn=True + ) class TestScaleOpScaleTensor(OpTest): @@ -80,10 +84,12 @@ def setUp(self): self.outputs = {'Out': self.inputs['X'] * self.scale} def test_check_output(self): - self.check_output(check_dygraph=False) + self.check_output(check_dygraph=False, check_pir_onednn=True) def test_check_grad(self): - self.check_grad(['X'], 'Out', check_dygraph=False) + self.check_grad( + ['X'], 'Out', check_dygraph=False, check_pir_onednn=True + ) class TestScaleOpScaleTensorNotBiasAfterScale(OpTest): @@ -101,10 +107,12 @@ def setUp(self): } def test_check_output(self): - self.check_output(check_dygraph=False) + self.check_output(check_dygraph=False, check_pir_onednn=True) def test_check_grad(self): - self.check_grad(['X'], 'Out', check_dygraph=False) + self.check_grad( + ['X'], 'Out', check_dygraph=False, check_pir_onednn=True + ) if __name__ == "__main__": diff --git a/test/mkldnn/test_shuffle_channel_mkldnn_op.py b/test/mkldnn/test_shuffle_channel_mkldnn_op.py index 90ca2e44ed986..e9510c9636961 100644 --- a/test/mkldnn/test_shuffle_channel_mkldnn_op.py +++ b/test/mkldnn/test_shuffle_channel_mkldnn_op.py @@ -44,7 +44,7 @@ def set_group(self): self.group = 4 def test_check_output(self): - self.check_output_with_place(core.CPUPlace()) + self.check_output_with_place(core.CPUPlace(), check_pir_onednn=True) class TestShuffleChannelSingleGroupOneDNNOp(TestShuffleChannelOneDNNOp): diff --git a/test/mkldnn/test_softplus_mkldnn_op.py b/test/mkldnn/test_softplus_mkldnn_op.py index 58e4fafdf2698..0949b63cc2c59 100644 --- a/test/mkldnn/test_softplus_mkldnn_op.py +++ b/test/mkldnn/test_softplus_mkldnn_op.py @@ -56,7 +56,7 @@ def set_dtype(self): self.dtype = np.float32 def test_check_output(self): - self.check_output() + self.check_output(check_pir_onednn=True) class TestSoftplus4DOneDNNOp(TestSoftplusOneDNNOp):