diff --git a/python/paddle/tensor/attribute.py b/python/paddle/tensor/attribute.py
index f3dcaf06cd9bf..8bc7cff200b34 100644
--- a/python/paddle/tensor/attribute.py
+++ b/python/paddle/tensor/attribute.py
@@ -20,11 +20,7 @@
 from paddle import _C_ops
 
 from ..base.data_feeder import check_type, check_variable_and_dtype
-from ..base.framework import (
-    in_dygraph_mode,
-    in_dynamic_or_pir_mode,
-    in_pir_mode,
-)
+from ..base.framework import in_dynamic_or_pir_mode, in_pir_mode
 from ..common_ops_import import Variable
 from ..framework import LayerHelper, core
 from .creation import _complex_to_real_dtype, assign
@@ -300,7 +296,7 @@ def real(x, name=None):
             [[1., 2., 3.],
              [4., 5., 6.]])
     """
-    if in_dygraph_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.real(x)
     else:
         check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
@@ -348,7 +344,7 @@ def imag(x, name=None):
             [[6., 5., 4.],
              [3., 2., 1.]])
     """
-    if in_dygraph_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.imag(x)
     else:
         check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py
index 9b50993b89166..0c9bafb8b564c 100755
--- a/python/paddle/tensor/logic.py
+++ b/python/paddle/tensor/logic.py
@@ -139,7 +139,7 @@ def logical_and(x, y, out=None, name=None):
             [True , False, True , False])
     """
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.logical_and(x, y)
 
     return _logical_op(
@@ -413,7 +413,7 @@ def equal_all(x, y, name=None):
             Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
             False)
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.equal_all(x, y)
     else:
         helper = LayerHelper("equal_all", **locals())
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index ae61880c997be..10a5f239535ad 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -1413,7 +1413,7 @@ def flip(x, axis, name=None):
     if isinstance(axis, int):
         axis = [axis]
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.flip(x, axis)
     else:
         helper = LayerHelper("flip", **locals())
@@ -3430,7 +3430,7 @@ def expand_as(x, y, name=None):
             [[1, 2, 3],
              [1, 2, 3]])
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.expand_as(x, None, y.shape)
     else:
         check_variable_and_dtype(
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index ad60f962d73b9..fb08264314c27 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -4437,7 +4437,7 @@ def isnan(x, name=None):
             Tensor(shape=[7], dtype=bool, place=Place(cpu), stop_gradient=True,
             [False, False, False, False, False, True , True ])
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.isnan(x)
     else:
         helper = LayerHelper("isnan_v2", **locals())
@@ -5209,7 +5209,7 @@ def logit(x, eps=None, name=None):
     """
     if eps is None:
         eps = 0.0
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.logit(x, eps)
     else:
         check_variable_and_dtype(
diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py
index 8fd2473231f93..c1392b5ccc121 100755
--- a/python/paddle/tensor/search.py
+++ b/python/paddle/tensor/search.py
@@ -459,7 +459,7 @@ def nonzero(x, as_tuple=False):
     shape = x.shape
     rank = len(shape)
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         outs = _C_ops.nonzero(x)
     else:
         check_variable_and_dtype(
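Reviewer note, not part of the patch: the functional change above is that these APIs now take the `_C_ops` fast path whenever either dygraph or the new PIR mode is active, instead of dygraph only. A minimal dygraph smoke test of the rewired entry points (a sketch; dygraph already took this branch, which PIR programs now share):

    import numpy as np
    import paddle

    # real/imag now dispatch straight to _C_ops under both dygraph and PIR
    x = paddle.to_tensor(np.array([1 + 2j, 3 + 4j], dtype="complex64"))
    print(paddle.real(x))  # [1., 3.]
    print(paddle.imag(x))  # [2., 4.]

    # the same guard swap covers logical_and, equal_all, flip, expand_as,
    # isnan, logit and nonzero
    print(paddle.isnan(paddle.to_tensor([0.0, float("nan")])))  # [False, True]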
diff --git a/test/legacy_test/test_compare_reduce_op.py b/test/legacy_test/test_compare_reduce_op.py
index e281407c242b0..fdd08b2990cfe 100644
--- a/test/legacy_test/test_compare_reduce_op.py
+++ b/test/legacy_test/test_compare_reduce_op.py
@@ -32,7 +32,7 @@ def setUp(self):
             self.op_type = op_type
 
         def test_output(self):
-            self.check_output()
+            self.check_output(check_pir=True)
 
     cls_name = "{}_{}_{}".format(op_type, typename, 'not_equal_all')
     Cls.__name__ = cls_name
@@ -51,7 +51,7 @@ def setUp(self):
             self.op_type = op_type
 
         def test_output(self):
-            self.check_output()
+            self.check_output(check_pir=True)
 
     cls_name = "{}_{}_{}".format(op_type, typename, 'not_shape_equal_all')
     Cls.__name__ = cls_name
@@ -69,7 +69,7 @@ def setUp(self):
             self.op_type = op_type
 
         def test_output(self):
-            self.check_output()
+            self.check_output(check_pir=True)
 
     cls_name = "{}_{}_{}".format(op_type, typename, 'equal_all')
     Cls.__name__ = cls_name
@@ -89,7 +89,7 @@ def setUp(self):
             self.op_type = op_type
 
         def test_output(self):
-            self.check_output()
+            self.check_output(check_pir=True)
 
     cls_name = "{}_{}_{}".format(op_type, typename, 'equal_all')
     Cls.__name__ = cls_name
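What the repeated `check_pir=True` buys: `OpTest.check_output` additionally lowers the op into a PIR program and compares the results against the same expected outputs. The equivalent check outside the harness, as a hedged sketch (assumes `paddle.pir_utils.IrGuard` switches static-graph building and execution over to PIR, as it does on this branch):

    import numpy as np
    import paddle
    from paddle.pir_utils import IrGuard

    paddle.enable_static()
    with IrGuard():  # build and run under PIR instead of the legacy IR
        main = paddle.static.Program()
        with paddle.static.program_guard(main):
            x = paddle.static.data(name="x", shape=[2, 2], dtype="float32")
            y = paddle.static.data(name="y", shape=[2, 2], dtype="float32")
            out = paddle.equal_all(x, y)  # the op these generated tests cover
        exe = paddle.static.Executor(paddle.CPUPlace())
        a = np.ones([2, 2], dtype="float32")
        (res,) = exe.run(main, feed={"x": a, "y": a}, fetch_list=[out])
    print(res)  # True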
diff --git a/test/legacy_test/test_expand_as_v2_op.py b/test/legacy_test/test_expand_as_v2_op.py
index 13aa6863b9bd6..6b11c2f8dee99 100755
--- a/test/legacy_test/test_expand_as_v2_op.py
+++ b/test/legacy_test/test_expand_as_v2_op.py
@@ -20,6 +20,7 @@
 import paddle
 from paddle import base
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 
 class TestExpandAsBasic(OpTest):
@@ -48,10 +49,10 @@ def if_enable_cinn(self):
         pass
 
     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output(check_prim=True, check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_prim=True)
+        self.check_grad(['X'], 'Out', check_prim=True, check_pir=True)
 
 
 class TestExpandAs_ZeroDim1(TestExpandAsBasic):
@@ -104,11 +105,11 @@ def if_enable_cinn(self):
         self.enable_cinn = False
 
     def test_check_output(self):
-        self.check_output_with_place(place=paddle.CUDAPlace(0))
+        self.check_output_with_place(place=paddle.CUDAPlace(0), check_pir=True)
 
     def test_check_grad(self):
         self.check_grad_with_place(
-            paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True
+            paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True, check_pir=True
         )
 
 
@@ -242,7 +243,7 @@ def setUp(self):
         self.outputs = {'Out': convert_float_to_uint16(output)}
 
     def test_check_output(self):
-        self.check_output_with_place(place=paddle.CUDAPlace(0))
+        self.check_output_with_place(place=paddle.CUDAPlace(0), check_pir=True)
 
     def test_check_grad(self):
         pass
@@ -261,26 +262,28 @@ def test_errors(self):
 
 # Test python API
 class TestExpandAsV2API(unittest.TestCase):
+    @test_with_pir_api
     def test_api(self):
-        input1 = np.random.random([12, 14]).astype("float32")
-        input2 = np.random.random([2, 12, 14]).astype("float32")
-        x = paddle.static.data(name='x', shape=[12, 14], dtype="float32")
-
-        y = paddle.static.data(
-            name='target_tensor',
-            shape=[2, 12, 14],
-            dtype="float32",
-        )
-
-        out_1 = paddle.expand_as(x, y=y)
-
-        exe = base.Executor(place=base.CPUPlace())
-        res_1 = exe.run(
-            base.default_main_program(),
-            feed={"x": input1, "target_tensor": input2},
-            fetch_list=[out_1],
-        )
-        np.testing.assert_array_equal(res_1[0], np.tile(input1, (2, 1, 1)))
+        with paddle.static.program_guard(paddle.static.Program()):
+            input1 = np.random.random([12, 14]).astype("float32")
+            input2 = np.random.random([2, 12, 14]).astype("float32")
+            x = paddle.static.data(name='x', shape=[12, 14], dtype="float32")
+
+            y = paddle.static.data(
+                name='target_tensor',
+                shape=[2, 12, 14],
+                dtype="float32",
+            )
+
+            out_1 = paddle.expand_as(x, y=y)
+
+            exe = base.Executor(place=base.CPUPlace())
+            res_1 = exe.run(
+                paddle.static.default_main_program(),
+                feed={"x": input1, "target_tensor": input2},
+                fetch_list=[out_1],
+            )
+            np.testing.assert_array_equal(res_1[0], np.tile(input1, (2, 1, 1)))
 
 
 if __name__ == "__main__":
diff --git a/test/legacy_test/test_flip.py b/test/legacy_test/test_flip.py
index 4e5cc58ad3312..e4f729ded8234 100644
--- a/test/legacy_test/test_flip.py
+++ b/test/legacy_test/test_flip.py
@@ -100,10 +100,10 @@ def init_attrs(self):
         self.attrs = {"axis": self.axis}
 
     def test_check_output(self):
-        self.check_output(check_cinn=True)
+        self.check_output(check_cinn=True, check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(["X"], "Out", check_cinn=True)
+        self.check_grad(["X"], "Out", check_cinn=True, check_pir=True)
 
     def init_test_case(self):
         self.in_shape = (6, 4, 2, 3)
@@ -167,12 +167,16 @@ def test_check_output(self):
             if core.is_compiled_with_cuda():
                 place = core.CUDAPlace(0)
                 if core.is_float16_supported(place):
-                    self.check_output_with_place(place, check_cinn=True)
+                    self.check_output_with_place(
+                        place, check_cinn=True, check_pir=True
+                    )
 
         def test_check_grad(self):
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place):
-                self.check_grad_with_place(place, ["X"], "Out", check_cinn=True)
+                self.check_grad_with_place(
+                    place, ["X"], "Out", check_cinn=True, check_pir=True
+                )
 
     cls_name = "{}_{}".format(parent.__name__, "FP16OP")
     TestFlipFP16.__name__ = cls_name
@@ -202,12 +206,12 @@ def init_dtype(self):
 
         def test_check_output(self):
             place = core.CUDAPlace(0)
             if core.is_bfloat16_supported(place):
-                self.check_output_with_place(place)
+                self.check_output_with_place(place, check_pir=True)
 
         def test_check_grad(self):
             place = core.CUDAPlace(0)
             if core.is_bfloat16_supported(place):
-                self.check_grad_with_place(place, ["X"], "Out")
+                self.check_grad_with_place(place, ["X"], "Out", check_pir=True)
 
     cls_name = "{}_{}".format(parent.__name__, "BF16OP")
     TestFlipBF16.__name__ = cls_name
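The FP16/BF16 hunks only thread the new flag through the existing capability gates; for reference, the standalone shape of that guard (a sketch mirroring the test code, skipped harmlessly on CPU-only builds):

    import paddle
    from paddle.base import core

    if core.is_compiled_with_cuda():
        place = core.CUDAPlace(0)
        if core.is_float16_supported(place):
            # low-precision flip in dygraph; the OpTest variants above run
            # the same kernel with check_pir=True on top of check_cinn=True
            x = paddle.ones([2, 3], dtype="float16")
            print(paddle.flip(x, axis=[0]))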
diff --git a/test/legacy_test/test_full_op.py b/test/legacy_test/test_full_op.py
index 74e928e58a52a..0281d41252a27 100644
--- a/test/legacy_test/test_full_op.py
+++ b/test/legacy_test/test_full_op.py
@@ -19,60 +19,63 @@
 import paddle
 from paddle import base
 from paddle.base import Program, program_guard
+from paddle.pir_utils import test_with_pir_api
 
 
 # Test python API
 class TestFullAPI(unittest.TestCase):
+    @test_with_pir_api
     def test_api(self):
-        positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2)
+        with paddle.static.program_guard(paddle.static.Program()):
+            positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2)
 
-        positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)
-        shape_tensor_int32 = paddle.static.data(
-            name="shape_tensor_int32", shape=[2], dtype="int32"
-        )
+            positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)
+            shape_tensor_int32 = paddle.static.data(
+                name="shape_tensor_int32", shape=[2], dtype="int32"
+            )
 
-        shape_tensor_int64 = paddle.static.data(
-            name="shape_tensor_int64", shape=[2], dtype="int64"
-        )
+            shape_tensor_int64 = paddle.static.data(
+                name="shape_tensor_int64", shape=[2], dtype="int64"
+            )
 
-        out_1 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1)
+            out_1 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1)
 
-        out_2 = paddle.full(
-            shape=[1, positive_2_int32], dtype="float32", fill_value=1.1
-        )
+            out_2 = paddle.full(
+                shape=[1, positive_2_int32], dtype="float32", fill_value=1.1
+            )
 
-        out_3 = paddle.full(
-            shape=[1, positive_2_int64], dtype="float32", fill_value=1.1
-        )
+            out_3 = paddle.full(
+                shape=[1, positive_2_int64], dtype="float32", fill_value=1.1
+            )
 
-        out_4 = paddle.full(
-            shape=shape_tensor_int32, dtype="float32", fill_value=1.2
-        )
+            out_4 = paddle.full(
+                shape=shape_tensor_int32, dtype="float32", fill_value=1.2
+            )
 
-        out_5 = paddle.full(
-            shape=shape_tensor_int64, dtype="float32", fill_value=1.1
-        )
+            out_5 = paddle.full(
+                shape=shape_tensor_int64, dtype="float32", fill_value=1.1
+            )
 
-        out_6 = paddle.full(
-            shape=shape_tensor_int64, dtype=np.float32, fill_value=1.1
-        )
+            out_6 = paddle.full(
+                shape=shape_tensor_int64, dtype=np.float32, fill_value=1.1
+            )
 
-        val = paddle.tensor.fill_constant(
-            shape=[1], dtype=np.float32, value=1.1
-        )
-        out_7 = paddle.full(
-            shape=shape_tensor_int64, dtype=np.float32, fill_value=val
-        )
+            val = paddle.tensor.fill_constant(
+                shape=[1], dtype=np.float32, value=1.1
+            )
+            out_7 = paddle.full(
+                shape=shape_tensor_int64, dtype=np.float32, fill_value=val
+            )
 
-        exe = base.Executor(place=base.CPUPlace())
-        res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
-            base.default_main_program(),
-            feed={
-                "shape_tensor_int32": np.array([1, 2]).astype("int32"),
-                "shape_tensor_int64": np.array([1, 2]).astype("int64"),
-            },
-            fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7],
-        )
+            exe = base.Executor(place=base.CPUPlace())
+            res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
+                paddle.static.default_main_program(),
+                feed={
+                    "shape_tensor_int32": np.array([1, 2]).astype("int32"),
+                    "shape_tensor_int64": np.array([1, 2]).astype("int64"),
+                },
+                fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7],
+            )
 
         np.testing.assert_array_equal(
             res_1, np.full([1, 2], 1.1, dtype="float32")
diff --git a/test/legacy_test/test_logical_op.py b/test/legacy_test/test_logical_op.py
index 98e15878cdfb6..81dec36e2f698 100755
--- a/test/legacy_test/test_logical_op.py
+++ b/test/legacy_test/test_logical_op.py
@@ -67,6 +67,7 @@
 }
 
 
+# @test_with_pir_api
 def run_static(x_np, y_np, op_str, use_gpu=False, binary_op=True):
     paddle.enable_static()
     startup_program = Program()
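`@test_with_pir_api`, used in `test_full_op.py` and `test_expand_as_v2_op.py` above, runs the decorated test body twice, once under the legacy IR and once under PIR, which is why each body now builds a fresh `paddle.static.Program()` instead of mutating the global default. A self-contained sketch of the same pattern (hypothetical test class, not from the patch):

    import unittest

    import numpy as np
    import paddle
    from paddle.pir_utils import test_with_pir_api

    paddle.enable_static()


    class TestFullSmoke(unittest.TestCase):
        @test_with_pir_api
        def test_full(self):
            # a fresh program per invocation keeps the two IR runs independent
            with paddle.static.program_guard(paddle.static.Program()):
                out = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1)
                exe = paddle.static.Executor(paddle.CPUPlace())
                (res,) = exe.run(
                    paddle.static.default_main_program(), fetch_list=[out]
                )
            np.testing.assert_array_equal(
                res, np.full([1, 2], 1.1, dtype="float32")
            )


    if __name__ == "__main__":
        unittest.main()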
diff --git a/test/legacy_test/test_logit_op.py b/test/legacy_test/test_logit_op.py
index b2f2e21af25ee..641fc68e1832d 100644
--- a/test/legacy_test/test_logit_op.py
+++ b/test/legacy_test/test_logit_op.py
@@ -58,10 +58,12 @@ def set_attrs(self):
         self.eps = 1e-8
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
+        self.check_grad(
+            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_pir=True
+        )
 
 
 class TestLogitOpFp32(TestLogitOp):
@@ -71,10 +73,12 @@ def set_attrs(self):
         self.eps = 1e-8
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
+        self.check_grad(
+            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_pir=True
+        )
 
 
 class TestLogitOpFp16(TestLogitOp):
@@ -84,10 +88,12 @@ def set_attrs(self):
         self.eps = 1e-8
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
+        self.check_grad(
+            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_pir=True
+        )
 
 
 @unittest.skipIf(
@@ -115,7 +121,7 @@ def set_attrs(self):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
-            self.check_output_with_place(place)
+            self.check_output_with_place(place, check_pir=True)
 
     def test_check_grad(self):
         if core.is_compiled_with_cuda():
@@ -125,6 +131,7 @@ def test_check_grad(self):
             ['X'],
             ['Out'],
             user_defined_grads=[self.x_grad],
+            check_pir=True,
         )
 
diff --git a/test/legacy_test/test_nonzero_api.py b/test/legacy_test/test_nonzero_api.py
index a57e1d9803c22..a14c72a22a149 100644
--- a/test/legacy_test/test_nonzero_api.py
+++ b/test/legacy_test/test_nonzero_api.py
@@ -29,6 +29,7 @@ def call_nonzero(x):
 
 class TestNonZeroAPI(unittest.TestCase):
     def test_nonzero_api_as_tuple(self):
+        paddle.enable_static()
         data = np.array([[True, False], [False, True]])
         with program_guard(Program(), Program()):
             x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32')
@@ -61,6 +62,7 @@ def test_nonzero_api_as_tuple(self):
         np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05)
 
     def test_nonzero_api(self):
+        paddle.enable_static()
         data = np.array([[True, False], [False, True]])
         with program_guard(Program(), Program()):
             x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32')
@@ -108,7 +110,7 @@ def setUp(self):
         self.outputs = self.return_outputs()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def init_shape(self):
         self.shape = [8, 8]
@@ -156,7 +158,7 @@ def setUp(self):
         self.outputs = self.return_outputs()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def init_shape(self):
         self.shape = [12, 9]
diff --git a/test/legacy_test/test_real_imag_op.py b/test/legacy_test/test_real_imag_op.py
index f714cef69e6d4..cfc9ea2112c65 100644
--- a/test/legacy_test/test_real_imag_op.py
+++ b/test/legacy_test/test_real_imag_op.py
@@ -19,6 +19,7 @@
 
 import paddle
 from paddle import base, static
+from paddle.pir_utils import test_with_pir_api
 
 numpy_apis = {
     "real": np.real,
@@ -57,7 +58,7 @@ def init_grad_input_output(self):
         )
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         self.check_grad(
@@ -99,6 +100,7 @@ def setUp(self):
             self.places.append(paddle.CUDAPlace(0))
         self._shape = [2, 20, 2, 3]
 
+    @test_with_pir_api
     def test_in_static_mode(self):
         def init_input_output(dtype):
             input = np.random.random(self._shape).astype(
@@ -114,7 +116,7 @@ def init_input_output(dtype):
                 out = paddle_apis[self.api](x)
 
                 exe = static.Executor(place)
-                out_value = exe.run(feed=input_dict, fetch_list=[out.name])
+                out_value = exe.run(feed=input_dict, fetch_list=[out])
                 np.testing.assert_array_equal(np_res, out_value[0])
 
     def test_in_dynamic_mode(self):
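One change in `test_real_imag_op.py` deserves a callout: `fetch_list=[out.name]` became `fetch_list=[out]`. PIR values do not carry a `name` attribute the way legacy variables do, so fetching by the returned variable object is the form that works under both IRs. A hedged sketch of the portable pattern:

    import numpy as np
    import paddle

    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.static.data(name="x", shape=[3], dtype="complex64")
        out = paddle.real(x)
        exe = paddle.static.Executor(paddle.CPUPlace())
        (res,) = exe.run(
            feed={"x": np.array([1 + 2j, 3 + 4j, 5 + 6j], dtype="complex64")},
            fetch_list=[out],  # fetch by object, not by .name
        )
    np.testing.assert_array_equal(
        res, np.array([1.0, 3.0, 5.0], dtype="float32")
    )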