
【PIR API adaptor No.66, 68, 110, 136】Migrate some ops into pir #58287

Merged · 22 commits · Oct 25, 2023
3 changes: 1 addition & 2 deletions python/paddle/nn/initializer/constant.py
@@ -72,10 +72,9 @@ def forward(self, var, block=None):
if self._force_cpu:
place = core.CPUPlace()
if in_dygraph_mode():
_C_ops.full_(
return _C_ops.full_(
var, var.shape, float(self._value), var.dtype, place
)
return None
else:
return _C_ops.full(
var.shape, float(self._value), var.dtype, place
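
Note for context: before this hunk, the dygraph branch invoked the in-place `_C_ops.full_` and then returned `None`; it now returns the op result, matching the static branch below it. A minimal usage sketch of the fixed path (shapes and values are arbitrary; standard `ParamAttr` wiring assumed):

```python
import paddle

# The Constant initializer's dygraph branch now returns the result of the
# in-place _C_ops.full_ call instead of None, consistent with the static branch.
weight_attr = paddle.ParamAttr(
    initializer=paddle.nn.initializer.Constant(value=2.0)
)
linear = paddle.nn.Linear(3, 4, weight_attr=weight_attr)
print(linear.weight)  # every entry initialized to 2.0
```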
10 changes: 3 additions & 7 deletions python/paddle/tensor/attribute.py
@@ -20,11 +20,7 @@
from paddle import _C_ops

from ..base.data_feeder import check_type, check_variable_and_dtype
from ..base.framework import (
in_dygraph_mode,
in_dynamic_or_pir_mode,
in_pir_mode,
)
from ..base.framework import in_dynamic_or_pir_mode, in_pir_mode
from ..common_ops_import import Variable
from ..framework import LayerHelper, core
from .creation import _complex_to_real_dtype, assign
@@ -300,7 +296,7 @@ def real(x, name=None):
[[1., 2., 3.],
[4., 5., 6.]])
"""
if in_dygraph_mode():
if in_dynamic_or_pir_mode():
return _C_ops.real(x)
else:
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
@@ -348,7 +344,7 @@ def imag(x, name=None):
[[6., 5., 4.],
[3., 2., 1.]])
"""
if in_dygraph_mode():
if in_dynamic_or_pir_mode():
return _C_ops.imag(x)
else:
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
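
Both hunks above replace the dygraph-only fast path with `in_dynamic_or_pir_mode()`, so the `_C_ops` call now also serves the new PIR static mode. A quick dygraph check of the two affected APIs (inputs chosen to match the docstring examples):

```python
import paddle

x = paddle.to_tensor(
    [[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]], dtype='complex64'
)
print(paddle.real(x))  # [[1., 2., 3.], [4., 5., 6.]]
print(paddle.imag(x))  # [[6., 5., 4.], [3., 2., 1.]]
```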
4 changes: 2 additions & 2 deletions python/paddle/tensor/logic.py
@@ -139,7 +139,7 @@ def logical_and(x, y, out=None, name=None):
[True , False, True , False])

"""
if in_dynamic_mode():
if in_dynamic_or_pir_mode():
return _C_ops.logical_and(x, y)

return _logical_op(
@@ -413,7 +413,7 @@ def equal_all(x, y, name=None):
Tensor(shape=[], dtype=bool, place=Place(cpu), stop_gradient=True,
False)
"""
if in_dynamic_mode():
if in_dynamic_or_pir_mode():
return _C_ops.equal_all(x, y)
else:
helper = LayerHelper("equal_all", **locals())
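
Same migration pattern for the two logic ops; a short sketch exercising them in dygraph mode (arbitrary inputs):

```python
import paddle

x = paddle.to_tensor([True, True, False, False])
y = paddle.to_tensor([True, False, True, False])
print(paddle.logical_and(x, y))  # [True, False, False, False]
print(paddle.equal_all(x, x))    # 0-D bool tensor holding True
```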
4 changes: 2 additions & 2 deletions python/paddle/tensor/manipulation.py
@@ -1413,7 +1413,7 @@ def flip(x, axis, name=None):
if isinstance(axis, int):
axis = [axis]

if in_dynamic_mode():
if in_dynamic_or_pir_mode():
return _C_ops.flip(x, axis)
else:
helper = LayerHelper("flip", **locals())
@@ -3430,7 +3430,7 @@ def expand_as(x, y, name=None):
[[1, 2, 3],
[1, 2, 3]])
"""
if in_dynamic_mode():
if in_dynamic_or_pir_mode():
return _C_ops.expand_as(x, None, y.shape)
else:
check_variable_and_dtype(
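
`flip` and `expand_as` get the same `in_dynamic_or_pir_mode()` treatment; a small sketch of both (the zeros tensor exists only to supply a target shape):

```python
import paddle

x = paddle.to_tensor([[1, 2, 3]])
print(paddle.flip(x, axis=[1]))  # [[3, 2, 1]]

y = paddle.zeros([2, 3], dtype='int64')  # only y's shape matters here
print(paddle.expand_as(x, y))    # [[1, 2, 3], [1, 2, 3]]
```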
4 changes: 2 additions & 2 deletions python/paddle/tensor/math.py
@@ -4437,7 +4437,7 @@ def isnan(x, name=None):
Tensor(shape=[7], dtype=bool, place=Place(cpu), stop_gradient=True,
[False, False, False, False, False, True , True ])
"""
if in_dynamic_mode():
if in_dynamic_or_pir_mode():
return _C_ops.isnan(x)
else:
helper = LayerHelper("isnan_v2", **locals())
@@ -5209,7 +5209,7 @@ def logit(x, eps=None, name=None):
"""
if eps is None:
eps = 0.0
if in_dynamic_mode():
if in_dynamic_or_pir_mode():
return _C_ops.logit(x, eps)
else:
check_variable_and_dtype(
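
`isnan` and `logit` follow suit; a quick numeric check (eps value arbitrary):

```python
import paddle

x = paddle.to_tensor([0.25, 0.5, float('nan')])
print(paddle.isnan(x))            # [False, False, True]
print(paddle.logit(x, eps=1e-8))  # log(x / (1 - x)); logit(0.5) is 0, NaN propagates
```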
2 changes: 1 addition & 1 deletion python/paddle/tensor/search.py
@@ -459,7 +459,7 @@ def nonzero(x, as_tuple=False):
shape = x.shape
rank = len(shape)

if in_dynamic_mode():
if in_dynamic_or_pir_mode():
outs = _C_ops.nonzero(x)
else:
check_variable_and_dtype(
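
And `nonzero` likewise; for reference, both of its return forms (arbitrary input):

```python
import paddle

x = paddle.to_tensor([[1, 0], [0, 3]])
print(paddle.nonzero(x))                 # [[0, 0], [1, 1]], one row per non-zero entry
print(paddle.nonzero(x, as_tuple=True))  # one index tensor per dimension instead
```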
8 changes: 4 additions & 4 deletions test/legacy_test/test_compare_reduce_op.py
@@ -32,7 +32,7 @@ def setUp(self):
self.op_type = op_type

def test_output(self):
self.check_output()
self.check_output(check_pir=True)

cls_name = "{}_{}_{}".format(op_type, typename, 'not_equal_all')
Cls.__name__ = cls_name
@@ -51,7 +51,7 @@ def setUp(self):
self.op_type = op_type

def test_output(self):
self.check_output()
self.check_output(check_pir=True)

cls_name = "{}_{}_{}".format(op_type, typename, 'not_shape_equal_all')
Cls.__name__ = cls_name
@@ -69,7 +69,7 @@ def setUp(self):
self.op_type = op_type

def test_output(self):
self.check_output()
self.check_output(check_pir=True)

cls_name = "{}_{}_{}".format(op_type, typename, 'equal_all')
Cls.__name__ = cls_name
@@ -89,7 +89,7 @@ def setUp(self):
self.op_type = op_type

def test_output(self):
self.check_output()
self.check_output(check_pir=True)

cls_name = "{}_{}_{}".format(op_type, typename, 'equal_all')
Cls.__name__ = cls_name
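
The only change in these four generated classes is `check_pir=True`, which tells the OpTest harness to additionally verify the op under the PIR executor. A standalone sketch of the pattern (the class name, shapes, and the `op_test` import path are illustrative assumptions; the real classes above are generated per dtype):

```python
import numpy as np
import paddle
from op_test import OpTest  # test/legacy_test harness; import path assumed

class TestEqualAllPir(OpTest):  # hypothetical class for illustration
    def setUp(self):
        self.op_type = 'equal_all'
        self.python_api = paddle.tensor.equal_all  # PIR checking needs the Python API
        x = np.random.random((3, 4)).astype('float32')
        self.inputs = {'X': x, 'Y': x.copy()}
        self.outputs = {'Out': np.array([True])}

    def test_output(self):
        # Verifies the op under the legacy IR and, with check_pir=True, under PIR.
        self.check_output(check_pir=True)
```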
51 changes: 27 additions & 24 deletions test/legacy_test/test_expand_as_v2_op.py
@@ -20,6 +20,7 @@
import paddle
from paddle import base
from paddle.base import core
from paddle.pir_utils import test_with_pir_api


class TestExpandAsBasic(OpTest):
@@ -48,10 +49,10 @@ def if_enable_cinn(self):
pass

def test_check_output(self):
self.check_output(check_prim=True)
self.check_output(check_prim=True, check_pir=True)

def test_check_grad(self):
self.check_grad(['X'], 'Out', check_prim=True)
self.check_grad(['X'], 'Out', check_prim=True, check_pir=True)


class TestExpandAs_ZeroDim1(TestExpandAsBasic):
@@ -104,11 +105,11 @@ def if_enable_cinn(self):
self.enable_cinn = False

def test_check_output(self):
self.check_output_with_place(place=paddle.CUDAPlace(0))
self.check_output_with_place(place=paddle.CUDAPlace(0), check_pir=True)

def test_check_grad(self):
self.check_grad_with_place(
paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True
paddle.CUDAPlace(0), ['X'], 'Out', check_prim=True, check_pir=True
)


@@ -242,7 +243,7 @@ def setUp(self):
self.outputs = {'Out': convert_float_to_uint16(output)}

def test_check_output(self):
self.check_output_with_place(place=paddle.CUDAPlace(0))
self.check_output_with_place(place=paddle.CUDAPlace(0), check_pir=True)

def test_check_grad(self):
pass
@@ -261,26 +262,28 @@ def test_errors(self):

# Test python API
class TestExpandAsV2API(unittest.TestCase):
@test_with_pir_api
def test_api(self):
input1 = np.random.random([12, 14]).astype("float32")
input2 = np.random.random([2, 12, 14]).astype("float32")
x = paddle.static.data(name='x', shape=[12, 14], dtype="float32")

y = paddle.static.data(
name='target_tensor',
shape=[2, 12, 14],
dtype="float32",
)

out_1 = paddle.expand_as(x, y=y)

exe = base.Executor(place=base.CPUPlace())
res_1 = exe.run(
base.default_main_program(),
feed={"x": input1, "target_tensor": input2},
fetch_list=[out_1],
)
np.testing.assert_array_equal(res_1[0], np.tile(input1, (2, 1, 1)))
with paddle.static.program_guard(paddle.static.Program()):
input1 = np.random.random([12, 14]).astype("float32")
input2 = np.random.random([2, 12, 14]).astype("float32")
x = paddle.static.data(name='x', shape=[12, 14], dtype="float32")

y = paddle.static.data(
name='target_tensor',
shape=[2, 12, 14],
dtype="float32",
)

out_1 = paddle.expand_as(x, y=y)

exe = base.Executor(place=base.CPUPlace())
res_1 = exe.run(
paddle.static.default_main_program(),
feed={"x": input1, "target_tensor": input2},
fetch_list=[out_1],
)
np.testing.assert_array_equal(res_1[0], np.tile(input1, (2, 1, 1)))


if __name__ == "__main__":
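
The rewritten API test builds its graph inside a fresh `paddle.static.Program()` and fetches through `paddle.static.default_main_program()`; that isolation is what lets `@test_with_pir_api` run the same body once per IR. A condensed sketch of the pattern, with `paddle.flip` standing in as an arbitrary op:

```python
import unittest

import numpy as np
import paddle
from paddle.pir_utils import test_with_pir_api

paddle.enable_static()

class ExamplePirApiTest(unittest.TestCase):  # illustrative name
    @test_with_pir_api  # runs the body under both the legacy IR and PIR
    def test_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('x', shape=[3], dtype='float32')
            out = paddle.flip(x, axis=[0])
            exe = paddle.static.Executor(paddle.CPUPlace())
            (res,) = exe.run(
                paddle.static.default_main_program(),
                feed={'x': np.array([1.0, 2.0, 3.0], dtype='float32')},
                fetch_list=[out],
            )
        np.testing.assert_array_equal(
            res, np.array([3.0, 2.0, 1.0], dtype='float32')
        )
```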
16 changes: 10 additions & 6 deletions test/legacy_test/test_flip.py
@@ -100,10 +100,10 @@ def init_attrs(self):
self.attrs = {"axis": self.axis}

def test_check_output(self):
self.check_output(check_cinn=True)
self.check_output(check_cinn=True, check_pir=True)

def test_check_grad(self):
self.check_grad(["X"], "Out", check_cinn=True)
self.check_grad(["X"], "Out", check_cinn=True, check_pir=True)

def init_test_case(self):
self.in_shape = (6, 4, 2, 3)
@@ -167,12 +167,16 @@ def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, check_cinn=True)
self.check_output_with_place(
place, check_cinn=True, check_pir=True
)

def test_check_grad(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_grad_with_place(place, ["X"], "Out", check_cinn=True)
self.check_grad_with_place(
place, ["X"], "Out", check_cinn=True, check_pir=True
)

cls_name = "{}_{}".format(parent.__name__, "FP16OP")
TestFlipFP16.__name__ = cls_name
@@ -202,12 +206,12 @@ def init_dtype(self):
def test_check_output(self):
place = core.CUDAPlace(0)
if core.is_bfloat16_supported(place):
self.check_output_with_place(place)
self.check_output_with_place(place, check_pir=True)

def test_check_grad(self):
place = core.CUDAPlace(0)
if core.is_bfloat16_supported(place):
self.check_grad_with_place(place, ["X"], "Out")
self.check_grad_with_place(place, ["X"], "Out", check_pir=True)

cls_name = "{}_{}".format(parent.__name__, "BF16OP")
TestFlipBF16.__name__ = cls_name
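
The FP16/BF16 variants only run their PIR checks where the device supports those dtypes; a quick capability probe mirroring the guards above:

```python
from paddle.base import core

if core.is_compiled_with_cuda():
    place = core.CUDAPlace(0)
    print('fp16 supported:', core.is_float16_supported(place))
    print('bf16 supported:', core.is_bfloat16_supported(place))
else:
    print('CUDA not available; the FP16/BF16 checks above would be skipped')
```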
81 changes: 42 additions & 39 deletions test/legacy_test/test_full_op.py
@@ -19,60 +19,63 @@
import paddle
from paddle import base
from paddle.base import Program, program_guard
from paddle.pir_utils import test_with_pir_api


# Test python API
class TestFullAPI(unittest.TestCase):
@test_with_pir_api
def test_api(self):
positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2)
with paddle.static.program_guard(paddle.static.Program()):
positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2)

positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)
shape_tensor_int32 = paddle.static.data(
name="shape_tensor_int32", shape=[2], dtype="int32"
)
positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2)
shape_tensor_int32 = paddle.static.data(
name="shape_tensor_int32", shape=[2], dtype="int32"
)

shape_tensor_int64 = paddle.static.data(
name="shape_tensor_int64", shape=[2], dtype="int64"
)
shape_tensor_int64 = paddle.static.data(
name="shape_tensor_int64", shape=[2], dtype="int64"
)

out_1 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1)
out_1 = paddle.full(shape=[1, 2], dtype="float32", fill_value=1.1)

out_2 = paddle.full(
shape=[1, positive_2_int32], dtype="float32", fill_value=1.1
)
out_2 = paddle.full(
shape=[1, positive_2_int32], dtype="float32", fill_value=1.1
)

out_3 = paddle.full(
shape=[1, positive_2_int64], dtype="float32", fill_value=1.1
)
out_3 = paddle.full(
shape=[1, positive_2_int64], dtype="float32", fill_value=1.1
)

out_4 = paddle.full(
shape=shape_tensor_int32, dtype="float32", fill_value=1.2
)
out_4 = paddle.full(
shape=shape_tensor_int32, dtype="float32", fill_value=1.2
)

out_5 = paddle.full(
shape=shape_tensor_int64, dtype="float32", fill_value=1.1
)
out_5 = paddle.full(
shape=shape_tensor_int64, dtype="float32", fill_value=1.1
)

out_6 = paddle.full(
shape=shape_tensor_int64, dtype=np.float32, fill_value=1.1
)
out_6 = paddle.full(
shape=shape_tensor_int64, dtype=np.float32, fill_value=1.1
)

val = paddle.tensor.fill_constant(
shape=[1], dtype=np.float32, value=1.1
)
out_7 = paddle.full(
shape=shape_tensor_int64, dtype=np.float32, fill_value=val
)
val = paddle.tensor.fill_constant(
shape=[1], dtype=np.float32, value=1.1
)
out_7 = paddle.full(
shape=shape_tensor_int64, dtype=np.float32, fill_value=val
)

exe = base.Executor(place=base.CPUPlace())
res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
base.default_main_program(),
feed={
"shape_tensor_int32": np.array([1, 2]).astype("int32"),
"shape_tensor_int64": np.array([1, 2]).astype("int64"),
},
fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7],
)
exe = base.Executor(place=base.CPUPlace())
res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run(
paddle.static.default_main_program(),
feed={
"shape_tensor_int32": np.array([1, 2]).astype("int32"),
"shape_tensor_int64": np.array([1, 2]).astype("int64"),
},
fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7],
)

np.testing.assert_array_equal(
res_1, np.full([1, 2], 1.1, dtype="float32")
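
Beyond the `program_guard` wrapper and the decorator, the functional change here is fetching through `paddle.static.default_main_program()` instead of `base.default_main_program()`, so the guarded program resolves correctly under either IR. A trimmed single-output sketch (shape and fill value mirror `out_1` above):

```python
import numpy as np
import paddle

paddle.enable_static()

with paddle.static.program_guard(paddle.static.Program()):
    out = paddle.full(shape=[1, 2], dtype='float32', fill_value=1.1)
    exe = paddle.static.Executor(paddle.CPUPlace())
    # Fetch via the guarded program, as the updated test does.
    (res,) = exe.run(paddle.static.default_main_program(), fetch_list=[out])

np.testing.assert_array_equal(res, np.full([1, 2], 1.1, dtype='float32'))
```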
1 change: 1 addition & 0 deletions test/legacy_test/test_logical_op.py
@@ -67,6 +67,7 @@
}


# @test_with_pir_api
def run_static(x_np, y_np, op_str, use_gpu=False, binary_op=True):
paddle.enable_static()
startup_program = Program()