
【PIR API adaptor No.45-47】Migrate some ops into pir (#58682)
longranger2 authored Nov 20, 2023
1 parent cbafa02 commit d963050
Showing 6 changed files with 74 additions and 39 deletions.
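All three Python-side API changes follow one pattern: the dynamic-graph fast path `if in_dynamic_mode():` widens to `if in_dynamic_or_pir_mode():`, so the same _C_ops call now also serves programs built under PIR (Paddle's new intermediate representation), while the legacy static-graph branch is left untouched. Below is a minimal sketch of that pattern, assuming the helper is importable from paddle.base.framework as in these files; my_op is hypothetical and stands in for the ops this commit actually touches (crop, cross, cross_entropy_with_softmax).

# Hypothetical sketch of the dispatch pattern this commit applies;
# my_op stands in for crop / cross / cross_entropy_with_softmax.
from paddle import _C_ops
from paddle.base.framework import in_dynamic_or_pir_mode


def my_op(x):
    if in_dynamic_or_pir_mode():
        # Dynamic graph and the PIR static graph share the same C++
        # entry point, so one branch now covers both execution modes.
        return _C_ops.my_op(x)
    # The legacy static graph still builds the op via LayerHelper/append_op.
    ...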
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/loss.py
@@ -271,7 +271,7 @@ def base_softmax_with_cross_entropy(
         )
     if input_dims - 1 == label_dims:
         label = paddle.unsqueeze(label, axis=axis)
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         softmax, loss = _C_ops.cross_entropy_with_softmax(
             logits,
             label,
2 changes: 1 addition & 1 deletion python/paddle/tensor/linalg.py
@@ -1467,7 +1467,7 @@ def cross(x, y, axis=9, name=None):
             [0., 0., 0.],
             [0., 0., 0.]])
     """
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
        axis = K_DEFAULT_DIM if axis is None else axis
        return _C_ops.cross(x, y, axis)
    else:
12 changes: 9 additions & 3 deletions python/paddle/tensor/manipulation.py
@@ -781,10 +781,16 @@ def crop(x, shape=None, offsets=None, name=None):
         x, 'x', ['float32', 'float64', 'int32', 'int64'], 'crop_tensor'
     )
     check_type(
-        shape, 'shape', (list, tuple, Variable, type(None)), 'crop_tensor'
+        shape,
+        'shape',
+        (list, tuple, Variable, type(None), paddle.pir.OpResult),
+        'crop_tensor',
     )
     check_type(
-        offsets, 'offsets', (list, tuple, Variable, type(None)), 'crop_tensor'
+        offsets,
+        'offsets',
+        (list, tuple, Variable, type(None), paddle.pir.OpResult),
+        'crop_tensor',
     )
 
     if offsets is None:
@@ -793,7 +799,7 @@ def crop(x, shape=None, offsets=None, name=None):
     if shape is None:
         shape = x.shape
 
-    if in_dynamic_mode():
+    if in_dynamic_or_pir_mode():
         return _C_ops.crop(x, shape, offsets)
 
     out = helper.create_variable_for_type_inference(x.dtype)
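With the type whitelist extended to paddle.pir.OpResult and the dispatch widened, paddle.crop behaves the same whether called eagerly or inside a PIR program. A quick sanity check in dynamic mode (values are illustrative, not taken from the diff):

import paddle

x = paddle.arange(9, dtype='float32').reshape([3, 3])
# Take a 2x2 window starting at row 0, column 1 of
# [[0, 1, 2], [3, 4, 5], [6, 7, 8]].
out = paddle.crop(x, shape=[2, 2], offsets=[0, 1])
print(out)  # [[1., 2.], [4., 5.]]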
8 changes: 4 additions & 4 deletions test/legacy_test/test_crop_tensor_op.py
@@ -81,10 +81,10 @@ def initTestCase(self):
         self.offsets = [1, 2]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_pir=True)
 
 
 class TestCase1(TestCropTensorOp):
@@ -182,10 +182,10 @@ def initTestCase(self):
         self.shape_attr = [0, 0]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_pir=True)
 
 
 class TestCropTensorOpTensorAttrCase1(TestCropTensorOpTensorAttr):
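check_pir=True is the test-side counterpart of the migration: it asks OpTest to additionally lower the case through the PIR pipeline and compare against the same NumPy reference. A condensed sketch of how a case in this file is wired, with illustrative setUp values (the real cases also cover tensor-valued shape/offsets):

import numpy as np

import paddle
from op_test import OpTest  # resolvable when run from test/legacy_test


class TestCropTensorPirSketch(OpTest):
    def setUp(self):
        self.op_type = 'crop_tensor'
        self.python_api = paddle.crop
        x = np.random.random((10, 10)).astype('float64')
        self.inputs = {'X': x}
        self.attrs = {'offsets': [1, 2], 'shape': [2, 2]}
        self.outputs = {'Out': x[1:3, 2:4]}

    def test_check_output(self):
        # Also builds and runs the op under PIR before comparing.
        self.check_output(check_pir=True)

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out', check_pir=True)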
38 changes: 27 additions & 11 deletions test/legacy_test/test_cross_op.py
@@ -19,7 +19,8 @@
 
 import paddle
 from paddle import base
-from paddle.base import Program, core, program_guard
+from paddle.base import core
+from paddle.pir_utils import test_with_pir_api
 
 
 class TestCrossOp(OpTest):
@@ -47,10 +48,10 @@ def init_output(self):
         self.outputs = {'Out': np.array(z_list).reshape(self.shape)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_pir=True)
 
 
 class TestCrossOpCase1(TestCrossOp):
@@ -116,13 +117,15 @@ def test_check_output(self):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
             if core.is_bfloat16_supported(place):
-                self.check_output_with_place(place)
+                self.check_output_with_place(place, check_pir=True)
 
     def test_check_grad_normal(self):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
             if core.is_bfloat16_supported(place):
-                self.check_grad_with_place(place, ['X', 'Y'], 'Out')
+                self.check_grad_with_place(
+                    place, ['X', 'Y'], 'Out', check_pir=True
+                )
 
 
 class TestCrossAPI(unittest.TestCase):
@@ -134,43 +137,56 @@ def input_data(self):
             [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
         ).astype('float32')
 
+    @test_with_pir_api
     def test_cross_api(self):
         self.input_data()
 
+        main = paddle.static.Program()
+        startup = paddle.static.Program()
         # case 1:
-        with program_guard(Program(), Program()):
+        with paddle.static.program_guard(main, startup):
             x = paddle.static.data(name='x', shape=[-1, 3], dtype="float32")
             y = paddle.static.data(name='y', shape=[-1, 3], dtype="float32")
             z = paddle.cross(x, y, axis=1)
             exe = base.Executor(base.CPUPlace())
             (res,) = exe.run(
+                main,
                 feed={'x': self.data_x, 'y': self.data_y},
-                fetch_list=[z.name],
+                fetch_list=[z],
                 return_numpy=False,
             )
         expect_out = np.array(
             [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
         )
         np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05)
 
+        main = paddle.static.Program()
+        startup = paddle.static.Program()
         # case 2:
-        with program_guard(Program(), Program()):
+        with paddle.static.program_guard(main, startup):
             x = paddle.static.data(name='x', shape=[-1, 3], dtype="float32")
             y = paddle.static.data(name='y', shape=[-1, 3], dtype="float32")
             z = paddle.cross(x, y)
             exe = base.Executor(base.CPUPlace())
             (res,) = exe.run(
+                main,
                 feed={'x': self.data_x, 'y': self.data_y},
-                fetch_list=[z.name],
+                fetch_list=[z],
                 return_numpy=False,
             )
         expect_out = np.array(
             [[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0], [-1.0, -1.0, -1.0]]
         )
         np.testing.assert_allclose(expect_out, np.array(res), rtol=1e-05)
 
-        # case 3:
-        with program_guard(Program(), Program()):
+    def test_cross_api1(self):
+        self.input_data()
+
+        main = paddle.static.Program()
+        startup = paddle.static.Program()
+
+        # case 1:
+        with paddle.static.program_guard(main, startup):
             x = paddle.static.data(name="x", shape=[-1, 3], dtype="float32")
             y = paddle.static.data(name='y', shape=[-1, 3], dtype='float32')
 
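Two mechanisms appear in this file. Kernel-level OpTest cases take check_pir=True as above, while the Python-API test instead gets the @test_with_pir_api decorator from paddle.pir_utils, which reruns the decorated body under both the legacy IR and PIR. That is also why fetch_list switches from [z.name] to [z]: PIR values are OpResults and carry no name, so results must be fetched by value. A self-contained sketch of that shape (inputs chosen so the expected cross product is all zeros):

import unittest

import numpy as np

import paddle
from paddle.pir_utils import test_with_pir_api


class TestCrossPirSketch(unittest.TestCase):
    @test_with_pir_api  # run once under legacy IR, once under PIR
    def test_cross(self):
        main = paddle.static.Program()
        startup = paddle.static.Program()
        with paddle.static.program_guard(main, startup):
            x = paddle.static.data(name='x', shape=[-1, 3], dtype='float32')
            y = paddle.static.data(name='y', shape=[-1, 3], dtype='float32')
            z = paddle.cross(x, y, axis=1)
            exe = paddle.static.Executor(paddle.CPUPlace())
            (res,) = exe.run(
                main,
                feed={
                    'x': np.ones((3, 3), dtype='float32'),
                    'y': np.ones((3, 3), dtype='float32'),
                },
                # Fetch the value object, not z.name.
                fetch_list=[z],
            )
        # cross(v, v) == 0 for identical rows.
        np.testing.assert_allclose(res, np.zeros((3, 3), dtype='float32'))


if __name__ == '__main__':
    unittest.main()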
51 changes: 32 additions & 19 deletions test/legacy_test/test_softmax_with_cross_entropy_op.py
@@ -153,27 +153,30 @@ def setUp(self):
 
     def test_check_output(self):
         if self.python_api is not None:
-            self.check_output()
-        self.check_output()
+            self.check_output(check_pir=True)
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         if core.is_compiled_with_rocm():
             if self.python_api is not None:
                 self.check_grad(
-                    ["Logits"],
-                    "Loss",
-                    max_relative_error=5e-1,
+                    ["Logits"], "Loss", max_relative_error=5e-1, check_pir=False
                 )
             # HIP will have accuracy fail when using float32 in CPU place
-            self.check_grad(["Logits"], "Loss", max_relative_error=5e-1)
+            self.check_grad(
+                ["Logits"], "Loss", max_relative_error=5e-1, check_pir=False
+            )
         else:
             if self.python_api is not None:
                 self.check_grad(
                     ["Logits"],
                     "Loss",
                     numeric_grad_delta=0.001,
+                    check_pir=False,
                 )
-            self.check_grad(["Logits"], "Loss", numeric_grad_delta=0.001)
+            self.check_grad(
+                ["Logits"], "Loss", numeric_grad_delta=0.001, check_pir=False
+            )
 
 
 class TestSoftmaxWithCrossEntropyOpInt32(TestSoftmaxWithCrossEntropyOp):
@@ -509,13 +512,15 @@ def setUp(self):
 
     def test_check_output(self):
         if self.python_api is not None:
-            self.check_output()
-        self.check_output()
+            self.check_output(check_pir=True)
+        self.check_output(check_pir=True)
 
     def test_check_grad(self):
         if self.python_api is not None:
-            self.check_grad(["Logits"], "Loss")
-        self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
+            self.check_grad(["Logits"], "Loss", check_pir=False)
+        self.check_grad(
+            ["Logits"], "Loss", max_relative_error=0.1, check_pir=False
+        )
 
 
 class TestSoftmaxWithCrossEntropyOpNoCudnnFp16(
@@ -534,8 +539,12 @@ def initParams(self):
 
     def test_check_grad(self):
         if self.python_api is not None:
-            self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
-        self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
+            self.check_grad(
+                ["Logits"], "Loss", max_relative_error=0.1, check_pir=False
+            )
+        self.check_grad(
+            ["Logits"], "Loss", max_relative_error=0.1, check_pir=False
+        )
 
 
 class TestSoftmaxWithCrossEntropyOp2(TestSoftmaxWithCrossEntropyOp):
Expand All @@ -557,19 +566,23 @@ def initParams(self):

def test_check_output(self):
if self.python_api is not None:
self.check_output()
self.check_output()
self.check_output(check_pir=True)
self.check_output(check_pir=True)

def test_check_grad(self):
if core.is_compiled_with_rocm():
# HIP will have accuracy fail when using float32 in CPU place
if self.python_api is not None:
self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
self.check_grad(
["Logits"], "Loss", max_relative_error=0.1, check_pir=False
)
self.check_grad(
["Logits"], "Loss", max_relative_error=0.1, check_pir=False
)
else:
if self.python_api is not None:
self.check_grad(["Logits"], "Loss")
self.check_grad(["Logits"], "Loss")
self.check_grad(["Logits"], "Loss", check_pir=False)
self.check_grad(["Logits"], "Loss", check_pir=False)


class TestSoftmaxWithCrossEntropyOp3(TestSoftmaxWithCrossEntropyOp):
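Note the asymmetry in this last file: the output checks move onto PIR (check_pir=True) while every gradient check explicitly pins check_pir=False. The likely reading is that the PIR backward path for cross_entropy_with_softmax was not yet ready to verify when this landed, so the grad checks stay on the legacy pipeline for now.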
