Commit e080c2e

【PIR API adaptor No.261、273、283、285、286、313、315】 Migrate is_tensor/median/nanmean/nansum/neg/Unflatten/var into pir (PaddlePaddle#59509)
DrRyanHuang authored and SigureMo committed Dec 5, 2023
1 parent 3340a7e commit e080c2e
Showing 12 changed files with 137 additions and 84 deletions.
6 changes: 4 additions & 2 deletions python/paddle/tensor/logic.py
@@ -1106,8 +1106,10 @@ def is_tensor(x):
            False
    """
-    if in_dynamic_mode():
-        return isinstance(x, (Tensor, paddle.base.core.eager.Tensor))
+    if in_dynamic_or_pir_mode():
+        return isinstance(
+            x, (Tensor, paddle.base.core.eager.Tensor, paddle.pir.Value)
+        )
     else:
         return isinstance(x, Variable)

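The practical effect: `paddle.is_tensor` now recognizes PIR static-graph values. A minimal sketch of the new behavior (my own illustration, not part of the commit; it assumes a Paddle build with the PIR API active, e.g. under the `test_with_pir_api` decorator or the `FLAGS_enable_pir_api=1` flag, in which mode `paddle.static.data` yields a `paddle.pir.Value`):

    import paddle

    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        # Under PIR this is a paddle.pir.Value; under the legacy IR, a Variable.
        x = paddle.static.data(name='x', shape=[3, 2, 4], dtype='float32')
        print(paddle.is_tensor(x))  # True in both IRs after this change

Before the change, a `pir.Value` fell through to the `isinstance(x, Variable)` branch and `is_tensor` returned False under PIR.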
2 changes: 1 addition & 1 deletion python/paddle/tensor/manipulation.py
@@ -5653,7 +5653,7 @@ def unflatten(x, axis, shape, name=None):
         new_shape = (
             list(x.shape[:axis]) + list(shape) + list(x.shape[axis + 1 :])
         )
-    elif isinstance(shape, Variable):
+    elif isinstance(shape, (Variable, paddle.pir.Value)):
         # The data type returned by `paddle.shape` is only 'int32'.
         new_shape = paddle.concat(
             [
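For `unflatten`, the relaxed check means a Tensor-valued `shape` keeps working when the program is built under PIR, where that tensor is a `paddle.pir.Value` rather than a `Variable`. A hedged sketch (again editorial; `paddle.assign` is just one way to materialize a shape tensor in a static program):

    import numpy as np
    import paddle

    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.static.data(name='x', shape=[4, 6, 8], dtype='float32')
        # A shape tensor: a Variable under the legacy IR, a pir.Value under PIR.
        shape = paddle.assign(np.array([2, 3], dtype='int32'))
        y = paddle.unflatten(x, axis=1, shape=shape)  # splits dim 1 (6 -> 2 x 3)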
14 changes: 10 additions & 4 deletions python/paddle/tensor/stat.py
@@ -16,6 +16,7 @@

 import paddle
 from paddle import _C_ops
+from paddle.base.libpaddle import DataType
 from paddle.framework import in_dynamic_mode, in_dynamic_or_pir_mode

 from ..base.data_feeder import check_type, check_variable_and_dtype
@@ -313,7 +314,7 @@ def nanmedian(x, axis=None, keepdim=False, name=None):
         >>> print(y4.numpy())
         2.0
     """
-    if not isinstance(x, Variable):
+    if not isinstance(x, (Variable, paddle.pir.Value)):
         raise TypeError("In median, the input x should be a Tensor.")

     if isinstance(axis, (list, tuple)) and len(axis) == 0:
@@ -403,10 +404,11 @@ def median(x, axis=None, keepdim=False, name=None):
             [[4., 5., 6., 7.]])
     """
-    if not isinstance(x, Variable):
+    if not isinstance(x, (Variable, paddle.pir.Value)):
         raise TypeError("In median, the input x should be a Tensor.")

-    if x.size == 0:
+    if in_dynamic_mode() and x.size == 0:
+        # TODO: Currently, `__eq__` doesn't support arguments (`pir.Value` & `int`)
         raise ValueError("In median, the size of input x should not be 0.")

     is_flatten = False
@@ -435,7 +437,11 @@ def median(x, axis=None, keepdim=False, name=None):
         sz = x.shape[axis]
         kth = sz >> 1
         tensor_topk, idx = paddle.topk(x, kth + 1, axis=axis, largest=False)
-        dtype = 'float64' if x.dtype == core.VarDesc.VarType.FP64 else 'float32'
+        dtype = (
+            'float64'
+            if x.dtype in [core.VarDesc.VarType.FP64, DataType.FLOAT64]
+            else 'float32'
+        )
         if sz & 1 == 0:
             out_tensor = paddle.slice(
                 tensor_topk, axes=[axis], starts=[kth - 1], ends=[kth]
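The dtype change is the usual dual-IR pattern: legacy static graphs report dtypes as `core.VarDesc.VarType` values, while PIR reports `paddle.base.libpaddle.DataType`, so the float64 check must accept both spellings. Distilled into a standalone helper (hypothetical, for illustration only):

    from paddle.base import core
    from paddle.base.libpaddle import DataType

    def _median_out_dtype(x):
        # FP64 inputs keep float64 results; everything else falls back to float32.
        if x.dtype in (core.VarDesc.VarType.FP64, DataType.FLOAT64):
            return 'float64'
        return 'float32'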
12 changes: 6 additions & 6 deletions test/legacy_test/test_bce_with_logits_loss.py
@@ -167,7 +167,7 @@ def test_BCEWithLogitsLoss(self):
             np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)

         @test_with_pir_api
-        def test_dynamic_or_pir_mode():
+        def test_static_or_pir_mode():
             static_result = test_static(
                 place, logit_np, label_np, reduction=reduction
             )
@@ -192,7 +192,7 @@ def test_dynamic_or_pir_mode():
                 static_functional, dy_functional, rtol=1e-05
             )

-        test_dynamic_or_pir_mode()
+        test_static_or_pir_mode()

     def test_BCEWithLogitsLoss_weight(self):
         logit_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype(
@@ -230,7 +230,7 @@ def test_BCEWithLogitsLoss_weight(self):
             np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)

         @test_with_pir_api
-        def test_dynamic_or_pir_mode():
+        def test_static_or_pir_mode():
             static_result = test_static(
                 place,
                 logit_np,
@@ -257,7 +257,7 @@ def test_dynamic_or_pir_mode():
                 static_functional, dy_functional, rtol=1e-05
             )

-        test_dynamic_or_pir_mode()
+        test_static_or_pir_mode()

     def test_BCEWithLogitsLoss_pos_weight(self):
         logit_np = np.random.uniform(0.1, 0.8, size=(2, 3, 4, 10)).astype(
@@ -294,7 +294,7 @@ def test_BCEWithLogitsLoss_pos_weight(self):
             np.testing.assert_allclose(dy_functional, expected, rtol=1e-05)

         @test_with_pir_api
-        def test_dynamic_or_pir_mode():
+        def test_static_or_pir_mode():
             static_result = test_static(
                 place, logit_np, label_np, weight_np, reduction, pos_weight_np
             )
@@ -315,7 +315,7 @@ def test_dynamic_or_pir_mode():
                 static_functional, dy_functional, rtol=1e-05
             )

-        test_dynamic_or_pir_mode()
+        test_static_or_pir_mode()

     def test_BCEWithLogitsLoss_error(self):
         paddle.disable_static()
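The rename is cosmetic but worth the churn: the inner helper builds and runs a static-graph program (under `@test_with_pir_api`, once per IR), so `test_static_or_pir_mode` describes it accurately where `test_dynamic_or_pir_mode` did not.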
3 changes: 3 additions & 0 deletions test/legacy_test/test_is_tensor.py
@@ -15,6 +15,7 @@
 import unittest

 import paddle
+from paddle.pir_utils import test_with_pir_api

 DELTA = 0.00001

@@ -49,10 +50,12 @@ def setUp(self):
     def tearDown(self):
         paddle.disable_static()

+    @test_with_pir_api
     def test_is_tensor(self):
         x = paddle.rand([3, 2, 4], dtype='float32')
         self.assertTrue(paddle.is_tensor(x))

+    @test_with_pir_api
     def test_is_tensor_array(self):
         x = paddle.tensor.create_array('float32')
         self.assertTrue(paddle.is_tensor(x))
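For readers unfamiliar with the decorator used throughout this commit: as I understand it, `test_with_pir_api` (from `paddle.pir_utils`) runs the wrapped test body twice, once under the legacy static IR and once with the PIR API enabled, so one test covers both IRs. A simplified usage illustration (the test body is hypothetical):

    import paddle
    from paddle.pir_utils import test_with_pir_api

    @test_with_pir_api
    def test_static_roundtrip():
        # Executed twice: once with legacy Program/Variable, once with PIR
        # programs and pir.Value.
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data(name='x', shape=[2, 2], dtype='float32')
            assert paddle.is_tensor(x)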
9 changes: 5 additions & 4 deletions test/legacy_test/test_median.py
@@ -17,7 +17,7 @@
 import numpy as np

 import paddle
-from paddle.static import Program, program_guard
+from paddle.pir_utils import test_with_pir_api

 DELTA = 1e-6

@@ -32,10 +32,10 @@ def static_single_test_median(self, lis_test):
         paddle.enable_static()
         x, axis, keepdims = lis_test
         res_np = np.median(x, axis=axis, keepdims=keepdims)
-        main_program = Program()
-        startup_program = Program()
+        main_program = paddle.static.Program()
+        startup_program = paddle.static.Program()
         exe = paddle.static.Executor()
-        with program_guard(main_program, startup_program):
+        with paddle.static.program_guard(main_program, startup_program):
             x_in = paddle.static.data(shape=x.shape, dtype=x.dtype, name='x')
             y = paddle.median(x_in, axis, keepdims)
             [res_pd] = exe.run(feed={'x': x}, fetch_list=[y])
@@ -48,6 +48,7 @@ def dygraph_single_test_median(self, lis_test):
         res_pd = paddle.median(paddle.to_tensor(x), axis, keepdims)
         self.check_numpy_res(res_pd.numpy(False), res_np)

+    @test_with_pir_api
     def test_median_static(self):
         h = 3
         w = 4
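Note the import swap in test_median.py is not just style: a module-level `from paddle.static import Program, program_guard` binds the legacy objects at import time, whereas `test_with_pir_api` switches which program machinery is in effect per run. Resolving `paddle.static.Program` and `paddle.static.program_guard` at call time — presumably the intent here — lets the decorator substitute the PIR equivalents.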
3 changes: 3 additions & 0 deletions test/legacy_test/test_nanmean_api.py
@@ -18,6 +18,7 @@

 import paddle
 from paddle.base import core
+from paddle.pir_utils import test_with_pir_api

 np.random.seed(10)

@@ -38,6 +39,7 @@ def setUp(self):
             else paddle.CPUPlace()
         )

+    @test_with_pir_api
     def test_api_static(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
@@ -87,6 +89,7 @@ def test_case(x, axis=None, keepdim=False):
         test_case(self.x, [0, 1, 2, 3])
         paddle.enable_static()

+    @test_with_pir_api
     def test_errors(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
9 changes: 6 additions & 3 deletions test/legacy_test/test_nansum_api.py
@@ -18,14 +18,16 @@

 import paddle
 from paddle import base
+from paddle.pir_utils import test_with_pir_api


 class API_Test_Nansum(unittest.TestCase):
+    @test_with_pir_api
     def test_static_graph(self):
         paddle.enable_static()
-        startup_program = base.Program()
-        train_program = base.Program()
-        with base.program_guard(train_program, startup_program):
+        startup_program = paddle.static.Program()
+        train_program = paddle.static.Program()
+        with paddle.static.program_guard(train_program, startup_program):
             input = paddle.static.data(
                 name='input', dtype='float32', shape=[2, 4]
             )
@@ -75,6 +77,7 @@ def test_static_graph(self):
         )

     # test nansum api with float16
+    @test_with_pir_api
     def test_static_graph_fp16(self):
         paddle.enable_static()
         startup_program = paddle.static.Program()
35 changes: 18 additions & 17 deletions test/legacy_test/test_neg_op.py
@@ -17,6 +17,7 @@
 import numpy as np

 import paddle
+from paddle.pir_utils import test_with_pir_api


 class TestNegOp(unittest.TestCase):
@@ -35,26 +36,28 @@ def run_imperative(self):
             dy_result.numpy(), expected_result, rtol=1e-05
         )

+    @test_with_pir_api
     def run_static(self, use_gpu=False):
-        input = paddle.static.data(
-            name='input', shape=[32, 8], dtype=self.dtype
-        )
-        result = paddle.neg(input)
-
-        place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
-        exe = paddle.static.Executor(place)
-        exe.run(paddle.static.default_startup_program())
-        st_result = exe.run(feed={"input": self.input}, fetch_list=[result])
-        expected_result = np.negative(self.input)
-        np.testing.assert_allclose(st_result[0], expected_result, rtol=1e-05)
+        with paddle.static.program_guard(paddle.static.Program()):
+            input = paddle.static.data(
+                name='input', shape=[32, 8], dtype=self.dtype
+            )
+            result = paddle.neg(input)
+
+            place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+            exe.run(paddle.static.default_startup_program())
+            st_result = exe.run(feed={"input": self.input}, fetch_list=[result])
+            expected_result = np.negative(self.input)
+            np.testing.assert_allclose(
+                st_result[0], expected_result, rtol=1e-05
+            )

     def test_cpu(self):
         paddle.disable_static(place=paddle.CPUPlace())
         self.run_imperative()
         paddle.enable_static()
-
-        with paddle.static.program_guard(paddle.static.Program()):
-            self.run_static()
+        self.run_static()

     def test_gpu(self):
         if not paddle.base.core.is_compiled_with_cuda():
@@ -63,9 +66,7 @@ def test_gpu(self):
         paddle.disable_static(place=paddle.CUDAPlace(0))
         self.run_imperative()
         paddle.enable_static()
-
-        with paddle.static.program_guard(paddle.static.Program()):
-            self.run_static(use_gpu=True)
+        self.run_static(use_gpu=True)


class TestNegOpFp32(TestNegOp):
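Moving the `program_guard` from the `test_cpu`/`test_gpu` call sites into `run_static` itself serves the same end as the import changes above: `@test_with_pir_api` wraps `run_static`, so the guard and the fresh `paddle.static.Program()` are created inside the decorated scope and pick up whichever IR is active on each of the two runs.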