[Cherry-pick 2.5][Zero-Dim] paddle.to_tensor support 0D (#53599)
* fix doc errors, test=allcase

* conflict

* test=allcase

* fix doc errors, test=allcase

* fix the to_tensor error
zhengqiwen1997 authored May 9, 2023
1 parent 3ffe8f3 commit 2aefc45
Showing 29 changed files with 214 additions and 174 deletions.
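
Every hunk below applies the same fix: call sites that relied on paddle.to_tensor promoting a Python scalar to a shape-[1] tensor now wrap the scalar in a list, because with 0-D support to_tensor keeps a bare scalar as a 0-D tensor. A minimal sketch of the distinction (illustrative, assuming Paddle 2.5 semantics):

    import paddle

    x = paddle.to_tensor(1.0)    # 0-D after this change: x.shape == [], x.ndim == 0
    y = paddle.to_tensor([1.0])  # 1-D as before: y.shape == [1], y.ndim == 1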
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/imperative.cc
@@ -1351,7 +1351,7 @@ void BindImperative(py::module *m_ptr) {
        import paddle
-       x = paddle.to_tensor(1.0, stop_gradient=False)
+       x = paddle.to_tensor([1.0], stop_gradient=False)
        detach_x = x.detach()
        detach_x[:] = 10.0
        print(x) # Tensor(shape=[1], dtype=float32, place=CPUPlace, stop_gradient=False,
9 changes: 8 additions & 1 deletion python/paddle/audio/functional/window.py
@@ -13,6 +13,8 @@
 import math
 from typing import List, Tuple, Union
 
+import numpy as np
+
 import paddle
 from paddle import Tensor
 
@@ -38,7 +40,12 @@ def get(self, name):
 
 @window_function_register.register()
 def _cat(x: List[Tensor], data_type: str) -> Tensor:
-    l = [paddle.to_tensor(_, data_type) for _ in x]
+    l = []
+    for t in x:
+        if np.isscalar(t) and not isinstance(t, str):
+            l.append(paddle.to_tensor([t], data_type))
+        else:
+            l.append(paddle.to_tensor(t, data_type))
     return paddle.concat(l)
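
The _cat rewrite above separates scalars from array-likes so that everything handed to paddle.concat keeps at least one dimension; the scalar branch wraps t in a list on the assumption that concat cannot operate on 0-D inputs. A short sketch of the wrapped path:

    import paddle

    # Scalars are wrapped to shape [1] before concatenation (assumption:
    # paddle.concat needs an axis to join along, which 0-D tensors lack).
    b = paddle.to_tensor([0.5], 'float32')
    c = paddle.to_tensor([1.5], 'float32')
    print(paddle.concat([b, c]).shape)  # [2]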
@@ -117,7 +117,7 @@ def recv_meta(self, group):
 
     def _send_dims_shape_dtype(self, tensor, group):
        # send len(shape)
-        dims = paddle.to_tensor(len(tensor.shape))
+        dims = paddle.to_tensor([len(tensor.shape)])
        dst_rank = _hcg._get_p2p_next_rank()
 
        paddle.distributed.send(dims, dst=dst_rank, group=group)
@@ -127,11 +127,11 @@ def _send_dims_shape_dtype(self, tensor, group):
        paddle.distributed.send(shape, dst=dst_rank, group=group)
 
        # send dtype
-        dtype = paddle.to_tensor(paddle_2_number(tensor.dtype))
+        dtype = paddle.to_tensor([paddle_2_number(tensor.dtype)])
        paddle.distributed.send(dtype, dst=dst_rank, group=group)
 
        # send trainable
-        stop_grad = paddle.to_tensor(int(tensor.stop_gradient))
+        stop_grad = paddle.to_tensor([int(tensor.stop_gradient)])
        paddle.distributed.send(stop_grad, dst=dst_rank, group=group)
 
    def send_meta(self, tensor, group):
@@ -148,7 +148,7 @@ def send_meta(self, tensor, group):
        # send tensor type
        paddle.distributed.send(tensor_type, dst=dst_rank, group=group)
 
-        nums = paddle.to_tensor(len(tensor))
+        nums = paddle.to_tensor([len(tensor)])
        paddle.distributed.send(nums, dst=dst_rank, group=group)
 
        for d in tensor:
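
In this pipeline-parallel handshake every piece of metadata travels as a shape-[1] tensor, and the peer allocates a matching fixed-shape receive buffer, so a 0-D tensor on the send side would no longer line up. A hedged sketch of the sender-side pattern (send_scalar is a hypothetical helper, not a Paddle API, and assumes an initialized process group):

    import paddle

    def send_scalar(value, dst, group):
        # Hypothetical helper: wrap the Python int in a list so the wire
        # tensor has shape [1], the shape the receiver is assumed to expect.
        t = paddle.to_tensor([int(value)])
        paddle.distributed.send(t, dst=dst, group=group)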
@@ -82,7 +82,7 @@ def compare_mask_between_ranks(
 ):
 
    for np_mask in [mask_np_list[i] for i in comapre_idx]:
-        mask_tensor_local = paddle.to_tensor(np_mask.astype("float32"))
+        mask_tensor_local = paddle.to_tensor([np_mask.astype("float32")])
        if rank == 0:
            mask_tensor_remote = paddle.ones_like(mask_tensor_local)
            dy_broadcast_helper(mask_tensor_remote)
@@ -233,7 +233,7 @@ def core_attn(self, q, k, v, attn_mask):
        product = paddle.matmul(x=q, y=k, transpose_y=True)
        product = paddle.multiply(
            product,
-            paddle.to_tensor(self.head_dim**-0.5, dtype=product.dtype),
+            paddle.to_tensor([self.head_dim**-0.5], dtype=product.dtype),
        )
        if attn_mask is not None:
            product = product + attn_mask
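
The multiplier here is the standard 1/sqrt(head_dim) attention scaling; wrapping it in a list yields a shape-[1] tensor that broadcasts over product exactly as the old scalar did. A sketch of the broadcast with illustrative shapes:

    import paddle

    head_dim = 64
    scores = paddle.rand([2, 8, 16, 16])         # [batch, heads, seq, seq]
    scale = paddle.to_tensor([head_dim**-0.5])   # shape [1] broadcasts elementwise
    print(paddle.multiply(scores, scale).shape)  # [2, 8, 16, 16]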
@@ -184,10 +184,10 @@ def init_dynamic_data(self, probs, default_dtype, dtype):
        ('probs_00', 0.0, 'float64', 'float32'),
        ('probs_03', 0.3, 'float64', 'float32'),
        ('probs_10', 1.0, 'float64', 'float32'),
-        ('probs_tensor_03_32', paddle.to_tensor(0.3), 'float32', 'float32'),
+        ('probs_tensor_03_32', paddle.to_tensor([0.3]), 'float32', 'float32'),
        (
            'probs_tensor_03_64',
-            paddle.to_tensor(0.3, dtype='float64'),
+            paddle.to_tensor([0.3], dtype='float64'),
            'float64',
            'float64',
        ),
@@ -257,11 +257,11 @@ def test_variance(self):
        ),
        (
            paddle.to_tensor(
-                0.0,
+                [0.0],
            ),
        ),
-        (paddle.to_tensor(1.0),),
-        (paddle.to_tensor(0.0, dtype='float64'),),
+        (paddle.to_tensor([1.0]),),
+        (paddle.to_tensor([0.0], dtype='float64'),),
    ]
 )
 def test_log_prob(self, value):
@@ -291,9 +291,9 @@ def test_log_prob(self, value):
            ]
        ),
    ),
-    (paddle.to_tensor(0.0),),
-    (paddle.to_tensor(1.0),),
-    (paddle.to_tensor(0.0, dtype='float64'),),
+    (paddle.to_tensor([0.0]),),
+    (paddle.to_tensor([1.0]),),
+    (paddle.to_tensor([0.0], dtype='float64'),),
    ]
 )
 def test_prob(self, value):
@@ -323,11 +323,11 @@ def test_prob(self, value):
            ]
        ),
    ),
-    (paddle.to_tensor(0.0),),
-    (paddle.to_tensor(0.3),),
-    (paddle.to_tensor(0.7),),
-    (paddle.to_tensor(1.0),),
-    (paddle.to_tensor(0.0, dtype='float64'),),
+    (paddle.to_tensor([0.0]),),
+    (paddle.to_tensor([0.3]),),
+    (paddle.to_tensor([0.7]),),
+    (paddle.to_tensor([1.0]),),
+    (paddle.to_tensor([0.0], dtype='float64'),),
    ]
 )
 def test_cdf(self, value):
@@ -359,7 +359,7 @@ def test_entropy(self):
 
    def test_kl_divergence(self):
        with paddle.fluid.dygraph.guard(self.place):
-            other_probs = paddle.to_tensor(0.9, dtype=self.dtype)
+            other_probs = paddle.to_tensor([0.9], dtype=self.dtype)
 
            rv_paddle_other = Bernoulli(other_probs)
            rv_np_other = BernoulliNumpy(other_probs)
@@ -422,7 +422,7 @@ def test_kl_divergence(self):
    # 1-D probs
    (
        'probs_1d_1d_32',
-        paddle.to_tensor(0.3),
+        paddle.to_tensor([0.3]),
        'float32',
        'float32',
        [
@@ -432,7 +432,7 @@ def test_kl_divergence(self):
    ),
    (
        'probs_1d_1d_64',
-        paddle.to_tensor(0.3, dtype='float64'),
+        paddle.to_tensor([0.3], dtype='float64'),
        'float64',
        'float64',
        paddle.to_tensor(
@@ -444,15 +444,15 @@ def test_kl_divergence(self):
    ),
    (
        'probs_1d_2d',
-        paddle.to_tensor(0.3),
+        paddle.to_tensor([0.3]),
        'float32',
        'float32',
        [100, 2],
        [100, 2, 1],
    ),
    (
        'probs_1d_3d',
-        paddle.to_tensor(0.3),
+        paddle.to_tensor([0.3]),
        'float32',
        'float32',
        [100, 2, 3],
@@ -523,7 +523,7 @@ def test_codomain(self, input, expected):
        transform.ChainTransform(
            (
                transform.AffineTransform(
-                    paddle.to_tensor(0.0), paddle.to_tensor(1.0)
+                    paddle.to_tensor([0.0]), paddle.to_tensor([1.0])
                ),
                transform.ExpTransform(),
            )
@@ -560,7 +560,7 @@ def test_forward(self, chain, input, expected):
        transform.ChainTransform(
            (
                transform.AffineTransform(
-                    paddle.to_tensor(0.0), paddle.to_tensor(-1.0)
+                    paddle.to_tensor([0.0]), paddle.to_tensor([-1.0])
                ),
                transform.ExpTransform(),
            )
@@ -595,9 +595,9 @@ def test_inverse(self, chain, input, expected):
        transform.ChainTransform(
            (
                transform.AffineTransform(
-                    paddle.to_tensor(0.0), paddle.to_tensor(-1.0)
+                    paddle.to_tensor([0.0]), paddle.to_tensor([-1.0])
                ),
-                transform.PowerTransform(paddle.to_tensor(2.0)),
+                transform.PowerTransform(paddle.to_tensor([2.0])),
            )
        ),
        np.array([1.0, 2.0, 3.0]),
@@ -619,7 +619,7 @@ def test_forward_log_det_jacobian(self, chain, input, expected):
        transform.ChainTransform(
            (
                transform.AffineTransform(
-                    paddle.to_tensor(0.0), paddle.to_tensor(-1.0)
+                    paddle.to_tensor([0.0]), paddle.to_tensor([-1.0])
                ),
                transform.ExpTransform(),
            )
@@ -638,7 +638,7 @@ def test_forward_shape(self, chain, shape, expected_shape):
        transform.ChainTransform(
            (
                transform.AffineTransform(
-                    paddle.to_tensor(0.0), paddle.to_tensor(-1.0)
+                    paddle.to_tensor([0.0]), paddle.to_tensor([-1.0])
                ),
                transform.ExpTransform(),
            )
@@ -743,7 +743,7 @@ def test_inverse_shape(self, shape, expected_shape):
 @param.place(config.DEVICES)
 class TestPowerTransform(unittest.TestCase):
    def setUp(self):
-        self._t = transform.PowerTransform(paddle.to_tensor(2.0))
+        self._t = transform.PowerTransform(paddle.to_tensor([2.0]))
 
    def test_init(self):
        with self.assertRaises(TypeError):
@@ -41,7 +41,7 @@ def _np_sum_rightmost(self, value, n):
        return np.sum(value, tuple(range(-n, 0))) if n > 0 else value
 
    def test_log_prob(self):
-        value = paddle.to_tensor(0.5)
+        value = paddle.to_tensor([0.5])
        np.testing.assert_allclose(
            self.simple_log_prob(value, self.base, self.transforms),
            self._t.log_prob(value),
4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/distribution/test_kl.py
@@ -139,8 +139,8 @@ def test_dispatch_with_unregister(self):
        ),
        (
            'test-same-dist',
-            mock.Exponential(paddle.to_tensor(1.0)),
-            mock.Exponential(paddle.to_tensor(1.0)),
+            mock.Exponential(paddle.to_tensor([1.0])),
+            mock.Exponential(paddle.to_tensor([1.0])),
        ),
    ],
 )
@@ -40,7 +40,7 @@ def __init__(self):
        self.step = 0
 
    def forward(self, x):
-        return paddle.to_tensor(0.0, dtype='float32')
+        return paddle.to_tensor([0.0], dtype='float32')
 
 
 def fake_sample_reader():
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/test_cholesky_op.py
@@ -104,7 +104,7 @@ def test_dygraph(self):
        a = np.random.rand(3, 3)
        a_t = np.transpose(a, [1, 0])
        x_data = np.matmul(a, a_t) + 1e-03
-        x = paddle.to_tensor(x_data)
+        x = paddle.to_tensor([x_data])
        out = paddle.cholesky(x, upper=False)
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/test_deg2rad.py
@@ -74,7 +74,7 @@ def setUp(self):
    def test_dygraph(self):
        paddle.disable_static()
 
-        x2 = paddle.to_tensor(180)
+        x2 = paddle.to_tensor([180])
        result2 = paddle.deg2rad(x2)
        np.testing.assert_allclose(np.pi, result2.numpy(), rtol=1e-05)
@@ -111,7 +111,7 @@ def test_ops_elementwise_mul(self):
    def test_tensor_gradient(self):
        paddle.__version__ = '2.1.0'
 
-        x = paddle.to_tensor(5.0, stop_gradient=False)
+        x = paddle.to_tensor([5.0], stop_gradient=False)
        y = paddle.pow(x, 4.0)
        y.backward()
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/test_einsum_v2.py
@@ -553,7 +553,7 @@ def test_shape(self):
        B = paddle.to_tensor(np.array([2.0, 3.0])).astype(paddle.bfloat16)
        B = B.cuda()
        C = paddle.einsum('i,i->', A, B)
-        D = paddle.to_tensor(8.0).astype(paddle.bfloat16)
+        D = paddle.to_tensor([8.0]).astype(paddle.bfloat16)
        self.assertEqual(C.item(), D.item())
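
For context, einsum with output subscripts 'i,i->' is a full contraction, so C in this test is itself 0-D; asserting on .item() keeps the comparison shape-agnostic. A float32 sketch of the same contraction (bfloat16 and .cuda() omitted; operand values assumed for illustration):

    import numpy as np
    import paddle

    A = paddle.to_tensor(np.array([1.0, 2.0]))
    B = paddle.to_tensor(np.array([2.0, 3.0]))
    C = paddle.einsum('i,i->', A, B)  # full contraction -> 0-D tensor
    print(C.item())                   # 8.0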
4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/test_inplace.py
@@ -259,11 +259,11 @@ def inplace_api_processing(self, var):
 
 class TestDygraphInplaceReshapeTensor(TestDygraphInplace):
    def non_inplace_api_processing(self, var):
-        shape = paddle.to_tensor(-1)
+        shape = paddle.to_tensor([-1])
        return paddle.reshape(var, shape)
 
    def inplace_api_processing(self, var):
-        shape = paddle.to_tensor(-1)
+        shape = paddle.to_tensor([-1])
        return paddle.reshape_(var, shape)
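
paddle.reshape also accepts the target shape as a 1-D integer tensor whose elements are the dimensions, which is why the test wraps -1 in a list: a 0-D tensor cannot encode even a one-element shape list. A minimal sketch:

    import paddle

    x = paddle.ones([2, 3])
    shape = paddle.to_tensor([-1])  # 1-D shape tensor, read as target shape (-1,)
    y = paddle.reshape(x, shape)
    print(y.shape)                  # [6]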
6 changes: 3 additions & 3 deletions python/paddle/fluid/tests/unittests/test_jit_save_load.py
@@ -283,7 +283,7 @@ def __init__(self, in_size, out_size):
        super().__init__()
        self._linear_0 = Linear(in_size, out_size)
        self._linear_1 = Linear(in_size, out_size)
-        self._scale = paddle.to_tensor(9.9)
+        self._scale = paddle.to_tensor([9.9])
 
    @paddle.jit.to_static
    def forward(self, x):
@@ -1196,7 +1196,7 @@ def __init__(self, in_size, out_size):
        self._linear_1_0 = Linear(self.hidden, self.hidden)
        self._linear_1_1 = Linear(self.hidden, self.hidden)
        self._linear_2 = Linear(self.hidden, out_size)
-        self._scale = paddle.to_tensor(9.9)
+        self._scale = paddle.to_tensor([9.9])
 
    @paddle.jit.to_static
    def forward(self, x):
@@ -1319,7 +1319,7 @@ def __init__(self, in_size, out_size, load_path):
        self._linear_1_0 = Linear(out_size, in_size)
        self._linear_1_1 = Linear(out_size, in_size)
        self._linear_2 = Linear(out_size, out_size)
-        self._scale = paddle.to_tensor(9.9)
+        self._scale = paddle.to_tensor([9.9])
 
        # Load multiple times
        self._load_l1 = paddle.jit.load(load_path)