diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index 447e1f3b190bf..ff8eed1aa1715 100644
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -52,6 +52,7 @@
     ones,
     ones_like,
     polar,
+    resize_,
     set_,
     to_tensor,
     tril,
@@ -868,6 +869,7 @@
     'signbit',
     'log_normal_',
     'set_',
+    'resize_',
 ]
 
 # this list used in math_op_patch.py for magic_method bind
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index beac520562d7b..e013d0bade481 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -3389,3 +3389,82 @@ def set_(
         shape = source.shape
     return _C_ops.set_(x, source, shape, stride, offset)
+
+
+@inplace_apis_in_dygraph_only
+def resize_(
+    x: paddle.Tensor,
+    shape: Sequence[int],
+    fill_zero: bool = False,
+    name: str | None = None,
+) -> paddle.Tensor:
+    """
+    Resize ``x`` in place to the specified ``shape``.
+
+    Args:
+        x (Tensor): An arbitrary Tensor. The data type supports ``bfloat16``, ``float16``, ``float32``, ``float64``,
+            ``bool``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64`` or ``complex128``.
+        shape (list|tuple): The target shape. Each element should be an integer.
+        fill_zero (bool, optional): If the size of the specified ``shape`` is greater than the original Tensor
+            size and ``fill_zero`` is True, the new elements are filled with zero. Default: False, which means
+            the values of the new elements are undetermined.
+        name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        Tensor, the resized Tensor.
+
+    Examples:
+        .. code-block:: python
+
+            >>> import paddle
+
+            >>> x = paddle.to_tensor([1., 2., 3.])
+            >>> x.resize_([2, 1])
+            >>> print(x)
+            Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
+                   [[1.],
+                    [2.]])
+
+            >>> x = paddle.to_tensor([1., 2., 3.])
+            >>> x.resize_([2, 3], fill_zero=True)
+            >>> print(x)
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+                   [[1., 2., 3.],
+                    [0., 0., 0.]])
+
+    """
+    if in_dynamic_mode():
+        check_dtype(
+            x.dtype,
+            'x',
+            [
+                'bool',
+                'float16',
+                'uint16',  # Paddle's internal dtype name for bfloat16
+                'float32',
+                'float64',
+                'int8',
+                'int16',
+                'int32',
+                'int64',
+                'uint8',
+                'complex64',
+                'complex128',
+            ],
+            'resize',
+        )
+        if not isinstance(shape, (list, tuple)):
+            raise ValueError(
+                f"Input (shape) should be a list or tuple but received {type(shape)}"
+            )
+        new_size = math.prod(shape)
+        old_size = math.prod(x.shape)
+        if (new_size > old_size) and fill_zero:
+            # Pad the flattened data with enough zero copies, then truncate.
+            repeats = -(-new_size // old_size)  # ceil division
+            flatten_x = x.flatten()
+            tmp = paddle.concat(
+                (flatten_x,) + (paddle.zeros_like(flatten_x),) * (repeats - 1)
+            )[:new_size]
+            return x.set_(tmp, shape)
+
+    return x.set_(x, shape)
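Reviewer note: the ``fill_zero`` branch above grows the buffer by concatenating the flattened tensor with whole zero-filled copies of itself and truncating to the requested size, so ``repeats`` is just ceil(new_size / old_size). A minimal NumPy sketch of the same arithmetic, for reference only (the helper name ``resize_with_zero_fill`` is ours, not part of the patch):

    import math

    import numpy as np

    def resize_with_zero_fill(arr, shape):
        # Mirrors the fill_zero branch: old data first, zeros after,
        # truncated to the requested number of elements.
        new_size = math.prod(shape)
        flat = arr.reshape(-1)
        if new_size > flat.size:
            repeats = -(-new_size // flat.size)  # ceil division
            flat = np.concatenate([flat] + [np.zeros_like(flat)] * (repeats - 1))
        return flat[:new_size].reshape(shape)

    print(resize_with_zero_fill(np.array([1.0, 2.0, 3.0]), [2, 3]))
    # [[1. 2. 3.]
    #  [0. 0. 0.]]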
diff --git a/test/legacy_test/test_inplace.py b/test/legacy_test/test_inplace.py
index 8095110678f9a..e1f4cfc1e227e 100755
--- a/test/legacy_test/test_inplace.py
+++ b/test/legacy_test/test_inplace.py
@@ -2283,5 +2283,171 @@ def test_inplace_api(self):
         self.assertTrue(x._is_shared_buffer_with(new_x))
 
 
+class TestDygraphInplaceResize(unittest.TestCase):
+    def setUp(self):
+        self.init_data()
+        self.places = [paddle.CPUPlace()]
+        if paddle.base.core.is_compiled_with_cuda():
+            self.places.append(paddle.CUDAPlace(0))
+        self.support_dtypes = [
+            'float32',
+            'float64',
+            'bool',
+            'int8',
+            'int16',
+            'int32',
+            'int64',
+            'uint8',
+            'complex64',
+            'complex128',
+        ]
+
+    def init_data(self):
+        self.x_np = np.random.uniform(-5, 5, [3, 10, 2])
+        self.dtype = "float32"
+        self.new_shape1 = [20]
+        self.new_shape2 = [9, 11]
+
+    def non_inplace_api_processing(self, x, shape, fill_zero=False):
+        # NumPy's in-place ndarray.resize always zero-fills new elements,
+        # so this reference matches resize_ with fill_zero=True.
+        x = x.numpy().copy()
+        x.resize(shape, refcheck=False)
+        return paddle.to_tensor(x)
+
+    def inplace_api_processing(self, x, shape, fill_zero=False):
+        return paddle.Tensor.resize_(x, shape, fill_zero)
+
+    def test_inplace_api(self):
+        for dtype in self.support_dtypes:
+            for place in self.places:
+                with paddle.base.dygraph.guard(place):
+                    x = paddle.to_tensor(self.x_np).astype(dtype)
+                    inplace_x1 = self.inplace_api_processing(x, self.new_shape1)
+                    self.assertTrue(id(x) == id(inplace_x1))
+
+                    x = paddle.to_tensor(self.x_np).astype(dtype)
+                    inplace_x2 = self.inplace_api_processing(x, self.new_shape2)
+                    self.assertTrue(id(x) == id(inplace_x2))
+
+    def test_forward_result(self):
+        old_numel = np.prod(self.x_np.shape)
+
+        x = paddle.to_tensor(self.x_np).astype(self.dtype)
+        no_inplace_x1 = self.non_inplace_api_processing(x, self.new_shape1)
+        inplace_x1 = self.inplace_api_processing(x, self.new_shape1)
+        np.testing.assert_array_equal(no_inplace_x1.numpy(), inplace_x1.numpy())
+
+        # When enlarging without fill_zero, only the first old_numel elements
+        # are guaranteed to match the reference.
+        x = paddle.to_tensor(self.x_np).astype(self.dtype)
+        no_inplace_x2 = self.non_inplace_api_processing(x, self.new_shape2)
+        inplace_x2 = self.inplace_api_processing(x, self.new_shape2)
+        np.testing.assert_array_equal(
+            no_inplace_x2.numpy().flatten()[:old_numel],
+            inplace_x2.numpy().flatten()[:old_numel],
+        )
+
+        x = paddle.to_tensor(self.x_np).astype(self.dtype)
+        no_inplace_x3 = self.non_inplace_api_processing(
+            x, self.new_shape1, fill_zero=True
+        )
+        inplace_x3 = self.inplace_api_processing(
+            x, self.new_shape1, fill_zero=True
+        )
+        np.testing.assert_array_equal(no_inplace_x3.numpy(), inplace_x3.numpy())
+
+        x = paddle.to_tensor(self.x_np).astype(self.dtype)
+        no_inplace_x4 = self.non_inplace_api_processing(
+            x, self.new_shape2, fill_zero=True
+        )
+        inplace_x4 = self.inplace_api_processing(
+            x, self.new_shape2, fill_zero=True
+        )
+        np.testing.assert_array_equal(no_inplace_x4.numpy(), inplace_x4.numpy())
+
+    def test_forward_version(self):
+        with paddle.base.dygraph.guard():
+            x = paddle.to_tensor(self.x_np).astype(self.dtype)
+            self.assertEqual(x.inplace_version, 0)
+
+            x = self.inplace_api_processing(x, self.new_shape1)
+            self.assertEqual(x.inplace_version, 1)
+
+            x = self.inplace_api_processing(x, self.new_shape2)
+            self.assertEqual(x.inplace_version, 2)
+
+    def test_leaf_inplace_var_error(self):
+        with paddle.base.dygraph.guard():
+            x = paddle.to_tensor(self.x_np).astype(self.dtype)
+            x.stop_gradient = False
+
+            def leaf_inplace_error():
+                self.inplace_api_processing(x, self.new_shape1)
+
+            self.assertRaises(ValueError, leaf_inplace_error)
+
+    def test_argument_error(self):
+        with paddle.base.dygraph.guard():
+            x = paddle.to_tensor(self.x_np).astype(self.dtype)
+
+            def argument_error():
+                self.inplace_api_processing(x, 2.0)
+
+            self.assertRaises(ValueError, argument_error)
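A note on the reference path: NumPy's in-place ``ndarray.resize`` always zero-fills new entries when enlarging, which is why ``non_inplace_api_processing`` above can ignore its ``fill_zero`` argument and still serve as the reference for ``fill_zero=True``. A standalone illustration:

    import numpy as np

    a = np.array([1.0, 2.0, 3.0])
    a.resize((2, 3), refcheck=False)  # in place; new entries become 0.0
    print(a)
    # [[1. 2. 3.]
    #  [0. 0. 0.]]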
+
+
+@unittest.skipIf(
+    not paddle.base.core.is_compiled_with_cuda()
+    or not paddle.base.core.is_float16_supported(paddle.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support float16",
+)
+class TestDygraphInplaceResizeFP16(TestDygraphInplaceResize):
+    def setUp(self):
+        self.init_data()
+        self.places = [paddle.CUDAPlace(0)]
+
+    def init_data(self):
+        self.x_np = np.random.uniform(-5, 5, [3, 10, 2])
+        self.dtype = "float16"
+        self.new_shape1 = [20]
+        self.new_shape2 = [8, 12]
+
+    def test_inplace_api(self):
+        for place in self.places:
+            with paddle.base.dygraph.guard(place):
+                x = paddle.to_tensor(self.x_np).astype(self.dtype)
+                inplace_x1 = self.inplace_api_processing(x, self.new_shape1)
+                self.assertTrue(id(x) == id(inplace_x1))
+
+                x = paddle.to_tensor(self.x_np).astype(self.dtype)
+                inplace_x2 = self.inplace_api_processing(x, self.new_shape2)
+                self.assertTrue(id(x) == id(inplace_x2))
+
+
+@unittest.skipIf(
+    not paddle.base.core.is_compiled_with_cuda()
+    or not paddle.base.core.is_bfloat16_supported(paddle.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support bfloat16",
+)
+class TestDygraphInplaceResizeBF16(TestDygraphInplaceResize):
+    def setUp(self):
+        self.init_data()
+        self.places = [paddle.CUDAPlace(0)]
+
+    def init_data(self):
+        self.x_np = np.random.uniform(-5, 5, [3, 10, 2])
+        self.dtype = "bfloat16"
+        self.new_shape1 = [15]
+        self.new_shape2 = [9, 11]
+
+    def test_inplace_api(self):
+        for place in self.places:
+            with paddle.base.dygraph.guard(place):
+                x = paddle.to_tensor(self.x_np).astype(self.dtype)
+                inplace_x1 = self.inplace_api_processing(x, self.new_shape1)
+                self.assertTrue(id(x) == id(inplace_x1))
+
+                x = paddle.to_tensor(self.x_np).astype(self.dtype)
+                inplace_x2 = self.inplace_api_processing(x, self.new_shape2)
+                self.assertTrue(id(x) == id(inplace_x2))
+
+
 if __name__ == '__main__':
     unittest.main()
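As ``test_leaf_inplace_var_error`` checks, dygraph rejects in-place mutation of a leaf Tensor that requires gradients; a minimal repro sketch, assuming this patch is applied:

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0])
    x.stop_gradient = False  # leaf Tensor that requires grad

    try:
        x.resize_([2, 1])  # in-place op on such a leaf raises
    except ValueError as exc:
        print(f"rejected as expected: {exc}")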