tensor fluid code transfer part2 #41096

Merged (1 commit, Apr 13, 2022)
python/paddle/fft.py (3 changes: 2 additions & 1 deletion)
@@ -15,7 +15,8 @@
 from typing import Sequence
 import numpy as np
 import paddle
-from .tensor.attribute import is_complex, is_floating_point, is_integer, _real_to_complex_dtype, _complex_to_real_dtype
+from .tensor.attribute import is_complex, is_floating_point, is_integer
+from .tensor.creation import _real_to_complex_dtype, _complex_to_real_dtype
 from .fluid.framework import _non_static_mode
 from . import _C_ops
 from .fluid.data_feeder import check_variable_and_dtype
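For orientation, the two private helpers now imported from .tensor.creation are plain dtype maps between real and complex variants. The sketch below is illustrative only: it mirrors the mapping encoded by _real_to_complex_dtype and _complex_to_real_dtype (their removed definitions appear further down in this diff) using public paddle dtypes instead of the internal core.VarDesc.VarType enums.

    import paddle

    # Illustrative stand-ins for the private helpers; not the library code itself.
    _REAL_TO_COMPLEX = {paddle.float32: paddle.complex64,
                        paddle.float64: paddle.complex128}
    _COMPLEX_TO_REAL = {v: k for k, v in _REAL_TO_COMPLEX.items()}

    def real_to_complex_dtype(dtype):
        # Unmapped dtypes pass through unchanged, as in the original helpers.
        return _REAL_TO_COMPLEX.get(dtype, dtype)

    def complex_to_real_dtype(dtype):
        return _COMPLEX_TO_REAL.get(dtype, dtype)

    print(real_to_complex_dtype(paddle.float32))  # expect the complex64 dtype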
python/paddle/fluid/tests/unittests/test_crop_tensor_op.py (18 changes: 9 additions & 9 deletions)
@@ -17,6 +17,7 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle
 import paddle.fluid as fluid


@@ -225,31 +226,30 @@ def test_exception(self):
         offset = fluid.data(name='offset', shape=[1], dtype='int32')

         def attr_shape_type():
-            out = fluid.layers.crop_tensor(input1, shape=3)
+            out = paddle.crop(input1, shape=3)

         def attr_shape_dtype():
-            out = fluid.layers.crop_tensor(input1, shape=[2, 2.0, 3, 3])
+            out = paddle.crop(input1, shape=[2, 2.0, 3, 3])

         def attr_shape_value1():
-            out = fluid.layers.crop_tensor(input1, shape=[2, -2, dim, 3])
+            out = paddle.crop(input1, shape=[2, -2, dim, 3])

         def attr_shape_value2():
-            out = fluid.layers.crop_tensor(input1, shape=[2, 0, dim, 3])
+            out = paddle.crop(input1, shape=[2, 0, dim, 3])

         def attr_offsets_type():
-            out = fluid.layers.crop_tensor(
-                input1, shape=[2, 2, 3, 3], offsets=0)
+            out = paddle.crop(input1, shape=[2, 2, 3, 3], offsets=0)

         def attr_offsets_dtype():
-            out = fluid.layers.crop_tensor(
+            out = paddle.crop(
                 input1, shape=[2, 2, 3, 3], offsets=[0, 1.0, 0, 0])

         def attr_offsets_value():
-            out = fluid.layers.crop_tensor(
+            out = paddle.crop(
                 input1, shape=[2, 2, 3, 3], offsets=[0, -1, offset, 0])

         def input_dtype():
-            out = fluid.layers.crop_tensor(input2, shape=[2, 2, 3, 3])
+            out = paddle.crop(input2, shape=[2, 2, 3, 3])

         self.assertRaises(TypeError, attr_shape_type)
         self.assertRaises(TypeError, attr_shape_dtype)
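The calls above only exercise the TypeError paths of the migrated API. For context, a minimal dygraph sketch of how paddle.crop is called (the tensor values here are illustrative, not taken from the test):

    import paddle

    x = paddle.to_tensor([[1., 2., 3.],
                          [4., 5., 6.],
                          [7., 8., 9.]])
    # Crop a 2x2 window starting at row 0, column 1.
    out = paddle.crop(x, shape=[2, 2], offsets=[0, 1])
    print(out.numpy())  # [[2. 3.]
                        #  [5. 6.]]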
python/paddle/fluid/tests/unittests/test_slice_op.py (8 changes: 4 additions & 4 deletions)
@@ -534,13 +534,13 @@ def test_1(self):
         # value_int64 is greater than 2147483647 which is the max of int32
         value_int64 = fluid.layers.fill_constant([1], "int64", 2147483648)

-        out_1 = fluid.layers.slice(
+        out_1 = paddle.slice(
             x, axes=[0, 1, 2], starts=[-3, 0, 2], ends=[value_int64, 100, -1])
-        out_2 = fluid.layers.slice(
+        out_2 = paddle.slice(
             x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, -1])
-        out_3 = fluid.layers.slice(
+        out_3 = paddle.slice(
             x, axes=[0, 1, 3], starts=[minus_3, 0, 2], ends=[3, 100, minus_1])
-        out_4 = fluid.layers.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)
+        out_4 = paddle.slice(x, axes=[0, 1, 2], starts=starts, ends=ends)

         out_5 = x[-3:3, 0:100, 2:-1]
         out_6 = x[minus_3:3, 0:100, :, 2:-1]
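Only the namespace changes in this file: paddle.slice keeps the same axes/starts/ends arguments as fluid.layers.slice. A small dygraph sketch with illustrative shapes:

    import paddle

    x = paddle.arange(24, dtype='float32').reshape([2, 3, 4])
    # Keep index 0 on axis 0 and indices 1..2 on axis 2.
    out = paddle.slice(x, axes=[0, 2], starts=[0, 1], ends=[1, 3])
    print(out.shape)  # [1, 3, 2]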
python/paddle/fluid/tests/unittests/test_strided_slice_op.py (8 changes: 4 additions & 4 deletions)
@@ -534,25 +534,25 @@ def test_1(self):
             shape=[3, 4, 5, 6],
             append_batch_size=False,
             dtype="float64")
-        out_1 = fluid.layers.strided_slice(
+        out_1 = paddle.strided_slice(
             x,
             axes=[0, 1, 2],
             starts=[-3, 0, 2],
             ends=[3, 100, -1],
             strides=[1, 1, 1])
-        out_2 = fluid.layers.strided_slice(
+        out_2 = paddle.strided_slice(
             x,
             axes=[0, 1, 3],
             starts=[minus_3, 0, 2],
             ends=[3, 100, -1],
             strides=[1, 1, 1])
-        out_3 = fluid.layers.strided_slice(
+        out_3 = paddle.strided_slice(
             x,
             axes=[0, 1, 3],
             starts=[minus_3, 0, 2],
             ends=[3, 100, minus_1],
             strides=[1, 1, 1])
-        out_4 = fluid.layers.strided_slice(
+        out_4 = paddle.strided_slice(
             x, axes=[0, 1, 2], starts=starts, ends=ends, strides=strides)

         out_5 = x[-3:3, 0:100:2, -1:2:-1]
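As with slice, paddle.strided_slice takes the same axes/starts/ends/strides arguments as the fluid.layers version it replaces. A minimal dygraph sketch:

    import paddle

    x = paddle.arange(12, dtype='int64').reshape([3, 4])
    # Rows 0..1, every second column.
    out = paddle.strided_slice(x, axes=[0, 1], starts=[0, 0], ends=[2, 4], strides=[1, 2])
    print(out.numpy())  # [[0 2]
                        #  [4 6]]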
python/paddle/tensor/attribute.py (129 changes: 110 additions & 19 deletions)
@@ -14,37 +14,128 @@

 from __future__ import print_function

-from ..framework import core
-from ..fluid.layer_helper import LayerHelper
+from ..framework import core, _non_static_mode
+from ..framework import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype
 from ..fluid.data_feeder import check_type

+from .creation import assign
+from .creation import _complex_to_real_dtype

 # TODO: define functions to get tensor attributes
-from ..fluid.layers import rank  # noqa: F401
-from ..fluid.layers import shape  # noqa: F401
 import paddle
 from paddle import _C_ops
-from paddle.static import Variable
+from ..static import Variable
+from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode

+import numpy as np

 __all__ = []

-def _complex_to_real_dtype(dtype):
-    if dtype == core.VarDesc.VarType.COMPLEX64:
-        return core.VarDesc.VarType.FP32
-    elif dtype == core.VarDesc.VarType.COMPLEX128:
-        return core.VarDesc.VarType.FP64
-    else:
-        return dtype
+def rank(input):
+    """
+
+    The OP returns the number of dimensions for a tensor, which is a 0-D int32 Tensor.
+
+    Args:
+        input (Tensor): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary.
+
+    Returns:
+        Tensor, the output data type is int32.: The 0-D tensor with the dimensions of the input Tensor.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+
+            input = paddle.rand((3, 100, 100))
+            rank = paddle.rank(input)
+            print(rank)
+            # 3
+    """
+    check_type(input, 'input', (Variable), 'input')
+    ndims = len(input.shape)
+    out = assign(np.array(ndims, 'int32'))
+
+    return out


+def shape(input):
+    """
+    :alias_main: paddle.shape
+    :alias: paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape
+    :old_api: paddle.fluid.layers.shape
+
+    **Shape Layer**
+
+    Get the shape of the input.
+
+    .. code-block:: text
+
+        Case1:
+            Given N-D Tensor:
+                input = [ [1, 2, 3, 4], [5, 6, 7, 8] ]
+
+            Then:
+                input.shape = [2, 4]
+
+        Case2:
+            Given SelectedRows:
+                input.rows = [0, 4, 19]
+                input.height = 20
+                input.value = [ [1, 2], [3, 4], [5, 6] ]  # inner tensor
+            Then:
+                input.shape = [3, 2]
+
+    Args:
+        input (Variable): The input can be N-D Tensor or SelectedRows with data type bool, float16, float32, float64, int32, int64.
+                          If input variable is type of SelectedRows, returns the shape of its inner tensor.
+
+    Returns:
+        Variable (Tensor): The shape of the input variable.
+
+    Examples:
+        .. code-block:: python
+
-def _real_to_complex_dtype(dtype):
-    if dtype == core.VarDesc.VarType.FP32:
-        return core.VarDesc.VarType.COMPLEX64
-    elif dtype == core.VarDesc.VarType.FP64:
-        return core.VarDesc.VarType.COMPLEX128
-    else:
-        return dtype
+            import paddle.fluid as fluid
+            import numpy as np
+            import paddle
+            paddle.enable_static()
+
+            inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
+            output = fluid.layers.shape(inputs)
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(fluid.default_startup_program())
+
+            img = np.ones((3, 100, 100)).astype(np.float32)
+
+            res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
+            print(res)  # [array([  3, 100, 100], dtype=int32)]
+    """
+    if in_dygraph_mode():
+        out = _C_ops.final_state_shape(input)
+        out.stop_gradient = True
+        return out
+    if _in_legacy_dygraph():
+        out = _C_ops.shape(input)
+        out.stop_gradient = True
+        return out
+
+    check_variable_and_dtype(input, 'input', [
+        'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64',
+        'complex128'
+    ], 'shape')
+    helper = LayerHelper('shape', **locals())
+    out = helper.create_variable_for_type_inference(dtype='int32')
+    helper.append_op(
+        type='shape',
+        inputs={'Input': input},
+        outputs={'Out': out},
+        stop_gradient=True)
+
+    return out


def is_complex(x):
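The docstring example above exercises shape under static graph mode. For completeness, a short dygraph sketch of the two APIs this file now hosts, paddle.shape and paddle.rank; the exact printed formatting is indicative only:

    import paddle

    x = paddle.ones([3, 100, 100], dtype='float32')
    s = paddle.shape(x)             # 1-D int32 Tensor holding [3, 100, 100]
    print(s.numpy())                # [  3 100 100]
    print(int(paddle.rank(x)))      # 3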