Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add paddle.tensor.math.prod #26351

Merged
merged 5 commits into from
Aug 20, 2020
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions python/paddle/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,7 @@
from .tensor.math import clamp #DEFINE_ALIAS
from .tensor.math import trace #DEFINE_ALIAS
from .tensor.math import kron #DEFINE_ALIAS
from .tensor.math import prod #DEFINE_ALIAS
# from .tensor.random import gaussin #DEFINE_ALIAS
# from .tensor.random import uniform #DEFINE_ALIAS
from .tensor.random import shuffle #DEFINE_ALIAS
Expand Down
13 changes: 11 additions & 2 deletions python/paddle/fluid/layers/nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -4634,9 +4634,18 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
fluid.layers.reduce_prod(y, dim=[0, 1]) # [105.0, 384.0]
"""
helper = LayerHelper('reduce_prod', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if dim is not None and not isinstance(dim, list):
dim = [dim]
if isinstance(dim, tuple):
dim = list(dim)
elif isinstance(dim, int):
dim = [dim]
else:
raise TypeError(
"The type of axis must be int, list or tuple, but received {}".
format(type(dim)))
check_variable_and_dtype(
input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod')
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='reduce_prod',
inputs={'X': input},
Expand Down
123 changes: 123 additions & 0 deletions python/paddle/fluid/tests/unittests/test_prod_op.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle
import paddle.fluid as fluid
import unittest
import numpy as np


class TestProdOp(unittest.TestCase):
def setUp(self):
    """Build the shared test fixture: a random float32 array of shape (10, 10, 5)."""
    # Same distribution (uniform [0, 1)) and dtype as before; only the
    # construction style differs.
    fixture_shape = (10, 10, 5)
    self.input = np.random.random(fixture_shape).astype(np.float32)

def run_imperative(self):
    """Check paddle.prod against numpy.prod in dynamic-graph mode.

    Covers: full reduction, a single axis, a negative axis, a list of
    axes, keepdim, an explicit output dtype, and keepdim+dtype combined.
    The negative-axis case was requested in review (axis may be in
    [-x.ndim, x.ndim) per the paddle.prod docstring).
    """
    input = paddle.to_tensor(self.input)

    # Reduce over all elements.
    dy_result = paddle.prod(input)
    expected_result = np.prod(self.input)
    self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

    # Reduce over a single positive axis.
    dy_result = paddle.prod(input, axis=1)
    expected_result = np.prod(self.input, axis=1)
    self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

    # Negative axis counts from the last dimension.
    dy_result = paddle.prod(input, axis=-1)
    expected_result = np.prod(self.input, axis=-1)
    self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

    # Reduce over multiple axes given as a list.
    dy_result = paddle.prod(input, axis=[0, 1])
    expected_result = np.prod(self.input, axis=(0, 1))
    self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

    # Keep the reduced dimension in the output.
    dy_result = paddle.prod(input, axis=1, keepdim=True)
    expected_result = np.prod(self.input, axis=1, keepdims=True)
    self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

    # Cast to int64 before the reduction.
    dy_result = paddle.prod(input, axis=1, dtype='int64')
    expected_result = np.prod(self.input, axis=1, dtype=np.int64)
    self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

    # keepdim and dtype combined.
    dy_result = paddle.prod(input, axis=1, keepdim=True, dtype='int64')
    expected_result = np.prod(
        self.input, axis=1, keepdims=True, dtype=np.int64)
    self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

def run_static(self, use_gpu=False):
input = fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
result0 = paddle.prod(input)
result1 = paddle.prod(input, axis=1)
result2 = paddle.prod(input, axis=[0, 1])
result3 = paddle.prod(input, axis=1, keepdim=True)
result4 = paddle.prod(input, axis=1, dtype='int64')
result5 = paddle.prod(input, axis=1, keepdim=True, dtype='int64')

place = fluid.CUDAPlace(4) if use_gpu else fluid.CPUPlace()
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why is it 4? Please set to 0 to avoid some CI machines only have 2 GPU cards.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks, done.

exe = fluid.Executor(place)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

paddle.static.Executor

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks, done.

exe.run(fluid.default_startup_program())
static_result = exe.run(
feed={"input": self.input},
fetch_list=[result0, result1, result2, result3, result4, result5])

expected_result = np.prod(self.input)
self.assertTrue(np.allclose(static_result[0], expected_result))
expected_result = np.prod(self.input, axis=1)
self.assertTrue(np.allclose(static_result[1], expected_result))
expected_result = np.prod(self.input, axis=(0, 1))
self.assertTrue(np.allclose(static_result[2], expected_result))
expected_result = np.prod(self.input, axis=1, keepdims=True)
self.assertTrue(np.allclose(static_result[3], expected_result))
expected_result = np.prod(self.input, axis=1, dtype=np.int64)
self.assertTrue(np.allclose(static_result[4], expected_result))
expected_result = np.prod(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

axis支持负数吗? 如果支持可以添加单测

Copy link
Contributor Author

@gfwm2013 gfwm2013 Aug 18, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks, done.

self.input, axis=1, keepdims=True, dtype=np.int64)
self.assertTrue(np.allclose(static_result[5], expected_result))

def test_cpu(self):
paddle.disable_static(place=paddle.fluid.CPUPlace())
self.run_imperative()
paddle.enable_static()

with fluid.program_guard(fluid.Program()):
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

尽量减少fluid

Copy link
Contributor Author

@gfwm2013 gfwm2013 Aug 18, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks, done.

self.run_static()

def test_gpu(self):
if not fluid.core.is_compiled_with_cuda():
return

paddle.disable_static(place=paddle.fluid.CUDAPlace(4))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why is it 4? Please set to 0 to avoid some CI machines only have 2 GPU cards.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks, done.

self.run_imperative()
paddle.enable_static()

with fluid.program_guard(fluid.Program()):
self.run_static(use_gpu=True)


class TestProdOpError(unittest.TestCase):
    """Verify that paddle.prod rejects invalid inputs with TypeError."""

    def test_error(self):
        # Build the static-graph programs the error checks run under.
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = paddle.data(name='x', shape=[2, 2, 4], dtype='float32')
            bool_x = paddle.data(name='bool_x', shape=[2, 2, 4], dtype='bool')
            # The argument x should be a Tensor, not a plain Python list.
            self.assertRaises(TypeError, paddle.prod, [1])

            # The data type of x should be float32, float64, int32 or int64;
            # bool is rejected.
            self.assertRaises(TypeError, paddle.prod, bool_x)

            # The argument axis's type should be int, list or tuple;
            # a float is rejected.
            self.assertRaises(TypeError, paddle.prod, x, 1.5)

            # The argument dtype of prod_op should be float32, float64, int32 or int64.
            self.assertRaises(TypeError, paddle.prod, x, 'bool')


if __name__ == "__main__":
    # Standard unittest entry point: discover and run the test cases above.
    unittest.main()
1 change: 1 addition & 0 deletions python/paddle/tensor/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,6 +154,7 @@
from .math import clamp #DEFINE_ALIAS
from .math import trace #DEFINE_ALIAS
from .math import kron #DEFINE_ALIAS
from .math import prod #DEFINE_ALIAS
# from .random import gaussin #DEFINE_ALIAS
# from .random import uniform #DEFINE_ALIAS
from .random import shuffle #DEFINE_ALIAS
Expand Down
79 changes: 79 additions & 0 deletions python/paddle/tensor/math.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@
from ..fluid.layers import increment #DEFINE_ALIAS
from ..fluid.layers import multiplex #DEFINE_ALIAS
from ..fluid.layers import sums #DEFINE_ALIAS
from ..fluid import layers

__all__ = [
'abs',
Expand All @@ -85,6 +86,7 @@
'log',
'mul',
'multiplex',
'prod',
'pow',
'reciprocal',
'reduce_max',
Expand Down Expand Up @@ -1622,3 +1624,80 @@ def cumsum(x, axis=None, dtype=None, name=None):
kwargs[name] = val
_cum_sum_ = generate_layer_fn('cumsum')
return _cum_sum_(**kwargs)

def prod(x, axis=None, keepdim=False, dtype=None, name=None):
"""
Compute the product of tensor elements over the given axis.

Args:
x(Tensor): Input of prod operator. The data type is float32, float64, int32, int64.
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The data type is float32, float64, int32, or int64

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks, done.

axis(list|int, optional): The axis along which the product is computed. If :attr:`None`,
multiply all elements of `x` and return a Tensor with a single element,
otherwise must be in the range :math:`[-x.ndim, x.ndim)`. If :math:`axis[i]<0`,
the axis to reduce is :math:`x.ndim + axis[i]`. Default is None.
dtype(str, optional): The desired date type of returned tensor, can be float32, float64,
int32, int64. If specified, the input tensor is casted to dtype before operator performed.
This is very useful for avoiding data type overflows. The default value is False.
keepdim(bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result
tensor will have one fewer dimension than the input unless keep_dim is true. Default is False.
name(string, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .

Returns:
Tensor, result of product on the specified dim of input tensor.

Examples:
.. code-block:: python

import paddle
import numpy as np

paddle.disable_static()

# the axis is a int element
data_x = np.array([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]]).astype(np.float32)
x = paddle.to_tensor(data_x)
out1 = paddle.prod(x)
print(out1.numpy())
# [0.0002268]

out2 = paddle.prod(x, -1)
print(out2.numpy())
# [0.027 0.0084]

out3 = paddle.prod(x, 0)
print(out3.numpy())
# [0.02 0.06 0.3 0.63]
print(out3.numpy().dtype)
# float32

out4 = paddle.prod(x, 0, keepdim=True)
print(out4.numpy())
# [[0.02 0.06 0.3 0.63]]

out5 = paddle.prod(x, 0, dtype='int64')
print(out5.numpy())
# [0 0 0 0]
print(out5.numpy().dtype)
# int64

# the axis is list
data_y = np.array([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
y = paddle.to_tensor(data_y)
out6 = paddle.prod(y, [0, 1])
print(out6.numpy())
# [105. 384.]

out7 = paddle.prod(y, (1, 2))
print(out7.numpy())
# [ 24. 1680.]

"""
if dtype is not None:
check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod')
if x.dtype != convert_np_dtype_to_dtype_(dtype):
x = layers.cast(x, dtype)

return layers.reduce_prod(input=x, dim=axis, keep_dim=keepdim, name=name)