-
Notifications
You must be signed in to change notification settings - Fork 5.6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add paddle.tensor.math.prod #26351
Add paddle.tensor.math.prod #26351
Changes from 4 commits
79566ea
45d763d
0b0fcda
5123131
fceb02c
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,123 @@ | ||
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
|
||
from __future__ import print_function | ||
|
||
import paddle | ||
import paddle.fluid as fluid | ||
import unittest | ||
import numpy as np | ||
|
||
|
||
class TestProdOp(unittest.TestCase):
    """Check ``paddle.prod`` against ``numpy.prod`` in both imperative
    (dygraph) and static-graph modes, on CPU and (when available) GPU."""

    def setUp(self):
        # Random values in [0, 1); small enough that products stay finite.
        self.input = np.random.random(size=(10, 10, 5)).astype(np.float32)

    def run_imperative(self):
        """Run every supported argument combination in dygraph mode and
        compare each result with the numpy reference."""
        input = paddle.to_tensor(self.input)

        # Full reduction (axis=None).
        dy_result = paddle.prod(input)
        expected_result = np.prod(self.input)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        # Single positive axis.
        dy_result = paddle.prod(input, axis=1)
        expected_result = np.prod(self.input, axis=1)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        # Negative axis: normalized to ndim + axis, matching numpy.
        dy_result = paddle.prod(input, axis=-1)
        expected_result = np.prod(self.input, axis=-1)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        # Multiple axes given as a list.
        dy_result = paddle.prod(input, axis=[0, 1])
        expected_result = np.prod(self.input, axis=(0, 1))
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        # keepdim preserves the reduced dimension as size 1.
        dy_result = paddle.prod(input, axis=1, keepdim=True)
        expected_result = np.prod(self.input, axis=1, keepdims=True)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        # Explicit output dtype casts the input before reduction.
        dy_result = paddle.prod(input, axis=1, dtype='int64')
        expected_result = np.prod(self.input, axis=1, dtype=np.int64)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        # keepdim and dtype combined.
        dy_result = paddle.prod(input, axis=1, keepdim=True, dtype='int64')
        expected_result = np.prod(
            self.input, axis=1, keepdims=True, dtype=np.int64)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

    def run_static(self, use_gpu=False):
        """Build one static program covering all argument combinations,
        execute it once, and compare every fetched result with numpy."""
        input = fluid.data(name='input', shape=[10, 10, 5], dtype='float32')
        result0 = paddle.prod(input)
        result1 = paddle.prod(input, axis=1)
        result2 = paddle.prod(input, axis=-1)
        result3 = paddle.prod(input, axis=[0, 1])
        result4 = paddle.prod(input, axis=1, keepdim=True)
        result5 = paddle.prod(input, axis=1, dtype='int64')
        result6 = paddle.prod(input, axis=1, keepdim=True, dtype='int64')

        # Use device 0: some CI machines only have 2 GPU cards, so any
        # higher device id (e.g. CUDAPlace(4)) fails there.
        place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        static_result = exe.run(
            feed={"input": self.input},
            fetch_list=[
                result0, result1, result2, result3, result4, result5, result6
            ])

        expected_result = np.prod(self.input)
        self.assertTrue(np.allclose(static_result[0], expected_result))
        expected_result = np.prod(self.input, axis=1)
        self.assertTrue(np.allclose(static_result[1], expected_result))
        expected_result = np.prod(self.input, axis=-1)
        self.assertTrue(np.allclose(static_result[2], expected_result))
        expected_result = np.prod(self.input, axis=(0, 1))
        self.assertTrue(np.allclose(static_result[3], expected_result))
        expected_result = np.prod(self.input, axis=1, keepdims=True)
        self.assertTrue(np.allclose(static_result[4], expected_result))
        expected_result = np.prod(self.input, axis=1, dtype=np.int64)
        self.assertTrue(np.allclose(static_result[5], expected_result))
        expected_result = np.prod(
            self.input, axis=1, keepdims=True, dtype=np.int64)
        self.assertTrue(np.allclose(static_result[6], expected_result))

    def test_cpu(self):
        paddle.disable_static(place=paddle.fluid.CPUPlace())
        self.run_imperative()
        paddle.enable_static()

        with fluid.program_guard(fluid.Program()):
            self.run_static()

    def test_gpu(self):
        if not fluid.core.is_compiled_with_cuda():
            return

        # Device 0 exists on every CUDA-capable machine; higher ids may not.
        paddle.disable_static(place=paddle.fluid.CUDAPlace(0))
        self.run_imperative()
        paddle.enable_static()

        with fluid.program_guard(fluid.Program()):
            self.run_static(use_gpu=True)
|
||
|
||
class TestProdOpError(unittest.TestCase):
    """Verify that ``paddle.prod`` rejects invalid arguments with TypeError."""

    def test_error(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = paddle.data(name='x', shape=[2, 2, 4], dtype='float32')
            bool_x = paddle.data(
                name='bool_x', shape=[2, 2, 4], dtype='bool')

            # x must be a Tensor, not a plain Python list.
            self.assertRaises(TypeError, paddle.prod, [1])
            # The data type of x must be float32, float64, int32 or int64.
            self.assertRaises(TypeError, paddle.prod, bool_x)
            # axis must be an int, a list or a tuple — never a float.
            self.assertRaises(TypeError, paddle.prod, x, 1.5)
            # dtype must be float32, float64, int32 or int64.
            self.assertRaises(TypeError, paddle.prod, x, 'bool')
|
||
|
||
# Allow running this test file directly (e.g. `python test_prod_op.py`).
if __name__ == "__main__":
    unittest.main()
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -63,6 +63,7 @@ | |
from ..fluid.layers import increment #DEFINE_ALIAS | ||
from ..fluid.layers import multiplex #DEFINE_ALIAS | ||
from ..fluid.layers import sums #DEFINE_ALIAS | ||
from ..fluid import layers | ||
|
||
__all__ = [ | ||
'abs', | ||
|
@@ -85,6 +86,7 @@ | |
'log', | ||
'mul', | ||
'multiplex', | ||
'prod', | ||
'pow', | ||
'reciprocal', | ||
'reduce_max', | ||
|
@@ -1622,3 +1624,80 @@ def cumsum(x, axis=None, dtype=None, name=None): | |
kwargs[name] = val | ||
_cum_sum_ = generate_layer_fn('cumsum') | ||
return _cum_sum_(**kwargs) | ||
|
||
def prod(x, axis=None, keepdim=False, dtype=None, name=None):
    """
    Compute the product of tensor elements over the given axis.

    Args:
        x(Tensor): Input of prod operator. The data type is float32, float64, int32 or int64.
        axis(int|list|tuple, optional): The axis along which the product is computed. If :attr:`None`,
            multiply all elements of `x` and return a Tensor with a single element,
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`. If :math:`axis[i]<0`,
            the axis to reduce is :math:`x.ndim + axis[i]`. Default is None.
        keepdim(bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result
            tensor will have one fewer dimension than the input unless `keepdim` is true. Default is False.
        dtype(str, optional): The desired data type of returned tensor, can be float32, float64,
            int32, int64. If specified, the input tensor is casted to dtype before operator performed.
            This is very useful for avoiding data type overflows. The default value is None.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor, result of product on the specified dim of input tensor.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.disable_static()

            # the axis is a int element
            data_x = np.array([[0.2, 0.3, 0.5, 0.9],
                               [0.1, 0.2, 0.6, 0.7]]).astype(np.float32)
            x = paddle.to_tensor(data_x)
            out1 = paddle.prod(x)
            print(out1.numpy())
            # [0.0002268]

            out2 = paddle.prod(x, -1)
            print(out2.numpy())
            # [0.027  0.0084]

            out3 = paddle.prod(x, 0)
            print(out3.numpy())
            # [0.02 0.06 0.3  0.63]
            print(out3.numpy().dtype)
            # float32

            out4 = paddle.prod(x, 0, keepdim=True)
            print(out4.numpy())
            # [[0.02 0.06 0.3  0.63]]

            out5 = paddle.prod(x, 0, dtype='int64')
            print(out5.numpy())
            # [0 0 0 0]
            print(out5.numpy().dtype)
            # int64

            # the axis is list
            data_y = np.array([[[1.0, 2.0], [3.0, 4.0]],
                               [[5.0, 6.0], [7.0, 8.0]]])
            y = paddle.to_tensor(data_y)
            out6 = paddle.prod(y, [0, 1])
            print(out6.numpy())
            # [105. 384.]

            out7 = paddle.prod(y, (1, 2))
            print(out7.numpy())
            # [  24. 1680.]

    """
    # When an explicit output dtype is requested, validate it and cast the
    # input up front so the product is accumulated in the requested (wider)
    # type — this is what prevents overflow of intermediate values.
    if dtype is not None:
        check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
                    'prod')
        if x.dtype != convert_np_dtype_to_dtype_(dtype):
            x = layers.cast(x, dtype)

    # Delegate the actual reduction to the existing fluid reduce_prod layer.
    return layers.reduce_prod(input=x, dim=axis, keep_dim=keepdim, name=name)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why is it 4? Please set to 0 to avoid some CI machines only have 2 GPU cards.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks, done.