Skip to content

Commit

Permalink
[xdoctest][task 181-183] reformat example code with google style in `sparse/multiary.py`, `distributed/auto_parallel/*` (PaddlePaddle#56665)
Browse files Browse the repository at this point in the history

* [Doctest]fix No.181-183, test=docs_preview

* add env skip
  • Loading branch information
ooooo-create authored and BeingGod committed Sep 9, 2023
1 parent d114959 commit 0fe8b38
Show file tree
Hide file tree
Showing 3 changed files with 43 additions and 40 deletions.
34 changes: 18 additions & 16 deletions python/paddle/distributed/auto_parallel/interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,13 +56,14 @@ def shard_tensor(x, process_mesh=None, shard_spec=None):
Examples:
.. code-block:: python
import paddle
from paddle.distributed.fleet import auto
>>> # doctest: +REQUIRES(env:DISTRIBUTED)
>>> import paddle
>>> from paddle.distributed.fleet import auto
mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
x = paddle.ones([4, 6])
shard_spec = ["x", "y"]
auto.shard_tensor(x, mesh, shard_spec)
>>> mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
>>> x = paddle.ones([4, 6])
>>> shard_spec = ["x", "y"]
>>> auto.shard_tensor(x, mesh, shard_spec)
"""

Expand Down Expand Up @@ -145,16 +146,17 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None):
Examples:
.. code-block:: python
import paddle
from paddle.distributed.fleet import auto
x = paddle.ones([4, 6])
y = paddle.zeros([4, 6])
mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
dist_add = auto.shard_op(paddle.add,
in_shard_specs=[["x", "y"], ["y", None]],
out_shard_specs=[[None, "x"]])
dist_add(x, y)
>>> import paddle
>>> from paddle.distributed.fleet import auto
>>> x = paddle.ones([4, 6])
>>> y = paddle.zeros([4, 6])
>>> mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
>>> dist_add = auto.shard_op(paddle.add,
... mesh,
... in_shard_specs=[["x", "y"], ["y", None]],
... out_shard_specs=[[None, "x"]])
>>> dist_add(x, y)
"""

Expand Down
10 changes: 5 additions & 5 deletions python/paddle/distributed/auto_parallel/process_mesh.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,12 +81,12 @@ class ProcessMesh(core.ProcessMesh):
Examples:
.. code-block:: python
import paddle
import paddle.distributed as dist
>>> import paddle
>>> import paddle.distributed as dist
mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
assert mesh.shape == [2, 3]
assert mesh.process_ids == [2, 4, 5, 0, 1, 3]
>>> mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
>>> assert mesh.shape == [2, 3]
>>> assert mesh.process_ids == [2, 4, 5, 0, 1, 3]
"""

Expand Down
39 changes: 20 additions & 19 deletions python/paddle/sparse/multiary.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,25 +58,26 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
.. code-block:: python
# required: gpu
import paddle
# dense + csr @ dense -> dense
input = paddle.rand([3, 2])
crows = [0, 1, 2, 3]
cols = [1, 2, 0]
values = [1., 2., 3.]
x = paddle.sparse.sparse_csr_tensor(crows, cols, values, [3, 3])
y = paddle.rand([3, 2])
out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
# dense + coo @ dense -> dense
input = paddle.rand([3, 2])
indices = [[0, 1, 2], [1, 2, 0]]
values = [1., 2., 3.]
x = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3])
y = paddle.rand([3, 2])
out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
>>> # doctest: +REQUIRES(env:GPU)
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> # dense + csr @ dense -> dense
>>> input = paddle.rand([3, 2])
>>> crows = [0, 1, 2, 3]
>>> cols = [1, 2, 0]
>>> values = [1., 2., 3.]
>>> x = paddle.sparse.sparse_csr_tensor(crows, cols, values, [3, 3])
>>> y = paddle.rand([3, 2])
>>> out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
>>> # dense + coo @ dense -> dense
>>> input = paddle.rand([3, 2])
>>> indices = [[0, 1, 2], [1, 2, 0]]
>>> values = [1., 2., 3.]
>>> x = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3])
>>> y = paddle.rand([3, 2])
>>> out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
"""
return _C_ops.sparse_addmm(input, x, y, beta, alpha)

0 comments on commit 0fe8b38

Please sign in to comment.