
[xdoctest] reformat example code with google style No.186-190 #56166

Merged: 15 commits merged on Aug 22, 2023
Changes from 7 commits
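
Context for the diff below: the examples are converted to the Google-style doctest format consumed by xdoctest (named in the PR title), where each statement line starts with ">>> ", each continuation line starts with "... ", and expected output follows unprefixed. A minimal sketch of the target layout, built around a hypothetical add_one function that is not part of this PR:

    def add_one(x):
        """Return ``x + 1``.

        Examples:

            .. code-block:: python

                >>> # a multi-line literal: the closing bracket line uses the
                >>> # "... " continuation prompt, not a fresh ">>> " prompt
                >>> values = [
                ...     1,
                ...     2,
                ... ]
                >>> [v + 1 for v in values]
                [2, 3]
        """
        return x + 1
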
251 changes: 126 additions & 125 deletions python/paddle/distributed/auto_parallel/static/engine.py
@@ -79,39 +79,39 @@ class Engine:

.. code-block:: python

import paddle
import paddle.vision.transforms as T
from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST

transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
train_dataset = MNIST(mode='train', transform=transform)
valid_dataset = MNIST(mode='test', transform=transform)

model = paddle.vision.models.LeNet()
loss = paddle.nn.CrossEntropyLoss()
optimizer = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
metrics = paddle.metric.Accuracy(topk=(1, 2))

engine = auto.Engine(model, loss, optimizer, metrics)
# fit
engine.fit(train_dataset,
epochs=2,
batch_size=64)
# evaluate
engine.evaluate(valid_dataset,
batch_size=64)
# predict
engine.predict(valid_dataset,
batch_size=64)
# save
engine.save("./my_model")
# load
engine.load("./my_model")
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.distributed.fleet import auto
>>> from paddle.vision.datasets import MNIST

>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> train_dataset = MNIST(mode='train', transform=transform)
>>> valid_dataset = MNIST(mode='test', transform=transform)

>>> model = paddle.vision.models.LeNet()
>>> loss = paddle.nn.CrossEntropyLoss()
>>> optimizer = paddle.optimizer.Adam(
... learning_rate=0.001, parameters=model.parameters())
>>> metrics = paddle.metric.Accuracy(topk=(1, 2))

>>> engine = auto.Engine(model, loss, optimizer, metrics)
>>> # fit
>>> engine.fit(train_dataset,
... epochs=2,
... batch_size=64)
>>> # evaluate
>>> engine.evaluate(valid_dataset,
... batch_size=64)
>>> # predict
>>> engine.predict(valid_dataset,
... batch_size=64)
>>> # save
>>> engine.save("./my_model")
>>> # load
>>> engine.load("./my_model")

"""

@@ -916,27 +916,27 @@ def fit(

.. code-block:: python

import paddle
import paddle.vision.transforms as T
from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST

transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
train_dataset = MNIST(mode='train', transform=transform)

model = paddle.vision.models.LeNet()
loss = paddle.nn.CrossEntropyLoss()
optimizer = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
metrics = paddle.metric.Accuracy(topk=(1, 2))

engine = auto.Engine(model, loss, optimizer, metrics)
engine.fit(train_dataset,
epochs=2,
batch_size=64)
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.distributed.fleet import auto
>>> from paddle.vision.datasets import MNIST

>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> train_dataset = MNIST(mode='train', transform=transform)

>>> model = paddle.vision.models.LeNet()
>>> loss = paddle.nn.CrossEntropyLoss()
>>> optimizer = paddle.optimizer.Adam(
... learning_rate=0.001, parameters=model.parameters())
>>> metrics = paddle.metric.Accuracy(topk=(1, 2))

>>> engine = auto.Engine(model, loss, optimizer, metrics)
>>> engine.fit(train_dataset,
... epochs=2,
... batch_size=64)
"""
self._mode = 'train'
self._inputs_spec, self._labels_spec = self._prepare_data_spec(
@@ -1069,23 +1069,23 @@ def evaluate(

.. code-block:: python

import paddle
import paddle.vision.transforms as T
from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.distributed.fleet import auto
>>> from paddle.vision.datasets import MNIST

transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
valid_dataset = MNIST(mode='test', transform=transform)
>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> valid_dataset = MNIST(mode='test', transform=transform)

model = paddle.vision.models.LeNet()
loss = paddle.nn.CrossEntropyLoss()
metrics = paddle.metric.Accuracy(topk=(1, 2))
>>> model = paddle.vision.models.LeNet()
>>> loss = paddle.nn.CrossEntropyLoss()
>>> metrics = paddle.metric.Accuracy(topk=(1, 2))

engine = auto.Engine(model, loss, metrics=metrics)
engine.evaluate(valid_dataset, batch_size=64)
>>> engine = auto.Engine(model, loss, metrics=metrics)
>>> engine.evaluate(valid_dataset, batch_size=64)

"""
self._mode = 'eval'
@@ -1179,21 +1179,21 @@ def predict(

.. code-block:: python

import paddle
import paddle.vision.transforms as T
from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.distributed.fleet import auto
>>> from paddle.vision.datasets import MNIST

transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
valid_dataset = MNIST(mode='test', transform=transform)
>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> valid_dataset = MNIST(mode='test', transform=transform)

model = paddle.vision.models.LeNet()
>>> model = paddle.vision.models.LeNet()

engine = auto.Engine(model)
engine.predict(valid_dataset, batch_size=64)
>>> engine = auto.Engine(model)
>>> engine.predict(valid_dataset, batch_size=64)
"""
self._mode = 'predict'
self._inputs_spec, self._labels_spec = self._prepare_data_spec(
@@ -1648,28 +1648,29 @@ def save(self, path, training=True):
Examples:

.. code-block:: python
import paddle
import paddle.vision.transforms as T
from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST

transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
train_dataset = MNIST(mode='train', transform=transform)

model = paddle.vision.models.LeNet()
loss = paddle.nn.CrossEntropyLoss()
optimizer = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
metrics = paddle.metric.Accuracy(topk=(1, 2))

engine = auto.Engine(model, loss, optimizer, metrics)
engine.fit(train_dataset,
epochs=1,
batch_size=64)
engine.save("./my_model")

>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.distributed.fleet import auto
>>> from paddle.vision.datasets import MNIST

>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> train_dataset = MNIST(mode='train', transform=transform)

>>> model = paddle.vision.models.LeNet()
>>> loss = paddle.nn.CrossEntropyLoss()
>>> optimizer = paddle.optimizer.Adam(
... learning_rate=0.001, parameters=model.parameters())
>>> metrics = paddle.metric.Accuracy(topk=(1, 2))

>>> engine = auto.Engine(model, loss, optimizer, metrics)
>>> engine.fit(train_dataset,
... epochs=1,
... batch_size=64)
>>> engine.save("./my_model")

"""
if training:
@@ -1732,29 +1732,29 @@ def load(self, path, strict=True, load_optimizer=True):
Examples:

.. code-block:: python
import paddle
import paddle.vision.transforms as T
from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST

transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5])
])
train_dataset = MNIST(mode='train', transform=transform)

model = paddle.vision.models.LeNet()
loss = paddle.nn.CrossEntropyLoss()
optimizer = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
metrics = paddle.metric.Accuracy(topk=(1, 2))

engine = auto.Engine(model, loss, optimizer, metrics)
engine.fit(train_dataset,
epochs=1,
batch_size=64)
engine.save("./my_model")
engine.load("./my_model")
>>> import paddle
>>> import paddle.vision.transforms as T
>>> from paddle.distributed.fleet import auto
>>> from paddle.vision.datasets import MNIST

>>> transform = T.Compose([
... T.Transpose(),
... T.Normalize([127.5], [127.5])
... ])
>>> train_dataset = MNIST(mode='train', transform=transform)

>>> model = paddle.vision.models.LeNet()
>>> loss = paddle.nn.CrossEntropyLoss()
>>> optimizer = paddle.optimizer.Adam(
... learning_rate=0.001, parameters=model.parameters())
>>> metrics = paddle.metric.Accuracy(topk=(1, 2))

>>> engine = auto.Engine(model, loss, optimizer, metrics)
>>> engine.fit(train_dataset,
... epochs=1,
... batch_size=64)
>>> engine.save("./my_model")
>>> engine.load("./my_model")

"""
self._strict = strict
@@ -33,14 +33,13 @@ class ProcessMesh(core.ProcessMesh):
Examples:
.. code-block:: python
import paddle
import paddle.distributed as dist
paddle.enable_static()
mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]])
assert mesh.shape == [2, 3]
assert mesh.processe_ids == [2, 4, 5, 0, 1, 3]
>>> import paddle
>>> import paddle.distributed as dist
>>> paddle.enable_static()
>>> mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]])
>>> assert mesh.shape == [2, 3]
>>> assert mesh.process_ids == [2, 4, 5, 0, 1, 3]
"""

Expand Down
Loading
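
The converted examples can be spot-checked locally with xdoctest. A minimal sketch, assuming xdoctest is installed (pip install xdoctest) and paddle is importable; the exact sample-code check run in Paddle's CI may differ, and these particular examples download MNIST and run training, so they are slow:

    # Collect and run every doctest found in the touched module's docstrings.
    # CLI equivalent:
    #   python -m xdoctest python/paddle/distributed/auto_parallel/static/engine.py all
    import xdoctest

    xdoctest.doctest_module(
        "python/paddle/distributed/auto_parallel/static/engine.py",
        command="all",
    )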