Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[xdoctest] reformat example code with google style in No.150-160 #56178

Merged
merged 2 commits into from
Aug 17, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
67 changes: 37 additions & 30 deletions python/paddle/text/datasets/wmt16.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,13 +59,13 @@ class WMT16(Dataset):

Args:
data_file(str): path to data tar file, can be set None if
:attr:`download` is True. Default None
mode(str): 'train', 'test' or 'val'. Default 'train'
:attr:`download` is True. Default None.
mode(str): 'train', 'test' or 'val'. Default 'train'.
src_dict_size(int): word dictionary size for source language word. Default -1.
trg_dict_size(int): word dictionary size for target language word. Default -1.
lang(str): source language, 'en' or 'de'. Default 'en'.
download(bool): whether to download dataset automatically if
:attr:`data_file` is not set. Default True
:attr:`data_file` is not set. Default True.

Returns:
Dataset: Instance of WMT16 dataset. The instance of dataset has 3 fields:
Expand All @@ -77,30 +77,37 @@ class WMT16(Dataset):

.. code-block:: python

import paddle
from paddle.text.datasets import WMT16

class SimpleNet(paddle.nn.Layer):
def __init__(self):
super().__init__()

def forward(self, src_ids, trg_ids, trg_ids_next):
return paddle.sum(src_ids), paddle.sum(trg_ids), paddle.sum(trg_ids_next)

paddle.disable_static()

wmt16 = WMT16(mode='train', src_dict_size=50, trg_dict_size=50)

for i in range(10):
src_ids, trg_ids, trg_ids_next = wmt16[i]
src_ids = paddle.to_tensor(src_ids)
trg_ids = paddle.to_tensor(trg_ids)
trg_ids_next = paddle.to_tensor(trg_ids_next)

model = SimpleNet()
src_ids, trg_ids, trg_ids_next = model(src_ids, trg_ids, trg_ids_next)
print(src_ids.numpy(), trg_ids.numpy(), trg_ids_next.numpy())

>>> import paddle
>>> from paddle.text.datasets import WMT16

>>> class SimpleNet(paddle.nn.Layer):
... def __init__(self):
... super().__init__()
...
... def forward(self, src_ids, trg_ids, trg_ids_next):
... return paddle.sum(src_ids), paddle.sum(trg_ids), paddle.sum(trg_ids_next)

>>> wmt16 = WMT16(mode='train', src_dict_size=50, trg_dict_size=50)

>>> for i in range(10):
... src_ids, trg_ids, trg_ids_next = wmt16[i]
... src_ids = paddle.to_tensor(src_ids)
... trg_ids = paddle.to_tensor(trg_ids)
... trg_ids_next = paddle.to_tensor(trg_ids_next)
...
... model = SimpleNet()
... src_ids, trg_ids, trg_ids_next = model(src_ids, trg_ids, trg_ids_next)
... print(src_ids.item(), trg_ids.item(), trg_ids_next.item())
89 32 33
79 18 19
55 26 27
147 36 37
106 22 23
135 50 51
54 43 44
217 30 31
146 51 52
55 24 25
"""

def __init__(
Expand Down Expand Up @@ -257,9 +264,9 @@ def get_dict(self, lang, reverse=False):

.. code-block:: python

from paddle.text.datasets import WMT16
wmt16 = WMT16(mode='train', src_dict_size=50, trg_dict_size=50)
en_dict = wmt16.get_dict('en')
>>> from paddle.text.datasets import WMT16
>>> wmt16 = WMT16(mode='train', src_dict_size=50, trg_dict_size=50)
>>> en_dict = wmt16.get_dict('en')

"""
dict_size = (
Expand Down
62 changes: 38 additions & 24 deletions python/paddle/text/viterbi_decode.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,20 +42,27 @@ def viterbi_decode(
Returns:
scores(Tensor): The output tensor containing the score for the Viterbi sequence. The shape is [batch_size]
and the data type is float32 or float64.
paths(Tensor): The output tensor containing the highest scoring tag indices. The shape is [batch_size, sequence_length]
and the data type is int64.
paths(Tensor): The output tensor containing the highest scoring tag indices. The shape is [batch_size, sequence_length]
and the data type is int64.

Example:
Examples:
.. code-block:: python

import paddle
paddle.seed(102)
batch_size, seq_len, num_tags = 2, 4, 3
emission = paddle.rand((batch_size, seq_len, num_tags), dtype='float32')
length = paddle.randint(1, seq_len + 1, [batch_size])
tags = paddle.randint(0, num_tags, [batch_size, seq_len])
transition = paddle.rand((num_tags, num_tags), dtype='float32')
scores, path = paddle.text.viterbi_decode(emission, transition, length, False) # scores: [3.37089300, 1.56825531], path: [[1, 0, 0], [1, 1, 0]]
>>> import paddle
>>> paddle.seed(2023)
>>> batch_size, seq_len, num_tags = 2, 4, 3
>>> emission = paddle.rand((batch_size, seq_len, num_tags), dtype='float32')
>>> length = paddle.randint(1, seq_len + 1, [batch_size])
>>> tags = paddle.randint(0, num_tags, [batch_size, seq_len])
>>> transition = paddle.rand((num_tags, num_tags), dtype='float32')
>>> scores, path = paddle.text.viterbi_decode(emission, transition, length, False)
>>> print(scores)
Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
[2.57385254, 2.04533720])
>>> print(path)
Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
[[0, 0],
[1, 1]])
"""
if in_dygraph_mode():
return _C_ops.viterbi_decode(
Expand Down Expand Up @@ -95,7 +102,7 @@ class ViterbiDecoder(Layer):
Decode the highest scoring sequence of tags computed by transitions and potentials and get the viterbi path.

Args:
transitions (`Tensor`): The transition matrix. Its dtype is float32 and has a shape of `[num_tags, num_tags]`.
transitions (`Tensor`): The transition matrix. Its dtype is float32 and has a shape of `[num_tags, num_tags]`.
include_bos_eos_tag (`bool`, optional): If set to True, the last row and the last column of transitions will be considered
as start tag, the second to last row and the second to last column of transitions will be considered as stop tag. Defaults to ``True``.
name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please
Expand All @@ -104,27 +111,34 @@ class ViterbiDecoder(Layer):
Shape:
potentials (Tensor): The input tensor of unary emission. This is a 3-D tensor with shape of
[batch_size, sequence_length, num_tags]. The data type is float32 or float64.
lengths (Tensor): The input tensor of length of each sequence. This is a 1-D tensor with shape of
lengths (Tensor): The input tensor of length of each sequence. This is a 1-D tensor with shape of
[batch_size]. The data type is int64.

Returns:
scores(Tensor): The output tensor containing the score for the Viterbi sequence. The shape is [batch_size]
and the data type is float32 or float64.
paths(Tensor): The output tensor containing the highest scoring tag indices. The shape is [batch_size, sequence_length]
paths(Tensor): The output tensor containing the highest scoring tag indices. The shape is [batch_size, sequence_length]
and the data type is int64.

Example:
Examples:
.. code-block:: python

import paddle
paddle.seed(102)
batch_size, seq_len, num_tags = 2, 4, 3
emission = paddle.rand((batch_size, seq_len, num_tags), dtype='float32')
length = paddle.randint(1, seq_len + 1, [batch_size])
tags = paddle.randint(0, num_tags, [batch_size, seq_len])
transition = paddle.rand((num_tags, num_tags), dtype='float32')
decoder = paddle.text.ViterbiDecoder(transition, include_bos_eos_tag=False)
scores, path = decoder(emission, length) # scores: [3.37089300, 1.56825531], path: [[1, 0, 0], [1, 1, 0]]
>>> import paddle
>>> paddle.seed(2023)
>>> batch_size, seq_len, num_tags = 2, 4, 3
>>> emission = paddle.rand((batch_size, seq_len, num_tags), dtype='float32')
>>> length = paddle.randint(1, seq_len + 1, [batch_size])
>>> tags = paddle.randint(0, num_tags, [batch_size, seq_len])
>>> transition = paddle.rand((num_tags, num_tags), dtype='float32')
>>> decoder = paddle.text.ViterbiDecoder(transition, include_bos_eos_tag=False)
>>> scores, path = decoder(emission, length)
>>> print(scores)
Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
[2.57385254, 2.04533720])
>>> print(path)
Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
[[0, 0],
[1, 1]])
"""

def __init__(self, transitions, include_bos_eos_tag=True, name=None):
Expand Down
29 changes: 16 additions & 13 deletions python/paddle/vision/models/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,13 @@

def _make_divisible(v, divisor=8, min_value=None):
"""
This function ensures that all layers have a channel number that is divisible by divisor
This function ensures that all layers have a channel number that is divisible by divisor.
You can also see at https://github.com/keras-team/keras/blob/8ecef127f70db723c158dbe9ed3268b3d610ab55/keras/applications/mobilenet_v2.py#L505

Args:
divisor (int): The divisor for number of channels. Default: 8.
divisor (int, optional): The divisor for number of channels. Default: 8.
min_value (int, optional): The minimum value of number of channels, if it is None,
the default is divisor. Default: None.
the default is divisor. Default: None.
"""
if min_value is None:
min_value = divisor
Expand All @@ -50,22 +50,25 @@ class IntermediateLayerGetter(nn.LayerDict):
So if `model` is passed, `model.feature1` can be returned, but not `model.feature1.layer2`.

Args:
model (nn.Layer): model on which we will extract the features
return_layers (Dict[name, new_name]): a dict containing the names of the layers for

model (nn.Layer): Model on which we will extract the features.
return_layers (Dict[name, new_name]): A dict containing the names of the layers for
which the activations will be returned as the key of the dict, and the value of the
dict is the name of the returned activation (which the user can specify).

Examples:

.. code-block:: python

import paddle
m = paddle.vision.models.resnet18(pretrained=False)
# extract layer1 and layer3, giving as names `feat1` and `feat2`
new_m = paddle.vision.models._utils.IntermediateLayerGetter(m,
{'layer1': 'feat1', 'layer3': 'feat2'})
out = new_m(paddle.rand([1, 3, 224, 224]))
print([(k, v.shape) for k, v in out.items()])
# [('feat1', [1, 64, 56, 56]), ('feat2', [1, 256, 14, 14])]
>>> import paddle
>>> m = paddle.vision.models.resnet18(pretrained=False)

>>> # extract layer1 and layer3, giving as names `feat1` and `feat2`
>>> new_m = paddle.vision.models._utils.IntermediateLayerGetter(m,
... {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = new_m(paddle.rand([1, 3, 224, 224]))
>>> print([(k, v.shape) for k, v in out.items()])
[('feat1', [1, 64, 56, 56]), ('feat2', [1, 256, 14, 14])]
"""

__annotations__ = {
Expand Down
40 changes: 19 additions & 21 deletions python/paddle/vision/models/alexnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,24 +75,22 @@ class AlexNet(nn.Layer):

Args:
num_classes (int, optional): Output dim of last fc layer. If num_classes <= 0, last fc layer
will not be defined. Default: 1000.
will not be defined. Default: 1000.

Returns:
:ref:`api_paddle_nn_Layer`. An instance of AlexNet model.

Examples:
.. code-block:: python

import paddle
from paddle.vision.models import AlexNet
>>> import paddle
>>> from paddle.vision.models import AlexNet

alexnet = AlexNet()

x = paddle.rand([1, 3, 224, 224])
out = alexnet(x)

print(out.shape)
# [1, 1000]
>>> alexnet = AlexNet()
>>> x = paddle.rand([1, 3, 224, 224])
>>> out = alexnet(x)
>>> print(out.shape)
[1, 1000]
"""

def __init__(self, num_classes=1000):
Expand Down Expand Up @@ -197,7 +195,7 @@ def alexnet(pretrained=False, **kwargs):

Args:
pretrained (bool, optional): Whether to load pre-trained weights. If True, returns a model pre-trained
on ImageNet. Default: False.
on ImageNet. Default: False.
**kwargs (optional): Additional keyword arguments. For details, please refer to :ref:`AlexNet <api_paddle_vision_AlexNet>`.

Returns:
Expand All @@ -206,19 +204,19 @@ def alexnet(pretrained=False, **kwargs):
Examples:
.. code-block:: python

import paddle
from paddle.vision.models import alexnet
>>> import paddle
>>> from paddle.vision.models import alexnet

# build model
model = alexnet()
>>> # Build model
>>> model = alexnet()

# build model and load imagenet pretrained weight
# model = alexnet(pretrained=True)
>>> # Build model and load imagenet pretrained weight
>>> # model = alexnet(pretrained=True)

x = paddle.rand([1, 3, 224, 224])
out = model(x)
>>> x = paddle.rand([1, 3, 224, 224])
>>> out = model(x)

print(out.shape)
# [1, 1000]
>>> print(out.shape)
[1, 1000]
"""
return _alexnet('alexnet', pretrained, **kwargs)
Loading