[xdoctest] reformat example code with google style in No.95-99 #55834

Merged 2 commits on Aug 3, 2023
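The examples below are rewritten in the Google/xdoctest style (>>> prompts followed by the expected output). As a rough, hedged illustration rather than Paddle's official sample-code check, one way to exercise doctest-style examples like these locally, assuming the xdoctest package is installed, is:

    import xdoctest

    # Run every ">>>" example found in one of the edited modules.
    # The path is illustrative and assumes the repository root as the
    # working directory.
    xdoctest.doctest_module("python/paddle/nn/utils/clip_grad_norm_.py")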
28 changes: 15 additions & 13 deletions python/paddle/nn/quant/quant_layers.py
@@ -611,19 +611,21 @@ class QuantizedConv2DTranspose(Layer):
The only difference is that its inputs are all fake quantized.

Examples:
.. code-block:: python

import paddle
import paddle.nn as nn
from paddle.nn.quant.quant_layers import QuantizedConv2DTranspose

x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.Conv2DTranspose(4, 6, (3, 3))
conv_quantized = QuantizedConv2DTranspose(conv)
y_quantized = conv_quantized(x_var)
y_var = conv(x_var)
print(y_var.shape, y_quantized.shape)
# [2, 6, 10, 10], [2, 6, 10, 10]
.. code-block:: python

>>> import paddle
>>> import paddle.nn as nn
>>> from paddle.nn.quant.quant_layers import QuantizedConv2DTranspose

>>> x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
>>> conv = nn.Conv2DTranspose(4, 6, (3, 3))
>>> conv_quantized = QuantizedConv2DTranspose(conv)
>>> y_quantized = conv_quantized(x_var)
>>> y_var = conv(x_var)
>>> print(y_var.shape)
[2, 6, 10, 10]
>>> print(y_quantized.shape)
[2, 6, 10, 10]

"""

49 changes: 29 additions & 20 deletions python/paddle/nn/quant/stub.py
@@ -28,26 +28,35 @@ class Stub(Layer):
It will use a global configuration to create the observers if ``observer`` is None.
Examples:
.. code-block:: python
import paddle
from paddle.nn.quant import Stub
from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
from paddle.nn import Conv2D
from paddle.quantization import QAT, QuantConfig
quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
class Model(paddle.nn.Layer):
def __init__(self, num_classes=10):
super().__init__()
self.conv = Conv2D(3, 6, 3, stride=1, padding=1)
self.quant = Stub(quanter)
def forward(self, inputs):
out = self.conv(inputs)
out = self.quant(out)
return paddle.nn.functional.relu(out)
model = Model()
q_config = QuantConfig(activation=quanter, weight=quanter)
qat = QAT(q_config)
quant_model = qat.quantize(model)
print(quant_model)
>>> import paddle
>>> from paddle.nn.quant import Stub
>>> from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver
>>> from paddle.nn import Conv2D
>>> from paddle.quantization import QAT, QuantConfig
>>> quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
>>> class Model(paddle.nn.Layer):
... def __init__(self, num_classes=10):
... super().__init__()
... self.conv = Conv2D(3, 6, 3, stride=1, padding=1)
... self.quant = Stub(quanter)
... def forward(self, inputs):
... out = self.conv(inputs)
... out = self.quant(out)
... return paddle.nn.functional.relu(out)
>>> model = Model()
>>> q_config = QuantConfig(activation=quanter, weight=quanter)
>>> qat = QAT(q_config)
>>> quant_model = qat.quantize(model)
>>> print(quant_model)
Model(
(conv): QuantedConv2D(
(weight_quanter): FakeQuanterWithAbsMaxObserverLayer()
(activation_quanter): FakeQuanterWithAbsMaxObserverLayer()
)
(quant): QuanterStub(
(_observer): FakeQuanterWithAbsMaxObserverLayer()
)
)
"""

def __init__(self, observer=None):
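The Stub docstring above notes that a global configuration supplies the observer when none is passed. A minimal hedged sketch of that path, assuming the activation quanter from QuantConfig is what the stub falls back to after qat.quantize(...); the class name ModelWithDefaultStub is purely illustrative:

    import paddle
    from paddle.nn import Conv2D
    from paddle.nn.quant import Stub
    from paddle.quantization import QAT, QuantConfig
    from paddle.quantization.quanters import FakeQuanterWithAbsMaxObserver

    class ModelWithDefaultStub(paddle.nn.Layer):
        def __init__(self):
            super().__init__()
            self.conv = Conv2D(3, 6, 3, stride=1, padding=1)
            # No explicit observer here: the stub is expected to pick up the
            # activation quanter configured globally in QuantConfig.
            self.quant = Stub()

        def forward(self, inputs):
            return self.quant(self.conv(inputs))

    quanter = FakeQuanterWithAbsMaxObserver(moving_rate=0.9)
    q_config = QuantConfig(activation=quanter, weight=quanter)
    quant_model = QAT(q_config).quantize(ModelWithDefaultStub())
    print(quant_model)  # the stub should show up as a QuanterStub sublayer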
20 changes: 10 additions & 10 deletions python/paddle/nn/utils/clip_grad_norm_.py
@@ -45,19 +45,19 @@ def clip_grad_norm_(
Total norm of the parameter gradients (treated as a single vector).
Example:
.. code-block:: python
import paddle
>>> import paddle

x = paddle.uniform([10, 10], min=-1.0, max=1.0, dtype='float32')
max_norm = float(5.0)
linear = paddle.nn.Linear(in_features=10, out_features=10)
out = linear(x)
loss = paddle.mean(out)
loss.backward()
>>> x = paddle.uniform([10, 10], min=-1.0, max=1.0, dtype='float32')
>>> max_norm = float(5.0)
>>> linear = paddle.nn.Linear(in_features=10, out_features=10)
>>> out = linear(x)
>>> loss = paddle.mean(out)
>>> loss.backward()

paddle.nn.utils.clip_grad_norm_(linear.parameters(), max_norm)
>>> paddle.nn.utils.clip_grad_norm_(linear.parameters(), max_norm)

sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())
sdg.step()
>>> sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())
>>> sdg.step()
"""
if not paddle.in_dynamic_mode():
raise RuntimeError('this API can only run in dynamic mode.')
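Since clip_grad_norm_ returns the total norm described in the docstring, the example can also capture that value. A short hedged sketch using the same setup as the docstring example:

    import paddle

    x = paddle.uniform([10, 10], min=-1.0, max=1.0, dtype='float32')
    linear = paddle.nn.Linear(in_features=10, out_features=10)
    loss = paddle.mean(linear(x))
    loss.backward()

    # The return value is the total norm of all parameter gradients, computed
    # as if they were concatenated into a single vector.
    total_norm = paddle.nn.utils.clip_grad_norm_(linear.parameters(), max_norm=5.0)
    print(total_norm)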
20 changes: 10 additions & 10 deletions python/paddle/nn/utils/clip_grad_value_.py
@@ -34,16 +34,16 @@ def clip_grad_value_(
Example:
.. code-block:: python

import paddle
x = paddle.uniform([10, 10], min=-10.0, max=10.0, dtype='float32')
clip_value = float(5.0)
linear = paddle.nn.Linear(in_features=10, out_features=10)
out = linear(x)
loss = paddle.mean(out)
loss.backward()
paddle.nn.utils.clip_grad_value_(linear.parameters(), clip_value)
sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())
sdg.step()
>>> import paddle
>>> x = paddle.uniform([10, 10], min=-10.0, max=10.0, dtype='float32')
>>> clip_value = float(5.0)
>>> linear = paddle.nn.Linear(in_features=10, out_features=10)
>>> out = linear(x)
>>> loss = paddle.mean(out)
>>> loss.backward()
>>> paddle.nn.utils.clip_grad_value_(linear.parameters(), clip_value)
>>> sdg = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())
>>> sdg.step()
"""
if not paddle.in_dynamic_mode():
raise RuntimeError('this API can only run in dynamic mode.')
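To make the clipping effect concrete, a hedged follow-up to the example above checks that every gradient entry ends up inside [-clip_value, clip_value] after the call:

    import paddle

    x = paddle.uniform([10, 10], min=-10.0, max=10.0, dtype='float32')
    linear = paddle.nn.Linear(in_features=10, out_features=10)
    loss = paddle.mean(linear(x))
    loss.backward()

    clip_value = 5.0
    paddle.nn.utils.clip_grad_value_(linear.parameters(), clip_value)

    # After clipping, every gradient element should lie within
    # [-clip_value, clip_value].
    for p in linear.parameters():
        if p.grad is not None:
            assert float(p.grad.abs().max()) <= clip_value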
43 changes: 21 additions & 22 deletions python/paddle/nn/utils/spectral_norm_hook.py
@@ -182,28 +182,27 @@ def spectral_norm(
Layer, the original layer with the spectral norm hook.

Examples:
.. code-block:: python

from paddle.nn import Conv2D
from paddle.nn.utils import spectral_norm

conv = Conv2D(3, 1, 3)
sn_conv = spectral_norm(conv)
print(sn_conv)
# Conv2D(3, 1, kernel_size=[3, 3], data_format=NCHW)
print(sn_conv.weight)
# Tensor(shape=[1, 3, 3, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=False,
# [[[[-0.21090528, 0.18563725, -0.14127982],
# [-0.02310637, 0.03197737, 0.34353802],
# [-0.17117859, 0.33152047, -0.28408015]],
#
# [[-0.13336606, -0.01862637, 0.06959272],
# [-0.02236020, -0.27091628, -0.24532901],
# [ 0.27254242, 0.15516677, 0.09036587]],
#
# [[ 0.30169338, -0.28146112, -0.11768346],
# [-0.45765871, -0.12504843, -0.17482486],
# [-0.36866254, -0.19969313, 0.08783543]]]])
.. code-block:: python

>>> from paddle.nn import Conv2D
>>> from paddle.nn.utils import spectral_norm
>>> import paddle
>>> paddle.seed(2023)
>>> conv = Conv2D(3, 1, 3)
>>> sn_conv = spectral_norm(conv)
>>> print(sn_conv)
Conv2D(3, 1, kernel_size=[3, 3], data_format=NCHW)
>>> print(sn_conv.weight)
Tensor(shape=[1, 3, 3, 3], dtype=float32, place=Place(cpu), stop_gradient=False,
[[[[ 0.01668976, 0.30305523, 0.11405435],
[-0.06765547, -0.50396705, -0.40925547],
[ 0.47344422, 0.03628403, 0.45277366]],
[[-0.15177251, -0.16305730, -0.15723954],
[-0.28081197, -0.09183260, -0.08081978],
[-0.40895155, 0.18298769, -0.29325116]],
[[ 0.21819633, -0.01822380, -0.50351536],
[-0.06262003, 0.17713565, 0.20517939],
[ 0.16659889, -0.14333329, 0.05228264]]]])

"""

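As a brief follow-up to the docstring example, the spectrally normalized layer is used like any other Conv2D; the hook is expected to re-estimate the weight's largest singular value by power iteration on each call. A minimal hedged usage sketch:

    import paddle
    from paddle.nn import Conv2D
    from paddle.nn.utils import spectral_norm

    paddle.seed(2023)
    conv = Conv2D(3, 1, 3)
    sn_conv = spectral_norm(conv)

    # The wrapped layer is called exactly like the original one; the weight it
    # uses is rescaled so that its spectral norm is approximately 1.
    x = paddle.rand([1, 3, 8, 8])
    y = sn_conv(x)
    print(y.shape)  # expected: [1, 1, 6, 6]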