Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[xdoctest][task 141] Reformat example code with google style in quantization/post_training_quantization.py #56238

Merged
merged 4 commits into the base branch on Sep 18, 2023
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 40 additions & 37 deletions python/paddle/static/quantization/post_training_quantization.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@
except:
from .utils import tqdm


from paddle.base.framework import IrGraph, _get_var

from ... import io, static
Expand Down Expand Up @@ -152,7 +151,7 @@ def __init__(
return_graph=False,
deploy_backend=None,
):
'''
"""
Constructor.

Args:
Expand Down Expand Up @@ -248,41 +247,45 @@ def __init__(
None

Examples:
.. code-block:: python
import paddle.static as static
from paddle.static.quantization import PostTrainingQuantization

exe = static.Executor(paddle.CPUPlace())
model_dir = path/to/fp32_model_params
# set model_filename as None when the filename is __model__,
# otherwise set it as the real filename
model_filename = None
# set params_filename as None when all parameters were saved in
# separate files, otherwise set it as the real filename
params_filename = None
save_model_path = path/to/save_model_path
# prepare the sample generator according to the model, and the
# sample generator must return a sample every time. The reference
# document: https://www.paddlepaddle.org.cn/documentation/docs/zh
# /user_guides/howto/prepare_data/use_py_reader.html
sample_generator = your_sample_generator
batch_size = 10
batch_nums = 10
algo = "KL"
quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"]
ptq = PostTrainingQuantization(
executor=exe,
sample_generator=sample_generator,
model_dir=model_dir,
model_filename=model_filename,
params_filename=params_filename,
batch_size=batch_size,
batch_nums=batch_nums,
algo=algo,
quantizable_op_type=quantizable_op_type)
ptq.quantize()
ptq.save_quantized_model(save_model_path)
'''
.. code-block:: python

>>> # doctest: +SKIP("There are some example variables in the code.")
>>> import paddle.static as static
>>> from paddle.static.quantization import PostTrainingQuantization

>>> exe = static.Executor(paddle.CPUPlace())
>>> model_dir = "path/to/fp32_model_params"
>>> # set model_filename as None when the filename is __model__,
>>> # otherwise set it as the real filename
>>> model_filename = None
>>> # set params_filename as None when all parameters were saved in
>>> # separate files, otherwise set it as the real filename
>>> params_filename = None
>>> save_model_path = "path/to/save_model_path"
>>> # prepare the sample generator according to the model, and the
>>> # sample generator must return a sample every time. The reference
>>> # document: https://www.paddlepaddle.org.cn/documentation/docs/zh
>>> # /user_guides/howto/prepare_data/use_py_reader.html
>>> data_loader = your_data_loader
>>> batch_size = 10
>>> batch_nums = 10
>>> algo = "KL"
>>> quantizable_op_type = ["conv2d", "depthwise_conv2d", "mul"]
>>> ptq = PostTrainingQuantization(
... executor=exe,
... sample_generator=None,
... data_loader=data_loader,
... model_dir=model_dir,
... model_filename=model_filename,
... params_filename=params_filename,
... batch_size=batch_size,
... batch_nums=batch_nums,
... algo=algo,
... quantizable_op_type=quantizable_op_type
... )
>>> ptq.quantize()
>>> ptq.save_quantized_model(save_model_path)
"""

self._support_activation_quantize_type = [
'range_abs_max',
Expand Down