[codestyle][ruff] enable PGH004 (#57941)
* [codestyle] enable PGH004

* Update pyproject.toml

Co-authored-by: Nyakku Shigure <sigure.qaq@gmail.com>

---------

Co-authored-by: Nyakku Shigure <sigure.qaq@gmail.com>
gouzil and SigureMo authored Oct 9, 2023
1 parent d0fd7ff commit 3462832
Showing 31 changed files with 55 additions and 52 deletions.
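For context: PGH004 is the pygrep-hooks "blanket noqa" rule. A bare `# noqa` with no error code silences every lint rule on its line, so real problems can hide behind it; the scoped form `# noqa: <code>` suppresses only the named rule. A minimal sketch of the distinction (the imports below are illustrative, not taken from this patch):

```python
# Flagged by PGH004: a bare "# noqa" suppresses *every* rule on the line,
# including ones the author never meant to silence.
import os  # noqa

# Accepted: the scoped form suppresses only F401 ("imported but unused")
# and leaves all other rules active on the line.
import sys  # noqa: F401
```

Most of the changes below simply delete blanket comments that turned out to suppress nothing under the current configuration.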
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -72,6 +72,9 @@ select = [
     "PLR1711",
     "PLR1722",
     "PLW3301",
+
+    # Pygrep-hooks
+    "PGH004",
 ]
 unfixable = [
     "NPY001"
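With the rule added to `select`, it can be exercised locally before pushing. A minimal sketch, assuming ruff is installed and the script is run from the repository root (`--select` is a standard ruff CLI option; the `python/` path is this repo's layout):

```python
# Runs "ruff check python/ --select PGH004" and prints any blanket-noqa hits.
import subprocess

result = subprocess.run(
    ["ruff", "check", "python/", "--select", "PGH004"],
    capture_output=True,
    text=True,
)
print(result.stdout or "no blanket-noqa violations found")
```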
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/utils/__init__.py
@@ -26,7 +26,7 @@
 from . import sequence_parallel_utils


-__all__ = ["LocalFS", "recompute", "DistributedInfer", "HDFSClient"]  # noqa
+__all__ = ["LocalFS", "recompute", "DistributedInfer", "HDFSClient"]


 def recompute(function, *args, **kwargs):
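The same one-line pattern repeats across the `__all__` lists below: the blanket comment is deleted outright rather than narrowed, because no rule in the current configuration actually fires on these lines. Had one fired, the fix would be a scoped suppression instead; a hedged sketch (F401 here is only an example code, not one reported against this file):

```python
# Before: blanket suppression, flagged by PGH004.
__all__ = ["LocalFS", "recompute", "DistributedInfer", "HDFSClient"]  # noqa

# After: no suppression is needed at all.
__all__ = ["LocalFS", "recompute", "DistributedInfer", "HDFSClient"]

# If a specific rule did fire, the scoped form would be the fix, e.g.:
# __all__ = [...]  # noqa: F401
```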
2 changes: 1 addition & 1 deletion python/paddle/distribution/__init__.py
@@ -32,7 +32,7 @@
 from paddle.distribution.laplace import Laplace
 from paddle.distribution.geometric import Geometric

-__all__ = [  # noqa
+__all__ = [
     'Bernoulli',
     'Beta',
     'Categorical',
2 changes: 1 addition & 1 deletion python/paddle/distribution/transform.py
@@ -25,7 +25,7 @@
     variable,
 )

-__all__ = [  # noqa
+__all__ = [
     'Transform',
     'AbsTransform',
     'AffineTransform',
2 changes: 1 addition & 1 deletion python/paddle/incubate/asp/__init__.py
@@ -34,7 +34,7 @@
 from .supported_layer_list import add_supported_layer  # noqa: F401


-__all__ = [  # noqa
+__all__ = [
     'calculate_density',
     'decorate',
     'prune_model',
2 changes: 1 addition & 1 deletion python/paddle/incubate/autograd/__init__.py
@@ -16,7 +16,7 @@
 from .primx import prim2orig
 from .utils import disable_prim, enable_prim, prim_enabled

-__all__ = [  # noqa
+__all__ = [
     'vjp',
     'jvp',
     'Jacobian',
2 changes: 1 addition & 1 deletion python/paddle/incubate/nn/__init__.py
@@ -24,7 +24,7 @@
 from .layer.fused_dropout_add import FusedDropoutAdd  # noqa: F401
 from .layer.fused_dropout_nd import FusedDropout  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'FusedMultiHeadAttention',
     'FusedFeedForward',
     'FusedTransformerEncoderLayer',
2 changes: 1 addition & 1 deletion python/paddle/inference/__init__.py
@@ -33,7 +33,7 @@
     XpuConfig,
 )

-__all__ = [  # noqa
+__all__ = [
     'Config',
     'DataType',
     'PlaceType',
2 changes: 1 addition & 1 deletion python/paddle/io/__init__.py
@@ -30,7 +30,7 @@
 from .dataloader import Subset  # noqa: F401
 from .dataloader import random_split  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'Dataset',
     'IterableDataset',
     'TensorDataset',
2 changes: 1 addition & 1 deletion python/paddle/jit/__init__.py
@@ -23,7 +23,7 @@
 from .dy2static.logging_utils import set_code_level, set_verbosity
 from .translated_layer import TranslatedLayer

-__all__ = [  # noqa
+__all__ = [
     'save',
     'load',
     'to_static',
2 changes: 1 addition & 1 deletion python/paddle/metric/__init__.py
@@ -19,7 +19,7 @@
 from .metrics import Auc  # noqa: F401
 from .metrics import accuracy  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'Metric',
     'Accuracy',
     'Precision',
2 changes: 1 addition & 1 deletion python/paddle/nn/__init__.py
@@ -207,7 +207,7 @@ def weight_norm(*args):
     return utils.weight_norm(*args)


-__all__ = [  # noqa
+__all__ = [
     'BatchNorm',
     'CELU',
     'GroupNorm',
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/__init__.py
@@ -140,7 +140,7 @@
 from .flash_attention import scaled_dot_product_attention
 from .flash_attention import sdp_kernel

-__all__ = [  # noqa
+__all__ = [
     'celu',
     'conv1d',
     'conv1d_transpose',
2 changes: 1 addition & 1 deletion python/paddle/nn/initializer/__init__.py
@@ -45,7 +45,7 @@
 from .kaiming import MSRAInitializer  # noqa: F401
 from .assign import NumpyArrayInitializer  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'Bilinear',
     'Constant',
     'KaimingUniform',
2 changes: 1 addition & 1 deletion python/paddle/nn/layer/layers.py
@@ -26,7 +26,7 @@
 from paddle.base import core, framework, unique_name
 from paddle.base.core import VarDesc
 from paddle.base.dygraph import no_grad
-from paddle.base.dygraph.base import in_declarative_mode  # noqa F401
+from paddle.base.dygraph.base import in_declarative_mode  # noqa: F401
 from paddle.base.dygraph.base import (
     _convert_into_variable,
     in_to_static_mode,
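This layers.py hunk is the one place where a comment is corrected rather than deleted: `# noqa F401` is missing the colon, so flake8 and ruff do not parse it as a scoped suppression and instead treat it as a blanket `# noqa`, which PGH004 flags. Adding the colon restores the intended, narrowly scoped behavior. An illustration (the `functools` imports are stand-ins for the actual `paddle.base.dygraph.base` import, so the snippet runs standalone):

```python
# Missing colon: parsed as a blanket suppression (PGH004 fires).
from functools import reduce  # noqa F401

# Colon present: suppresses only F401, as intended.
from functools import partial  # noqa: F401
```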
2 changes: 1 addition & 1 deletion python/paddle/nn/utils/__init__.py
@@ -22,7 +22,7 @@
 from .clip_grad_norm_ import clip_grad_norm_  # noqa: F401
 from .clip_grad_value_ import clip_grad_value_  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'weight_norm',
     'remove_weight_norm',
     'spectral_norm',
2 changes: 1 addition & 1 deletion python/paddle/optimizer/__init__.py
@@ -25,7 +25,7 @@
 from .lbfgs import LBFGS  # noqa: F401
 from . import lr  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'Optimizer',
     'Adagrad',
     'Adam',
2 changes: 1 addition & 1 deletion python/paddle/optimizer/lr.py
@@ -28,7 +28,7 @@
 )
 from paddle.base.layer_helper import LayerHelper

-__all__ = [  # noqa
+__all__ = [
     'LRScheduler',
     'NoamDecay',
     'PiecewiseDecay',
2 changes: 1 addition & 1 deletion python/paddle/static/__init__.py
@@ -79,7 +79,7 @@
 from ..base.framework import program_guard  # noqa: F401
 from ..base.framework import Program  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'append_backward',
     'gradients',
     'Executor',
2 changes: 1 addition & 1 deletion python/paddle/static/nn/__init__.py
@@ -60,7 +60,7 @@
 from .control_flow import cond
 from .static_pylayer import static_pylayer

-__all__ = [  # noqa
+__all__ = [
     'fc',
     'batch_norm',
     'bilinear_tensor_product',
2 changes: 1 addition & 1 deletion python/paddle/tensor/__init__.py
@@ -381,7 +381,7 @@
 from ..signal import stft  # noqa: F401

 # this list used in math_op_patch.py for _binary_creator_
-tensor_method_func = [  # noqa
+tensor_method_func = [
     'create_parameter',
     'create_tensor',
     'matmul',
2 changes: 1 addition & 1 deletion python/paddle/text/__init__.py
@@ -21,7 +21,7 @@
 from .datasets import WMT14  # noqa: F401
 from .datasets import WMT16  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'Conll05st',
     'Imdb',
     'Imikolov',
2 changes: 1 addition & 1 deletion python/paddle/utils/__init__.py
@@ -53,4 +53,4 @@
 from .layers_utils import _contain_var  # noqa: F401
 from .layers_utils import _convert_to_tensor_list  # noqa: F401

-__all__ = ['deprecated', 'run_check', 'require_version', 'try_import']  # noqa
+__all__ = ['deprecated', 'run_check', 'require_version', 'try_import']
2 changes: 1 addition & 1 deletion python/paddle/utils/cpp_extension/__init__.py
@@ -22,7 +22,7 @@
 from .extension_utils import get_build_directory  # noqa: F401
 from .extension_utils import load_op_meta_info_and_register_op  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'CppExtension',
     'CUDAExtension',
     'load',
2 changes: 1 addition & 1 deletion python/paddle/utils/unique_name.py
@@ -17,4 +17,4 @@
 from ..base.unique_name import guard  # noqa: F401
 from ..base.unique_name import switch  # noqa: F401

-__all__ = ['generate', 'switch', 'guard']  # noqa
+__all__ = ['generate', 'switch', 'guard']
2 changes: 1 addition & 1 deletion python/paddle/vision/__init__.py
@@ -112,4 +112,4 @@
 from .transforms import adjust_hue  # noqa: F401
 from .transforms import normalize  # noqa: F401

-__all__ = ['set_image_backend', 'get_image_backend', 'image_load']  # noqa
+__all__ = ['set_image_backend', 'get_image_backend', 'image_load']
2 changes: 1 addition & 1 deletion python/paddle/vision/datasets/__init__.py
@@ -21,7 +21,7 @@
 from .cifar import Cifar100  # noqa: F401
 from .voc2012 import VOC2012  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'DatasetFolder',
     'ImageFolder',
     'MNIST',
2 changes: 1 addition & 1 deletion python/paddle/vision/models/__init__.py
@@ -64,7 +64,7 @@
 from .shufflenetv2 import shufflenet_v2_x2_0  # noqa: F401
 from .shufflenetv2 import shufflenet_v2_swish  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'ResNet',
     'resnet18',
     'resnet34',
2 changes: 1 addition & 1 deletion python/paddle/vision/ops.py
@@ -26,7 +26,7 @@
 from ..nn import BatchNorm2D, Conv2D, Layer, ReLU, Sequential
 from ..nn.initializer import Normal

-__all__ = [  # noqa
+__all__ = [
     'yolo_loss',
     'yolo_box',
     'prior_box',
2 changes: 1 addition & 1 deletion python/paddle/vision/transforms/__init__.py
@@ -51,7 +51,7 @@
 from .functional import normalize  # noqa: F401
 from .functional import erase  # noqa: F401

-__all__ = [  # noqa
+__all__ = [
     'BaseTransform',
     'Compose',
     'Resize',
46 changes: 23 additions & 23 deletions test/autograd/test_autograd_functional_dynamic.py
@@ -145,28 +145,28 @@ def check_results(self, ref, res):
 class TestVJP(TestAutogradFunctional):
     def func_vjp_i1o1(self):
         test_cases = [
-            [reduce, 'A'],  # noqa
-            [reduce_dim, 'A'],  # noqa
-        ]  # noqa
+            [reduce, 'A'],
+            [reduce_dim, 'A'],
+        ]
         for f, inputs in test_cases:
             vjp, grad = self.gen_test_pairs(f, inputs)
             vjp_result, grad_result = vjp(), grad()
             self.check_results(grad_result, vjp_result)

     def func_vjp_i2o1(self):
         test_cases = [
-            [matmul, ['A', 'B']],  # noqa
-            [mul, ['b', 'c']],  # noqa
-        ]  # noqa
+            [matmul, ['A', 'B']],
+            [mul, ['b', 'c']],
+        ]
         for f, inputs in test_cases:
             vjp, grad = self.gen_test_pairs(f, inputs)
             vjp_result, grad_result = vjp(), grad()
             self.check_results(grad_result, vjp_result)

     def func_vjp_i2o2(self):
         test_cases = [
-            [o2, ['A', 'A']],  # noqa
-        ]  # noqa
+            [o2, ['A', 'A']],
+        ]
         for f, inputs in test_cases:
             inputs = self.gen_inputs(inputs)
             v = make_v(f, inputs)
@@ -176,8 +176,8 @@ def func_vjp_i2o2(self):

     def func_vjp_i2o2_omitting_v(self):
         test_cases = [
-            [o2, ['A', 'A']],  # noqa
-        ]  # noqa
+            [o2, ['A', 'A']],
+        ]
         for f, inputs in test_cases:
             inputs = self.gen_inputs(inputs)
             vjp, grad = self.gen_test_pairs(f, inputs)
@@ -187,7 +187,7 @@ def func_vjp_i2o2_omitting_v(self):
     def func_vjp_nested(self):
         x = self.gen_input('a')
         test_cases = [
-            [nested(x), 'a'],  # noqa
+            [nested(x), 'a'],
         ]
         for f, inputs in test_cases:
             vjp, grad = self.gen_test_pairs(f, inputs)
@@ -274,39 +274,39 @@ def jac(grad_fn, f, inputs):
 class TestJVP(TestAutogradFunctional):
     def func_jvp_i1o1(self):
         test_cases = [
-            [reduce, 'A'],  # noqa
-            [reduce_dim, 'A'],  # noqa
-        ]  # noqa
+            [reduce, 'A'],
+            [reduce_dim, 'A'],
+        ]
         for f, inputs in test_cases:
             inputs = self.gen_inputs(inputs)
             forward_jac = jac(paddle.incubate.autograd.jvp, f, inputs)
             reverse_jac = jac(paddle.incubate.autograd.vjp, f, inputs)
             self.check_results(forward_jac, reverse_jac)

     def func_jvp_i2o1(self):
-        test_cases = [  # noqa
-            [matmul, ['A', 'B']],  # noqa
-        ]  # noqa
+        test_cases = [
+            [matmul, ['A', 'B']],
+        ]
         for f, inputs in test_cases:
             inputs = self.gen_inputs(inputs)
             forward_jac = jac(paddle.incubate.autograd.jvp, f, inputs)
             reverse_jac = jac(paddle.incubate.autograd.vjp, f, inputs)
             self.check_results(forward_jac, reverse_jac)

     def func_jvp_i2o2(self):
-        test_cases = [  # noqa
-            [o2, ['A', 'A']],  # noqa
-        ]  # noqa
+        test_cases = [
+            [o2, ['A', 'A']],
+        ]
         for f, inputs in test_cases:
             inputs = self.gen_inputs(inputs)
             forward_jac = jac(paddle.incubate.autograd.jvp, f, inputs)
             reverse_jac = jac(paddle.incubate.autograd.vjp, f, inputs)
             self.check_results(forward_jac, reverse_jac)

     def func_jvp_i2o2_omitting_v(self):
-        test_cases = [  # noqa
-            [o2, ['A', 'A']],  # noqa
-        ]  # noqa
+        test_cases = [
+            [o2, ['A', 'A']],
+        ]
         for f, inputs in test_cases:
             inputs = self.gen_inputs(inputs)
             results_omitting_v = paddle.incubate.autograd.jvp(f, inputs)
