[CodeStyle][UP034] remove (()) cases (#52060)
* add up34

* modify var name in loop

* revert changes in test_slice

* Revert "modify var name in loop"

This reverts commit 6d748e3.

* temporarily ignore test_slice.py

* add comment

* empty commit, re-trigger all ci

* fix inc

---------

Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Liyulingyue and SigureMo authored Mar 29, 2023
1 parent 8082ba8 commit c069729
Showing 122 changed files with 266 additions and 288 deletions.
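
For context, UP034 (extraneous-parentheses) flags parentheses that wrap a single expression without changing its meaning. A minimal sketch of the pattern, not taken from this PR:

# Flagged by UP034: the extra parentheses around the argument change nothing.
print(("hello"))
# After the autofix:
print("hello")

Assuming a local ruff install, a command along the lines of "ruff check --select UP034 --fix ." reproduces this kind of cleanup.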
2 changes: 1 addition & 1 deletion paddle/fluid/operators/generator/cross_validate.py
@@ -26,7 +26,7 @@ def main(forward_op_yaml_paths, backward_op_yaml_paths):
         with open(op_yaml_path, "rt", encoding="utf-8") as f:
             op_list = yaml.safe_load(f)
         if op_list is not None:
-            ops.update(to_named_dict((op_list)))
+            ops.update(to_named_dict(op_list))
 
     cross_validate(ops)
7 changes: 6 additions & 1 deletion pyproject.toml
@@ -80,7 +80,7 @@ select = [
     # "UP031",
     # "UP032",
     "UP033",
-    # "UP034",
+    "UP034",
     "UP035",
     "UP036",
 
@@ -168,6 +168,11 @@ unfixable = [
 ]
 
 [tool.ruff.per-file-ignores]
 # Ignore unused imports in __init__.py
 "__init__.py" = ["F401"]
+# Temporarily ignore test_slice.py to avoid PR-CI-CINN failure, please fix!
+"python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py" = ["UP034"]
 # Ignore version check in setup.py
 "setup.py" = ["UP036"]
 # Ignore unnecessary lambda in dy2st unittest test_lambda
 "python/paddle/fluid/tests/unittests/dygraph_to_static/test_lambda.py" = ["PLC3002"]
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/tuner/profiler.py
@@ -270,7 +270,7 @@ def profiler(args):
             with open(result_path, 'w') as fp:
                 json.dump(result_dict, fp)
 
-            print("profile done! avg speed : {} step / s.".format((avg_tput)))
+            print("profile done! avg speed : {} step / s.".format(avg_tput))
 
     except paddle.framework.core.EOFException:
         data_loader._inner_dataloader.reset()
(file path not shown)
@@ -219,7 +219,7 @@ def _create_ge_tensor(self, shape, dtype, value):
         tensor = core.GETensor(tensor_desc)
 
         data = (
-            (value * np.ones((shape)))
+            (value * np.ones(shape))
             .reshape(shape)
             .astype(self.ascend_helper.dtype2np(dtype))
         )
@@ -282,7 +282,7 @@ def _create_shape_tensor(self):
         )
         tensor = core.GETensor(tensor_desc)
 
-        data = np.ones((2)).astype("int32").reshape([2])
+        data = np.ones(2).astype("int32").reshape([2])
         data[0] = 64
         buf = data.tobytes()
         data_8 = np.frombuffer(buf, dtype=np.uint8)
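
Aside (not part of the diff): the numpy changes in this PR are safe because in Python (2) is just the integer 2, not a tuple; only a trailing comma creates one. A small sketch of why np.ones((2)) and np.ones(2) were always identical:

import numpy as np

assert (2) == 2                          # parentheses alone do not make a tuple
assert (2,) == tuple([2])                # the trailing comma does
assert np.ones((2)).shape == np.ones(2).shape == (2,)
assert np.ones((2, 3)).shape == (2, 3)   # multi-dim shapes still need a real tuple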
2 changes: 1 addition & 1 deletion python/paddle/distribution/normal.py
@@ -241,7 +241,7 @@ def entropy(self):
         )
         return paddle.add(
             0.5 + zero_tmp,
-            0.5 * math.log(2 * math.pi) + paddle.log((self.scale + zero_tmp)),
+            0.5 * math.log(2 * math.pi) + paddle.log(self.scale + zero_tmp),
             name=name,
         )
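
Aside (not from the PR): the quantity built here is the closed-form entropy of a normal distribution, 0.5 + 0.5 * log(2 * pi) + log(sigma), which equals 0.5 * log(2 * pi * e * sigma ** 2). A quick sanity check:

import math

sigma = 2.0
as_in_code = 0.5 + 0.5 * math.log(2 * math.pi) + math.log(sigma)
closed_form = 0.5 * math.log(2 * math.pi * math.e * sigma**2)
assert abs(as_in_code - closed_form) < 1e-12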
(file path not shown)
@@ -1185,7 +1185,7 @@ def test_domain(self):
     def test_codomain(self):
         self.assertTrue(isinstance(self._t._codomain, variable.Variable))
 
-    @param.param_func(((np.random.random((10)),),))
+    @param.param_func(((np.random.random(10),),))
     def test_forward(self, input):
         np.testing.assert_allclose(
             self._t.inverse(self._t.forward(paddle.to_tensor(input))),
@@ -1202,7 +1202,7 @@ def test_forward_shape(self, shape, expected_shape):
     def test_inverse_shape(self, shape, expected_shape):
         self.assertEqual(self._t.inverse_shape(shape), expected_shape)
 
-    @param.param_func(((np.random.random((10)),),))
+    @param.param_func(((np.random.random(10),),))
     def test_forward_log_det_jacobian(self, x):
         self.assertEqual(
             self._t.forward_log_det_jacobian(paddle.to_tensor(x)).shape, [1]
(file path not shown)
@@ -456,7 +456,7 @@ def _dygraph_fn():
     x = np.random.random((1, 3)).astype('float32')
     with fluid.dygraph.guard():
         fluid.dygraph.to_variable(x)
-        np.random.random((1))
+        np.random.random(1)
 
 
 class TestDygraphApiRecognition(unittest.TestCase):
(file path not shown)
@@ -204,7 +204,7 @@ def test_optim_break_in_while(x):
 
 class TestContinueInFor(unittest.TestCase):
     def setUp(self):
-        self.input = np.zeros((1)).astype('int64')
+        self.input = np.zeros(1).astype('int64')
         self.place = (
             fluid.CUDAPlace(0)
             if fluid.is_compiled_with_cuda()
(file path not shown)
@@ -138,7 +138,7 @@ def test_cache(self):
 
 @to_static
 def sum_even_until_limit(max_len, limit):
-    ret_sum = fluid.dygraph.to_variable(np.zeros((1)).astype('int32'))
+    ret_sum = fluid.dygraph.to_variable(np.zeros(1).astype('int32'))
     for i in range(max_len):
         if i % 2 > 0:
             continue
@@ -150,8 +150,8 @@ def sum_even_until_limit(max_len, limit):
 
 
 def sum_under_while(limit):
-    i = fluid.dygraph.to_variable(np.zeros((1)).astype('int32'))
-    ret_sum = fluid.dygraph.to_variable(np.zeros((1)).astype('int32'))
+    i = fluid.dygraph.to_variable(np.zeros(1).astype('int32'))
+    ret_sum = fluid.dygraph.to_variable(np.zeros(1).astype('int32'))
     while i <= limit:
         ret_sum += i
         i += 1
(file path not shown)
@@ -171,7 +171,7 @@ def test_dic_pop_2(x):
 
 class TestDictPop(unittest.TestCase):
     def setUp(self):
-        self.input = np.random.random((3)).astype('int32')
+        self.input = np.random.random(3).astype('int32')
         self.place = (
             paddle.CUDAPlace(0)
             if paddle.is_compiled_with_cuda()
(file path not shown)
@@ -219,7 +219,7 @@ def setUp(self):
         self.init_dygraph_func()
 
     def init_data(self):
-        self.input = np.random.random((3)).astype('int32')
+        self.input = np.random.random(3).astype('int32')
 
     def init_dygraph_func(self):
         self.all_dygraph_funcs = [
@@ -275,7 +275,7 @@ def init_dygraph_func(self):
 
 class TestListInWhileLoop(TestListWithoutControlFlow):
     def init_data(self):
-        self.input = np.random.random((3)).astype('int32')
+        self.input = np.random.random(3).astype('int32')
         self.iter_num = 3
 
     def init_dygraph_func(self):
(file path not shown)
@@ -265,7 +265,7 @@ def func():
 
 class TestReturnBase(unittest.TestCase):
     def setUp(self):
-        self.input = np.ones((1)).astype('int32')
+        self.input = np.ones(1).astype('int32')
         self.place = (
             fluid.CUDAPlace(0)
             if fluid.is_compiled_with_cuda()
(file path not shown)
@@ -108,7 +108,7 @@ def train_static(args, batch_generator):
     # the best cross-entropy value with label smoothing
     loss_normalizer = -(
         (1.0 - args.label_smooth_eps)
-        * np.log((1.0 - args.label_smooth_eps))
+        * np.log(1.0 - args.label_smooth_eps)
         + args.label_smooth_eps
         * np.log(
             args.label_smooth_eps / (args.trg_vocab_size - 1) + 1e-20
@@ -221,8 +221,7 @@ def train_dygraph(args, batch_generator):
     )
     # the best cross-entropy value with label smoothing
     loss_normalizer = -(
-        (1.0 - args.label_smooth_eps)
-        * np.log((1.0 - args.label_smooth_eps))
+        (1.0 - args.label_smooth_eps) * np.log(1.0 - args.label_smooth_eps)
         + args.label_smooth_eps
         * np.log(args.label_smooth_eps / (args.trg_vocab_size - 1) + 1e-20)
     )
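
Aside (a hedged sketch, not code from the PR): loss_normalizer is the minimum cross-entropy achievable under label smoothing, where the target class keeps probability 1 - eps and the remaining eps is spread uniformly over the other trg_vocab_size - 1 tokens:

import numpy as np

def min_label_smoothed_ce(eps: float, vocab_size: int) -> float:
    # Entropy of the smoothed target distribution; the 1e-20 guards against log(0).
    return -(
        (1.0 - eps) * np.log(1.0 - eps)
        + eps * np.log(eps / (vocab_size - 1) + 1e-20)
    )

print(min_label_smoothed_ce(0.1, 30000))  # the nonzero floor for the training loss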
(file path not shown)
@@ -123,10 +123,8 @@ def _test(self, use_ipu=False):
         pad_batch = self.batch_size - dy_batch
         for k, v in feed.items():
             pad_size = tuple(
-                (
-                    (0, 0 if i != 0 else pad_batch)
-                    for i in range(len(v.shape))
-                )
+                (0, 0 if i != 0 else pad_batch)
+                for i in range(len(v.shape))
             )
             feed[k] = np.pad(v, pad_size, 'constant', constant_values=0)
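
Aside (not from the diff): UP034 also drops redundant parentheses around a generator expression that is the sole argument of a call, which is exactly the tuple(...) change above. Both forms build the same tuple:

values = [1, 2, 3]
assert tuple((v * 2 for v in values)) == tuple(v * 2 for v in values) == (2, 4, 6)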
(file path not shown)
@@ -62,24 +62,20 @@ def is_program_valid(self, prog_config):
         if (
             int(
                 (
-                    (
-                        input_shape[2]
-                        - (dilations[0] * (filter_shape[2] - 1) + 1)
-                    )
-                    / strides[0]
-                    + 1
+                    input_shape[2]
+                    - (dilations[0] * (filter_shape[2] - 1) + 1)
                 )
+                / strides[0]
+                + 1
             )
             <= 0
             or int(
                 (
-                    (
-                        input_shape[3]
-                        - (dilations[1] * (filter_shape[3] - 1) + 1)
-                    )
-                    / strides[1]
-                    + 1
+                    input_shape[3]
+                    - (dilations[1] * (filter_shape[3] - 1) + 1)
                 )
+                / strides[1]
+                + 1
             )
             <= 0
         ):
@@ -88,28 +84,24 @@ def is_program_valid(self, prog_config):
         if (
             int(
                 (
-                    (
-                        input_shape[2]
-                        + paddings[0]
-                        + paddings[1]
-                        - (dilations[0] * (filter_shape[2] - 1) + 1)
-                    )
-                    / strides[0]
-                    + 1
+                    input_shape[2]
+                    + paddings[0]
+                    + paddings[1]
+                    - (dilations[0] * (filter_shape[2] - 1) + 1)
                 )
+                / strides[0]
+                + 1
             )
             <= 0
             or int(
                 (
-                    (
-                        input_shape[3]
-                        + paddings[2]
-                        + paddings[3]
-                        - (dilations[1] * (filter_shape[3] - 1) + 1)
-                    )
-                    / strides[1]
-                    + 1
+                    input_shape[3]
+                    + paddings[2]
+                    + paddings[3]
+                    - (dilations[1] * (filter_shape[3] - 1) + 1)
                 )
+                / strides[1]
+                + 1
             )
             <= 0
         ):
@@ -206,27 +198,23 @@ def sample_program_config(self, draw):
                 f_shape[0],
                 int(
                     (
-                        (
-                            x_shape[2]
-                            + padding[0]
-                            + padding[1]
-                            - (dilations[0] * (f_shape[2] - 1) + 1)
-                        )
-                        / strides[0]
-                        + 1
+                        x_shape[2]
+                        + padding[0]
+                        + padding[1]
+                        - (dilations[0] * (f_shape[2] - 1) + 1)
                     )
+                    / strides[0]
+                    + 1
                 ),
                 int(
                     (
-                        (
-                            x_shape[3]
-                            + padding[2]
-                            + padding[3]
-                            - (dilations[1] * (f_shape[3] - 1) + 1)
-                        )
-                        / strides[1]
-                        + 1
+                        x_shape[3]
+                        + padding[2]
+                        + padding[3]
+                        - (dilations[1] * (f_shape[3] - 1) + 1)
                     )
+                    / strides[1]
+                    + 1
                 ),
             ]
 
@@ -241,18 +229,14 @@
                 x_shape[0],
                 f_shape[0],
                 int(
-                    (
-                        (x_shape[2] - (dilations[0] * (f_shape[2] - 1) + 1))
-                        / strides[0]
-                        + 1
-                    )
+                    (x_shape[2] - (dilations[0] * (f_shape[2] - 1) + 1))
+                    / strides[0]
+                    + 1
                 ),
                 int(
-                    (
-                        (x_shape[3] - (dilations[1] * (f_shape[3] - 1) + 1))
-                        / strides[1]
-                        + 1
-                    )
+                    (x_shape[3] - (dilations[1] * (f_shape[3] - 1) + 1))
+                    / strides[1]
+                    + 1
                 ),
             ]
             bias_index = 1
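
Aside (a hedged helper, not part of the PR): the arithmetic in these hunks is the standard convolution output-size formula, and the test rejects configurations where it is non-positive:

def conv_out_size(in_size, kernel, stride, pad_begin=0, pad_end=0, dilation=1):
    # Effective kernel extent once dilation spreads the taps apart.
    effective_kernel = dilation * (kernel - 1) + 1
    return int((in_size + pad_begin + pad_end - effective_kernel) / stride + 1)

assert conv_out_size(32, 3, 1, pad_begin=1, pad_end=1) == 32  # "same"-style padding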
(file path not shown)
@@ -42,7 +42,7 @@ def generate_alpha(*args, **kwargs):
         elif kwargs["mode"] == "channel":
             if len(kwargs['in_shape']) <= 1:
                 # not valid case, just return 0
-                return np.zeros((1)).astype(np.float32)
+                return np.zeros(1).astype(np.float32)
             if kwargs['data_format'] == 'NCHW':
                 return np.random.random(kwargs['in_shape'][1]).astype(
                     np.float32
@@ -54,7 +54,7 @@
         else:
             if len(kwargs['in_shape']) <= 1:
                 # not valid case, just return 0
-                return np.zeros((1)).astype(np.float32)
+                return np.zeros(1).astype(np.float32)
             return np.random.random(kwargs['in_shape']).astype(np.float32)
 
     prelu_op = OpConfig(
(file path not shown)
@@ -35,7 +35,7 @@ def sample_program_configs(self):
 
         def generate_input1(dims, batch):
             if dims == 1:
-                return np.zeros((batch)).astype(np.float32)
+                return np.zeros(batch).astype(np.float32)
             elif dims == 2:
                 return np.ones((batch, 4)).astype(np.float32)
             elif dims == 3:
@@ -45,7 +45,7 @@ def generate_input1(dims, batch):
 
         def generate_input2(dims, batch):
             if dims == 1:
-                return np.zeros((batch)).astype(np.float32)
+                return np.zeros(batch).astype(np.float32)
             elif dims == 2:
                 return np.ones((batch, 4)).astype(np.float32)
             elif dims == 3:
@@ -55,7 +55,7 @@ def generate_input2(dims, batch):
 
         def generate_input3(dims, batch):
             if dims == 1:
-                return np.zeros((batch)).astype(np.float32)
+                return np.zeros(batch).astype(np.float32)
             elif dims == 2:
                 return np.ones((batch, 4)).astype(np.float32)
             elif dims == 3:
(file path not shown)
@@ -90,7 +90,7 @@ def calc_expand_shapes_tensor(self):
         self.expand_shapes_tensor = []
         for index, ele in enumerate(self.expand_shape):
             self.expand_shapes_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
 
     def set_additional_inputs(self):
(file path not shown)
@@ -64,7 +64,7 @@ def generate_data(self):
 
         self.out_float = np.dot(self.x_float, self.y_float)
         if self.use_bias:
-            self.bias = np.random.random((10)).astype("float32") * 10
+            self.bias = np.random.random(10).astype("float32") * 10
             self.out_float += self.bias
 
         self.out_scale, self.out = self.quantize(self.out_float)