[CodeStyle][ruff] fix E226, NPY201 (PaddlePaddle#60245)
* fix ruff preview

* fix cast
gouzil authored and Wanglongzhi2001 committed Jan 7, 2024
1 parent d8d2013 commit d738599
Showing 11 changed files with 28 additions and 23 deletions.
4 changes: 3 additions & 1 deletion python/paddle/distributed/launch/controllers/collective.py
@@ -92,7 +92,9 @@ def _build_pod_with_args(self):
ips = self.ctx.args.ips.split(',')

job_endpoints = [
f"{h}:{p+start_port}" for h in ips for p in range(self.pod.replicas)
f"{h}:{p + start_port}"
for h in ips
for p in range(self.pod.replicas)
]

self.ctx.logger.debug(f"job endpoints: {job_endpoints}")
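The change above, like most of the f-string edits in this commit, addresses pycodestyle rule E226 (missing whitespace around arithmetic operator), which ruff reports when its preview rules are enabled — hence the "fix ruff preview" note in the commit message. A minimal sketch of what the rule wants, using hypothetical values:

    # Hypothetical values, just to make the snippet runnable.
    start_port, p = 6070, 2
    bad = f"{p+start_port}"     # E226: no spaces around the binary operator
    good = f"{p + start_port}"  # accepted spelling, as applied throughout this commit
    assert bad == good == "6072"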
8 changes: 4 additions & 4 deletions python/paddle/distributed/launch/main.py
@@ -576,10 +576,10 @@ def launch():

end_time = time.time()
ctx.logger.info(
f"AtuoTuner for GBS search ends in {end_time-start_time}s."
f"AtuoTuner for GBS search ends in {end_time - start_time}s."
)
logger.info(
f"AtuoTuner for GBS search ends in {end_time-start_time}s."
f"AtuoTuner for GBS search ends in {end_time - start_time}s."
)

# build AutoTuner to get new config
@@ -1118,8 +1118,8 @@ def launch():
assert best_cfg and best_cfg["time"] != -1

end_time = time.time()
ctx.logger.info(f"AutoTuner ended in {end_time-start_time}s.")
logger.info(f"AutoTuner ended in {end_time-start_time}s.")
ctx.logger.info(f"AutoTuner ended in {end_time - start_time}s.")
logger.info(f"AutoTuner ended in {end_time - start_time}s.")
# launch best cfg
# estimation search need not run best cfg
if not tuner_cfg.get("run_best", True) or tuner_cfg["search_algo"].get(
4 changes: 2 additions & 2 deletions python/paddle/hapi/callbacks.py
@@ -1279,10 +1279,10 @@ def _reset(self):
self.mode == 'auto' and 'acc' not in self.monitor
):
self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
- self.best = np.Inf
+ self.best = np.inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
- self.best = -np.Inf
+ self.best = -np.inf
self.cooldown_counter = 0
self.wait = 0

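The np.Inf edits in this file (and the np.NINF edit in test_fill_constant_op.py further down) fall under ruff rule NPY201, which flags attributes removed from NumPy's main namespace in NumPy 2.0 (NEP 52). A short sketch of the replacement spellings, assuming NumPy is installed:

    import numpy as np

    # np.Inf and np.NINF were aliases removed in NumPy 2.0; the lowercase
    # spellings work on both 1.x and 2.x.
    best = np.inf     # was: np.Inf
    worst = -np.inf   # was: -np.Inf or np.NINF
    assert np.isinf(best) and np.isinf(worst) and worst < 0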
@@ -497,7 +497,7 @@ def error_message_summary(original_error: Exception) -> str:
)
if current_line != -1:
message_lines.append(
f"{indent} {lines[current_line-start].rstrip()}"
f"{indent} {lines[current_line - start].rstrip()}"
)
error_message = traceback.format_exception_only(
type(original_error), original_error
6 changes: 3 additions & 3 deletions python/paddle/static/quantization/quant2_int8_mkldnn_pass.py
@@ -203,7 +203,7 @@ def _gather_input_scales_from_fake(self, graph):
scale = np.array(
1.0 / self._load_param(self._scope, scale_name)[0]
).astype(np.float64)
- scale[scale == np.Inf] = 0.0
+ scale[scale == np.inf] = 0.0
lod_tensor = self._convert_scale2tensor(scale)
use_unsigned_int = False
self._add_scale_for_vars(
@@ -238,7 +238,7 @@ def _gather_output_scales_from_attr(self, graph):
if attr_scale == 0.0:
continue
scale = np.array(1.0 / attr_scale).astype(np.float64)
- scale[scale == np.Inf] = 0.0
+ scale[scale == np.inf] = 0.0
scale_lod_tensor = self._convert_scale2tensor(scale)
use_unsigned_int = False
for output_name in op.op().outputs():
@@ -561,7 +561,7 @@ def _compute_var_scales(ops, w_name, axis):
),
axis=axis,
)
- scales[scales == np.Inf] = 0.0
+ scales[scales == np.inf] = 0.0

lod_tensor = self._convert_scale2tensor(scales)
use_unsigned_int = False
6 changes: 3 additions & 3 deletions python/paddle/tensor/einsum.py
@@ -672,7 +672,7 @@ def plan_einsum(operands, g_view, g_shape, g_supports, g_count, n_bcast):
if any(ax != dim for ax, dim in enumerate(view[:nout])):
perm = [dim for dim in view if dim >= 0]
if sorted(perm) != perm:
- varname = f'op{nop-1}'
+ varname = f'op{nop - 1}'
step = transpose, [varname], varname, perm
plan.add_step(step)
dim = 0
@@ -684,14 +684,14 @@ def plan_einsum(operands, g_view, g_shape, g_supports, g_count, n_bcast):
if d == -1:
unsqueeze_dims.append(ax)
if unsqueeze_dims:
- varname = f'op{nop-1}'
+ varname = f'op{nop - 1}'
step = unsqueeze, [varname], varname, unsqueeze_dims
plan.add_step(step)

squeeze_dims = [dim for dim in view[nout:] if dim != -1]
if squeeze_dims:
# plan_reduce(plan, nop-1, reduce_dims, keepdim=False)
- varname = f'op{nop-1}'
+ varname = f'op{nop - 1}'
step = squeeze, [varname], varname, squeeze_dims
plan.add_step(step)

6 changes: 3 additions & 3 deletions test/custom_runtime/test_custom_cpu_to_static.py
@@ -42,7 +42,7 @@ def train_func_base(epoch_id, train_loader, model, cost, optimizer):
)
epoch_end = time.time()
print(
f"Epoch ID: {epoch_id+1}, FP32 train epoch time: {(epoch_end - epoch_start) * 1000} ms"
f"Epoch ID: {epoch_id + 1}, FP32 train epoch time: {(epoch_end - epoch_start) * 1000} ms"
)


@@ -75,7 +75,7 @@ def train_func_ampo1(epoch_id, train_loader, model, cost, optimizer, scaler):
)
epoch_end = time.time()
print(
f"Epoch ID: {epoch_id+1}, AMPO1 train epoch time: {(epoch_end - epoch_start) * 1000} ms"
f"Epoch ID: {epoch_id + 1}, AMPO1 train epoch time: {(epoch_end - epoch_start) * 1000} ms"
)


@@ -96,7 +96,7 @@ def test_func(epoch_id, test_loader, model, cost):
avg_acc[1].append(acc_top5.numpy())
model.train()
print(
f"Epoch ID: {epoch_id+1}, Top1 accurary: {np.array(avg_acc[0]).mean()}, Top5 accurary: {np.array(avg_acc[1]).mean()}"
f"Epoch ID: {epoch_id + 1}, Top1 accurary: {np.array(avg_acc[0]).mean()}, Top5 accurary: {np.array(avg_acc[1]).mean()}"
)


4 changes: 2 additions & 2 deletions test/legacy_test/test_fill_constant_op.py
@@ -413,9 +413,9 @@ def test_inf(self):

def test_ninf(self):
with base.dygraph.guard():
- res = paddle.tensor.fill_constant([1], 'float32', np.NINF)
+ res = paddle.tensor.fill_constant([1], 'float32', -np.inf)
self.assertTrue(np.isinf(res.numpy().item(0)))
- self.assertEqual(np.NINF, res.numpy().item(0))
+ self.assertEqual(-np.inf, res.numpy().item(0))


class TestFillConstantOpError(unittest.TestCase):
5 changes: 4 additions & 1 deletion test/legacy_test/test_seed_op.py
@@ -69,7 +69,10 @@ def check_static_result(self, place):
(out1,) = exe.run(
static.default_main_program(), fetch_list=res_list
)
- self.assertEqual(out1, np.cast['int32'](self.rng1.random()))
+ self.assertEqual(
+     out1,
+     np.asarray(self.rng1.random()).astype(np.int32),
+ )

def test_static(self):
for place in self.places:
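np.cast, replaced in this test and in test_increment_op_xpu.py below, was a dict-like table of casting functions that NumPy 2.0 removed; it is likewise flagged by NPY201. A small sketch of the old and new spellings, with a throwaway value:

    import numpy as np

    rng = np.random.default_rng(0)
    value = rng.random()  # a float in [0, 1)

    # Old (NumPy 1.x only):  np.cast['int32'](value)
    # New (works on 1.x and 2.x):
    converted = np.asarray(value).astype(np.int32)
    assert converted.dtype == np.int32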
2 changes: 1 addition & 1 deletion test/xpu/test_increment_op_xpu.py
@@ -41,7 +41,7 @@ def setUp(self):
self.initTestCase()

x = np.random.uniform(-100, 100, [1]).astype(self.dtype)
- output = x + np.cast[self.dtype](self.step)
+ output = x + np.asarray(self.step).astype(self.dtype)
output = output.astype(self.dtype)

self.inputs = {'X': x}
4 changes: 2 additions & 2 deletions tools/gen_ut_cmakelists.py
@@ -275,7 +275,7 @@ def _init_dist_ut_ports_from_cmakefile(self, cmake_file_name):
assert re.compile("^test_[0-9a-zA-Z_]+").search(
name
), f'''we found a test for initial the latest dist_port but the test name '{name}' seems to be wrong
- at line {k-1}, in file {cmake_file_name}
+ at line {k - 1}, in file {cmake_file_name}
'''
self.gset_port(name, port)

@@ -559,7 +559,7 @@ def _gen_cmakelists(self, current_work_dir, depth=0):
print("===============PARSE LINE ERRORS OCCUR==========")
print(e)
print(f"[ERROR FILE]: {current_work_dir}/testslist.csv")
print(f"[ERROR LINE {i+1}]: {line.strip()}")
print(f"[ERROR LINE {i + 1}]: {line.strip()}")
sys.exit(1)

for sub in sub_dirs: