From 1da2891c7bf8e4cb60d29430e18fa96111f0f401 Mon Sep 17 00:00:00 2001
From: Ricardo Vieira
Date: Sun, 14 May 2023 12:07:02 +0200
Subject: [PATCH] Add flake8-comprehensions plugin

---
 .pre-commit-config.yaml                  |  2 ++
 pytensor/compile/builders.py             |  4 +---
 pytensor/gradient.py                     |  6 +++---
 pytensor/graph/basic.py                  |  2 +-
 pytensor/graph/replace.py                |  2 +-
 pytensor/graph/rewriting/basic.py        |  4 ++--
 pytensor/link/c/basic.py                 |  4 ++--
 pytensor/link/c/params_type.py           |  2 +-
 pytensor/link/jax/dispatch/elemwise.py   |  2 +-
 pytensor/link/numba/dispatch/elemwise.py |  2 +-
 pytensor/printing.py                     |  2 +-
 pytensor/scan/basic.py                   |  4 ++--
 pytensor/scan/op.py                      | 13 +++++--------
 pytensor/scan/rewriting.py               |  2 +-
 pytensor/tensor/random/op.py             |  2 +-
 pytensor/tensor/rewriting/math.py        |  6 +++---
 pytensor/tensor/shape.py                 |  6 +++---
 pytensor/tensor/subtensor.py             |  2 +-
 setup.cfg                                |  2 +-
 tests/graph/test_features.py             |  4 ++--
 tests/graph/test_op.py                   |  2 +-
 tests/tensor/rewriting/test_elemwise.py  |  2 +-
 tests/tensor/rewriting/test_math.py      |  6 +++---
 tests/tensor/rewriting/test_subtensor.py |  4 ++--
 tests/tensor/test_blas.py                |  6 +++---
 tests/tensor/test_complex.py             |  4 +---
 tests/tensor/test_elemwise.py            | 18 +++++++++---------
 27 files changed, 55 insertions(+), 60 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 83e57cd58a..f6f8fd556f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -33,6 +33,8 @@ repos:
     rev: 6.0.0
     hooks:
       - id: flake8
+        additional_dependencies:
+        - flake8-comprehensions
   - repo: https://github.com/pycqa/isort
     rev: 5.12.0
     hooks:
diff --git a/pytensor/compile/builders.py b/pytensor/compile/builders.py
index 4980cebe30..78f94a877f 100644
--- a/pytensor/compile/builders.py
+++ b/pytensor/compile/builders.py
@@ -969,9 +969,7 @@ def inline_ofg_expansion(fgraph, node):
         return False
     if not op.is_inline:
         return False
-    return clone_replace(
-        op.inner_outputs, {u: v for u, v in zip(op.inner_inputs, node.inputs)}
-    )
+    return clone_replace(op.inner_outputs, dict(zip(op.inner_inputs, node.inputs)))
 
 
 # We want to run this before the first merge optimizer
diff --git a/pytensor/gradient.py b/pytensor/gradient.py
index 930acfbdb3..976d79e1b2 100644
--- a/pytensor/gradient.py
+++ b/pytensor/gradient.py
@@ -504,7 +504,7 @@ def grad(
     if not isinstance(wrt, Sequence):
         _wrt: List[Variable] = [wrt]
     else:
-        _wrt = [x for x in wrt]
+        _wrt = list(wrt)
 
     outputs = []
     if cost is not None:
@@ -791,8 +791,8 @@ def subgraph_grad(wrt, end, start=None, cost=None, details=False):
 
     pgrads = dict(zip(params, grads))
    # separate wrt from end grads:
-    wrt_grads = list(pgrads[k] for k in wrt)
-    end_grads = list(pgrads[k] for k in end)
+    wrt_grads = [pgrads[k] for k in wrt]
+    end_grads = [pgrads[k] for k in end]
 
     if details:
         return wrt_grads, end_grads, start_grads, cost_grads
diff --git a/pytensor/graph/basic.py b/pytensor/graph/basic.py
index 31bd8d311a..7e59bf69e1 100644
--- a/pytensor/graph/basic.py
+++ b/pytensor/graph/basic.py
@@ -1629,7 +1629,7 @@ def as_string(
                     multi.add(op2)
                 else:
                     seen.add(input.owner)
-    multi_list = [x for x in multi]
+    multi_list = list(multi)
     done: Set = set()
 
     def multi_index(x):
diff --git a/pytensor/graph/replace.py b/pytensor/graph/replace.py
index 222716c9b0..2213c70578 100644
--- a/pytensor/graph/replace.py
+++ b/pytensor/graph/replace.py
@@ -142,7 +142,7 @@ def toposort_key(fg: FunctionGraph, ts, pair):
            raise ValueError(f"{key} is not a part of graph")
 
     sorted_replacements = sorted(
-        tuple(fg_replace.items()),
+        fg_replace.items(),
        # sort based on the fg toposort, if a variable has no owner, it goes first
        key=partial(toposort_key, fg, toposort),
        reverse=True,
diff --git a/pytensor/graph/rewriting/basic.py b/pytensor/graph/rewriting/basic.py
index 158d9322c0..b2bfcffaf3 100644
--- a/pytensor/graph/rewriting/basic.py
+++ b/pytensor/graph/rewriting/basic.py
@@ -2575,8 +2575,8 @@ def print_profile(cls, stream, prof, level=0):
         for i in range(len(loop_timing)):
             loop_times = ""
             if loop_process_count[i]:
-                d = list(
-                    reversed(sorted(loop_process_count[i].items(), key=lambda a: a[1]))
+                d = sorted(
+                    loop_process_count[i].items(), key=lambda a: a[1], reverse=True
                 )
                 loop_times = " ".join([str((str(k), v)) for k, v in d[:5]])
                 if len(d) > 5:
diff --git a/pytensor/link/c/basic.py b/pytensor/link/c/basic.py
index c0ea66bd36..ab61e4e4bd 100644
--- a/pytensor/link/c/basic.py
+++ b/pytensor/link/c/basic.py
@@ -633,11 +633,11 @@ def fetch_variables(self):
 
         # The orphans field is listified to ensure a consistent order.
         # list(fgraph.orphans.difference(self.outputs))
-        self.orphans = list(
+        self.orphans = [
             r
             for r in self.variables
             if isinstance(r, AtomicVariable) and r not in self.inputs
-        )
+        ]
         # C type constants (pytensor.scalar.ScalarType). They don't request an object
         self.consts = []
         # Move c type from orphans (pytensor.scalar.ScalarType) to self.consts
diff --git a/pytensor/link/c/params_type.py b/pytensor/link/c/params_type.py
index 456346d8b5..ffa57b0949 100644
--- a/pytensor/link/c/params_type.py
+++ b/pytensor/link/c/params_type.py
@@ -810,7 +810,7 @@ def c_support_code(self, **kwargs):
             struct_extract_method=struct_extract_method,
         )
 
-        return list(sorted(list(c_support_code_set))) + [final_struct_code]
+        return sorted(c_support_code_set) + [final_struct_code]
 
     def c_code_cache_version(self):
         return ((3,), tuple(t.c_code_cache_version() for t in self.types))
diff --git a/pytensor/link/jax/dispatch/elemwise.py b/pytensor/link/jax/dispatch/elemwise.py
index 2c2204e04d..39ef836b6a 100644
--- a/pytensor/link/jax/dispatch/elemwise.py
+++ b/pytensor/link/jax/dispatch/elemwise.py
@@ -41,7 +41,7 @@ def careduce(x):
         elif scalar_op_name:
             scalar_fn_name = scalar_op_name
 
-        to_reduce = reversed(sorted(axis))
+        to_reduce = sorted(axis, reverse=True)
 
         if to_reduce:
             # In this case, we need to use the `jax.lax` function (if there
diff --git a/pytensor/link/numba/dispatch/elemwise.py b/pytensor/link/numba/dispatch/elemwise.py
index df8aeefbdd..49f7d6e8a2 100644
--- a/pytensor/link/numba/dispatch/elemwise.py
+++ b/pytensor/link/numba/dispatch/elemwise.py
@@ -361,7 +361,7 @@ def careduce_maximum(input):
     careduce_fn_name = f"careduce_{scalar_op}"
     global_env = {}
 
-    to_reduce = reversed(sorted(axes))
+    to_reduce = sorted(axes, reverse=True)
 
     careduce_lines_src = []
     var_name = input_name
diff --git a/pytensor/printing.py b/pytensor/printing.py
index 5cbea65f33..0b866f079e 100644
--- a/pytensor/printing.py
+++ b/pytensor/printing.py
@@ -796,7 +796,7 @@ def grad(self, input, output_gradients):
         return output_gradients
 
     def R_op(self, inputs, eval_points):
-        return [x for x in eval_points]
+        return list(eval_points)
 
     def __setstate__(self, dct):
         dct.setdefault("global_fn", _print_fn)
diff --git a/pytensor/scan/basic.py b/pytensor/scan/basic.py
index d5109a0a9c..ddb4983d4c 100644
--- a/pytensor/scan/basic.py
+++ b/pytensor/scan/basic.py
@@ -492,7 +492,7 @@ def wrap_into_list(x):
     # wrap sequences in a dictionary if they are not already dictionaries
     for i in range(n_seqs):
         if not isinstance(seqs[i], dict):
-            seqs[i] = dict([("input", seqs[i]), ("taps", [0])])
+            seqs[i] = {"input": seqs[i], "taps": [0]}
         elif seqs[i].get("taps", None) is not None:
             seqs[i]["taps"] = wrap_into_list(seqs[i]["taps"])
         elif seqs[i].get("taps", None) is None:
@@ -504,7 +504,7 @@ def wrap_into_list(x):
         if outs_info[i] is not None:
             if not isinstance(outs_info[i], dict):
                 # by default any output has a tap value of -1
-                outs_info[i] = dict([("initial", outs_info[i]), ("taps", [-1])])
+                outs_info[i] = {"initial": outs_info[i], "taps": [-1]}
             elif (
                 outs_info[i].get("initial", None) is None
                 and outs_info[i].get("taps", None) is not None
diff --git a/pytensor/scan/op.py b/pytensor/scan/op.py
index 8942ad7e6f..b35cfa9b7b 100644
--- a/pytensor/scan/op.py
+++ b/pytensor/scan/op.py
@@ -1718,12 +1718,9 @@ def perform(self, node, inputs, output_storage, params=None):
             arg.shape[0]
             for arg in inputs[self.seqs_arg_offset : self.shared_arg_offset]
         ]
-        store_steps += [
-            arg
-            for arg in inputs[
-                self.nit_sot_arg_offset : self.nit_sot_arg_offset + info.n_nit_sot
-            ]
-        ]
+        store_steps += list(
+            inputs[self.nit_sot_arg_offset : self.nit_sot_arg_offset + info.n_nit_sot]
+        )
 
         # 2.1 Create storage space for outputs
         for idx in range(self.n_outs):
@@ -2270,7 +2267,7 @@ def infer_shape(self, fgraph, node, input_shapes):
         )
 
         offset = 1 + info.n_seqs
-        scan_outs = [x for x in input_shapes[offset : offset + n_outs]]
+        scan_outs = list(input_shapes[offset : offset + n_outs])
         offset += n_outs
         outs_shape_n = info.n_mit_mot_outs + info.n_mit_sot + info.n_sit_sot
         for x in range(info.n_nit_sot):
@@ -2301,7 +2298,7 @@ def infer_shape(self, fgraph, node, input_shapes):
                     shp.append(v_shp_i[0])
             scan_outs.append(tuple(shp))
 
-        scan_outs += [x for x in input_shapes[offset : offset + info.n_shared_outs]]
+        scan_outs += list(input_shapes[offset : offset + info.n_shared_outs])
         # if we are dealing with a repeat-until, then we do not know the
         # leading dimension so we replace it for every entry with Shape_i
         if info.as_while:
diff --git a/pytensor/scan/rewriting.py b/pytensor/scan/rewriting.py
index 797d4c4062..217693d0ae 100644
--- a/pytensor/scan/rewriting.py
+++ b/pytensor/scan/rewriting.py
@@ -388,7 +388,7 @@ def add_to_replace(y):
         if out in local_fgraph_outs_set:
             x = node.outputs[local_fgraph_outs_map[out]]
             y = replace_with_out[idx]
-            y_shape = [shp for shp in y.shape]
+            y_shape = list(y.shape)
             replace_with[x] = at.alloc(y, node.inputs[0], *y_shape)
 
     # We need to add one extra dimension to the outputs
diff --git a/pytensor/tensor/random/op.py b/pytensor/tensor/random/op.py
index 5a3b4aea19..1e4e44274f 100644
--- a/pytensor/tensor/random/op.py
+++ b/pytensor/tensor/random/op.py
@@ -283,7 +283,7 @@ def infer_shape(self, fgraph, node, input_shapes):
 
         shape = self._infer_shape(size, dist_params, param_shapes=param_shapes)
 
-        return [None, [s for s in shape]]
+        return [None, list(shape)]
 
     def __call__(self, *args, size=None, name=None, rng=None, dtype=None, **kwargs):
         res = super().__call__(rng, size, dtype, *args, **kwargs)
diff --git a/pytensor/tensor/rewriting/math.py b/pytensor/tensor/rewriting/math.py
index 9952b09fba..cf604eec1b 100644
--- a/pytensor/tensor/rewriting/math.py
+++ b/pytensor/tensor/rewriting/math.py
@@ -1555,11 +1555,11 @@ def local_sum_prod_div_dimshuffle(fgraph, node):
                 )
 
                 if len(compatible_dims) > 0:
-                    optimized_dimshuffle_order = list(
+                    optimized_dimshuffle_order = [
                         ax
                         for i, ax in enumerate(dimshuffle_order)
                         if (i not in axis) or (ax != "x")
-                    )
+                    ]
 
                     # Removing leading 'x' (since it will be done automatically)
                     while (
@@ -1644,7 +1644,7 @@ def local_op_of_op(fgraph, node):
             return [op_type(None, dtype=out_dtype)(node_inps.owner.inputs[0])]
 
         # figure out which axes were in the original sum
-        newaxis = list(tuple(node_inps.owner.op.axis))
+        newaxis = list(node_inps.owner.op.axis)
         for i in node.op.axis:
             new_i = i
             for ii in node_inps.owner.op.axis:
diff --git a/pytensor/tensor/shape.py b/pytensor/tensor/shape.py
index ffb407a121..2e273a1dce 100644
--- a/pytensor/tensor/shape.py
+++ b/pytensor/tensor/shape.py
@@ -810,7 +810,7 @@ def shape_padleft(t, n_ones=1):
 
     """
     _t = at.as_tensor_variable(t)
-    pattern = ["x"] * n_ones + [i for i in range(_t.type.ndim)]
+    pattern = ["x"] * n_ones + list(range(_t.type.ndim))
     return _t.dimshuffle(pattern)
 
 
@@ -826,7 +826,7 @@ def shape_padright(t, n_ones=1):
 
     """
     _t = at.as_tensor_variable(t)
-    pattern = [i for i in range(_t.type.ndim)] + ["x"] * n_ones
+    pattern = list(range(_t.type.ndim)) + ["x"] * n_ones
     return _t.dimshuffle(pattern)
 
 
@@ -861,7 +861,7 @@ def shape_padaxis(t, axis):
     if axis < 0:
         axis += ndim
 
-    pattern = [i for i in range(_t.type.ndim)]
+    pattern = list(range(_t.type.ndim))
     pattern.insert(axis, "x")
     return _t.dimshuffle(pattern)
 
diff --git a/pytensor/tensor/subtensor.py b/pytensor/tensor/subtensor.py
index b14fa242b2..99c335348b 100644
--- a/pytensor/tensor/subtensor.py
+++ b/pytensor/tensor/subtensor.py
@@ -2604,7 +2604,7 @@ def infer_shape(self, fgraph, node, ishapes):
             ishapes[0], index_shapes, indices_are_shapes=True
         )
         assert node.outputs[0].ndim == len(res_shape)
-        return [[s for s in res_shape]]
+        return [list(res_shape)]
 
     def perform(self, node, inputs, out_):
         (out,) = out_
diff --git a/setup.cfg b/setup.cfg
index 5bb5b94dfa..554bfda88a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [flake8]
 select = C,E,F,W
-ignore = E203,E231,E501,E741,W503,W504,C901
+ignore = E203,E231,E501,E741,W503,W504,C408,C901
 per-file-ignores =
     **/__init__.py:F401,E402,F403
     pytensor/tensor/linalg.py:F401,F403
diff --git a/tests/graph/test_features.py b/tests/graph/test_features.py
index cfd0bf3f78..475af20b57 100644
--- a/tests/graph/test_features.py
+++ b/tests/graph/test_features.py
@@ -73,7 +73,7 @@ def inputs():
 
         assert hasattr(g, "get_nodes")
         for type, num in ((add, 3), (sigmoid, 3), (dot, 2)):
-            if len([t for t in g.get_nodes(type)]) != num:
+            if len(list(g.get_nodes(type))) != num:
                 raise Exception("Expected: %i times %s" % (num, type))
         new_e0 = add(y, z)
         assert e0.owner in g.get_nodes(dot)
@@ -82,7 +82,7 @@ def inputs():
         assert e0.owner not in g.get_nodes(dot)
         assert new_e0.owner in g.get_nodes(add)
         for type, num in ((add, 4), (sigmoid, 3), (dot, 1)):
-            if len([t for t in g.get_nodes(type)]) != num:
+            if len(list(g.get_nodes(type))) != num:
                 raise Exception("Expected: %i times %s" % (num, type))
 
 
diff --git a/tests/graph/test_op.py b/tests/graph/test_op.py
index ba9ebfe27a..40a8c0661c 100644
--- a/tests/graph/test_op.py
+++ b/tests/graph/test_op.py
@@ -87,7 +87,7 @@ def test_sanity_0(self):
         r1, r2 = MyType(1)(), MyType(2)()
         node = MyOp.make_node(r1, r2)
         # Are the inputs what I provided?
-        assert [x for x in node.inputs] == [r1, r2]
+        assert list(node.inputs) == [r1, r2]
         # Are the outputs what I expect?
         assert [x.type for x in node.outputs] == [MyType(3)]
         assert node.outputs[0].owner is node and node.outputs[0].index == 0
diff --git a/tests/tensor/rewriting/test_elemwise.py b/tests/tensor/rewriting/test_elemwise.py
index c30ed12f89..ddec8c5292 100644
--- a/tests/tensor/rewriting/test_elemwise.py
+++ b/tests/tensor/rewriting/test_elemwise.py
@@ -1123,7 +1123,7 @@ def test_add_mul_fusion_inplace(self):
 
         out = dot(x, y) + x + y + z
         f = function([x, y, z], out, mode=self.mode)
-        topo = [n for n in f.maker.fgraph.toposort()]
+        topo = list(f.maker.fgraph.toposort())
         assert len(topo) == 2
         assert topo[-1].op.inplace_pattern
 
diff --git a/tests/tensor/rewriting/test_math.py b/tests/tensor/rewriting/test_math.py
index 74250cd81b..f191e51357 100644
--- a/tests/tensor/rewriting/test_math.py
+++ b/tests/tensor/rewriting/test_math.py
@@ -3994,9 +3994,9 @@ def test_is_1pexp(self):
         exp_op = exp
         assert is_1pexp(1 + exp_op(x), False) == (False, x)
         assert is_1pexp(exp_op(x) + 1, False) == (False, x)
-        for neg_, exp_arg in map(
-            lambda x: is_1pexp(x, only_process_constants=False),
-            [(1 + exp_op(-x)), (exp_op(-x) + 1)],
+        for neg_, exp_arg in (
+            is_1pexp(x, only_process_constants=False)
+            for x in [(1 + exp_op(-x)), (exp_op(-x) + 1)]
         ):
             assert not neg_ and is_same_graph(exp_arg, -x)
         assert is_1pexp(1 - exp_op(x), False) is None
diff --git a/tests/tensor/rewriting/test_subtensor.py b/tests/tensor/rewriting/test_subtensor.py
index d72a49d80c..a5a643d0da 100644
--- a/tests/tensor/rewriting/test_subtensor.py
+++ b/tests/tensor/rewriting/test_subtensor.py
@@ -2004,7 +2004,7 @@ def test_local_subtensor_SpecifyShape_lift(x, s, idx, x_val, s_val):
     y_val_fn = function(
         [x] + list(s), y, on_unused_input="ignore", mode=no_rewrites_mode
     )
-    y_val = y_val_fn(*([x_val] + [s_ for s_ in s_val]))
+    y_val = y_val_fn(*([x_val] + list(s_val)))
 
     # This optimization should appear in the canonicalizations
     y_opt = rewrite_graph(y, clone=False)
@@ -2017,7 +2017,7 @@ def test_local_subtensor_SpecifyShape_lift(x, s, idx, x_val, s_val):
         assert isinstance(y_opt.owner.op, SpecifyShape)
 
     y_opt_fn = function([x] + list(s), y_opt, on_unused_input="ignore")
-    y_opt_val = y_opt_fn(*([x_val] + [s_ for s_ in s_val]))
+    y_opt_val = y_opt_fn(*([x_val] + list(s_val)))
 
     assert np.allclose(y_val, y_opt_val)
 
diff --git a/tests/tensor/test_blas.py b/tests/tensor/test_blas.py
index e884583e9e..0ce7640d38 100644
--- a/tests/tensor/test_blas.py
+++ b/tests/tensor/test_blas.py
@@ -2589,10 +2589,10 @@ def test_ger(self):
     op=batched_dot,
     expected=(
         lambda xs, ys: np.asarray(
-            list(
+            [
                 x * y if x.ndim == 0 or y.ndim == 0 else np.dot(x, y)
                 for x, y in zip(xs, ys)
-            ),
+            ],
             dtype=aes.upcast(xs.dtype, ys.dtype),
         )
     ),
@@ -2694,7 +2694,7 @@ def check_first_dim(inverted):
         assert x.strides[0] == direction * np.dtype(config.floatX).itemsize
         assert not (x.flags["C_CONTIGUOUS"] or x.flags["F_CONTIGUOUS"])
         result = f(x, w)
-        ref_result = np.asarray(list(np.dot(u, v) for u, v in zip(x, w)))
+        ref_result = np.asarray([np.dot(u, v) for u, v in zip(x, w)])
         utt.assert_allclose(ref_result, result)
 
     for inverted in (0, 1):
diff --git a/tests/tensor/test_complex.py b/tests/tensor/test_complex.py
index 593418da31..1f29f198ca 100644
--- a/tests/tensor/test_complex.py
+++ b/tests/tensor/test_complex.py
@@ -15,9 +15,7 @@ def test_basic(self):
         x = zvector()
         rng = np.random.default_rng(23)
         xval = np.asarray(
-            list(
-                complex(rng.standard_normal(), rng.standard_normal()) for i in range(10)
-            )
+            [complex(rng.standard_normal(), rng.standard_normal()) for i in range(10)]
         )
         assert np.all(xval.real == pytensor.function([x], real(x))(xval))
         assert np.all(xval.imag == pytensor.function([x], imag(x))(xval))
diff --git a/tests/tensor/test_elemwise.py b/tests/tensor/test_elemwise.py
index 9bb1fd3154..8db1afe7e3 100644
--- a/tests/tensor/test_elemwise.py
+++ b/tests/tensor/test_elemwise.py
@@ -490,50 +490,50 @@ def with_mode(
                 assert len(axis2) == len(tosum)
                 tosum = tuple(axis2)
             if tensor_op == at_all:
-                for axis in reversed(sorted(tosum)):
+                for axis in sorted(tosum, reverse=True):
                     zv = np.all(zv, axis)
                 if len(tosum) == 0:
                     zv = zv != 0
             elif tensor_op == at_any:
-                for axis in reversed(sorted(tosum)):
+                for axis in sorted(tosum, reverse=True):
                     zv = np.any(zv, axis)
                 if len(tosum) == 0:
                     zv = zv != 0
             elif scalar_op == aes.add:
-                for axis in reversed(sorted(tosum)):
+                for axis in sorted(tosum, reverse=True):
                     zv = np.add.reduce(zv, axis)
                 if dtype == "bool":
                     # np.add of a bool upcast, while CAReduce don't
                     zv = zv.astype(dtype)
             elif scalar_op == aes.mul:
-                for axis in reversed(sorted(tosum)):
+                for axis in sorted(tosum, reverse=True):
                     zv = np.multiply.reduce(zv, axis)
             elif scalar_op == aes.scalar_maximum:
                 # There is no identity value for the maximum function
                 # So we can't support shape of dimensions 0.
                 if np.prod(zv.shape) == 0:
                     continue
-                for axis in reversed(sorted(tosum)):
+                for axis in sorted(tosum, reverse=True):
                     zv = np.maximum.reduce(zv, axis)
             elif scalar_op == aes.scalar_minimum:
                 # There is no identity value for the minimum function
                 # So we can't support shape of dimensions 0.
                 if np.prod(zv.shape) == 0:
                     continue
-                for axis in reversed(sorted(tosum)):
+                for axis in sorted(tosum, reverse=True):
                     zv = np.minimum.reduce(zv, axis)
             elif scalar_op == aes.or_:
-                for axis in reversed(sorted(tosum)):
+                for axis in sorted(tosum, reverse=True):
                     zv = np.bitwise_or.reduce(zv, axis)
             elif scalar_op == aes.and_:
-                for axis in reversed(sorted(tosum)):
+                for axis in sorted(tosum, reverse=True):
                     zv = reduce_bitwise_and(zv, axis, dtype=dtype)
             elif scalar_op == aes.xor:
                 # There is no identity value for the xor function
                 # So we can't support shape of dimensions 0.
                 if np.prod(zv.shape) == 0:
                     continue
-                for axis in reversed(sorted(tosum)):
+                for axis in sorted(tosum, reverse=True):
                     zv = np.bitwise_xor.reduce(zv, axis)
             else:
                 raise NotImplementedError(