Add support for ruff #295

Merged
merged 3 commits on May 14, 2023
2 changes: 2 additions & 0 deletions .pre-commit-config.yaml
@@ -33,6 +33,8 @@ repos:
rev: 6.0.0
hooks:
- id: flake8
additional_dependencies:
- flake8-comprehensions
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
28 changes: 28 additions & 0 deletions pyproject.toml
@@ -172,6 +172,34 @@ skip_gitignore = true
skip = "pytensor/version.py"
skip_glob = "**/*.pyx"

[tool.ruff]
select=["C","E","F","W"]
ignore=["E501","E741","C408","C901"]
exclude = [
"doc/",
"pytensor/_version.py",
"bin/pytensor_cache.py",
]

[tool.ruff.per-file-ignores]
# TODO: Get rid of these:
"**/__init__.py"=["F401","E402","F403"]
"pytensor/tensor/linalg.py"=["F401","F403"]
"pytensor/scalar/basic_scipy.py"=["E402","F403","F401"]
"pytensor/graph/toolbox.py"=["E402","F403","F401"]
"pytensor/link/jax/jax_dispatch.py"=["E402","F403","F401"]
"pytensor/link/jax/jax_linker.py"=["E402","F403","F401"]
"pytensor/sparse/sandbox/sp2.py"=["F401"]
"tests/tensor/test_math_scipy.py"=["E402"]
"tests/sparse/test_basic.py"=["E402"]
"tests/sparse/test_opt.py"=["E402"]
"tests/sparse/test_sp2.py"=["E402"]
"tests/sparse/test_utils.py"=["E402","F401"]
"tests/sparse/sandbox/test_sp.py"=["E402","F401"]
"tests/scalar/test_basic_sympy.py"=["E402"]
"pytensor/graph/rewriting/unify.py"=["F811"]


[tool.mypy]
ignore_missing_imports = true
no_implicit_optional = true
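The new [tool.ruff] table mirrors the flake8 configuration kept in setup.cfg: the same rule families are selected (C, E, F, W), a closely matching set of codes is ignored, and per-file-ignores carries over the existing per-file exceptions for modules that rely on re-exports or import-time side effects. The same checks can be run locally with ruff itself (for example, ruff check . from the repository root, assuming a recent ruff release). As a hypothetical sketch, not taken from this PR, here is the kind of module the per-file-ignored codes refer to:

"""Hypothetical package __init__.py, illustrating the ignored codes."""
import sys  # F401: imported but apparently unused (kept only as a re-export)

VERSION = "0.0.0"  # an executable statement before the next import ...
import os  # ... makes this import E402 (module-level import not at top of file)
from os.path import *  # F403: star import hides which names are actually defined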
4 changes: 1 addition & 3 deletions pytensor/compile/builders.py
@@ -969,9 +969,7 @@ def inline_ofg_expansion(fgraph, node):
return False
if not op.is_inline:
return False
return clone_replace(
op.inner_outputs, {u: v for u, v in zip(op.inner_inputs, node.inputs)}
)
return clone_replace(op.inner_outputs, dict(zip(op.inner_inputs, node.inputs)))


# We want to run this before the first merge optimizer
6 changes: 3 additions & 3 deletions pytensor/gradient.py
@@ -504,7 +504,7 @@ def grad(
if not isinstance(wrt, Sequence):
_wrt: List[Variable] = [wrt]
else:
_wrt = [x for x in wrt]
_wrt = list(wrt)

outputs = []
if cost is not None:
@@ -791,8 +791,8 @@ def subgraph_grad(wrt, end, start=None, cost=None, details=False):

pgrads = dict(zip(params, grads))
# separate wrt from end grads:
wrt_grads = list(pgrads[k] for k in wrt)
end_grads = list(pgrads[k] for k in end)
wrt_grads = [pgrads[k] for k in wrt]
end_grads = [pgrads[k] for k in end]

if details:
return wrt_grads, end_grads, start_grads, cost_grads
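Changes of this shape are what the flake8-comprehensions rules (the C4xx codes pulled in via "C" above, which ruff re-implements) ask for: a comprehension or constructor call that merely copies an iterable is replaced by a direct call or a literal. A minimal sketch, not taken from the codebase:

wrt = ("a", "b", "c")

# Flagged (C416): the comprehension only copies the iterable element by element
wrt_copy_old = [x for x in wrt]
# Preferred form, as in pytensor/gradient.py above
wrt_copy_new = list(wrt)
assert wrt_copy_old == wrt_copy_new

# Flagged (C400): a generator passed straight to list() reads better as a comprehension
grads_old = list(x + 1 for x in range(3))
grads_new = [x + 1 for x in range(3)]
assert grads_old == grads_new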
2 changes: 1 addition & 1 deletion pytensor/graph/basic.py
@@ -1629,7 +1629,7 @@ def as_string(
multi.add(op2)
else:
seen.add(input.owner)
multi_list = [x for x in multi]
multi_list = list(multi)
done: Set = set()

def multi_index(x):
2 changes: 1 addition & 1 deletion pytensor/graph/replace.py
@@ -142,7 +142,7 @@ def toposort_key(fg: FunctionGraph, ts, pair):
raise ValueError(f"{key} is not a part of graph")

sorted_replacements = sorted(
tuple(fg_replace.items()),
fg_replace.items(),
# sort based on the fg toposort, if a variable has no owner, it goes first
key=partial(toposort_key, fg, toposort),
reverse=True,
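This hunk corresponds to ruff's C414 check (an unnecessary tuple()/list() call inside sorted()): sorted() already consumes any iterable and returns a fresh list, so materialising the items first adds nothing. A small sketch, not from the codebase:

replacements = {"b": 2, "a": 1}

# Flagged (C414): the inner tuple() call is redundant
sorted_old = sorted(tuple(replacements.items()))
# Preferred: pass the items view directly
sorted_new = sorted(replacements.items())
assert sorted_old == sorted_new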
4 changes: 2 additions & 2 deletions pytensor/graph/rewriting/basic.py
@@ -2575,8 +2575,8 @@ def print_profile(cls, stream, prof, level=0):
for i in range(len(loop_timing)):
loop_times = ""
if loop_process_count[i]:
d = list(
reversed(sorted(loop_process_count[i].items(), key=lambda a: a[1]))
d = sorted(
loop_process_count[i].items(), key=lambda a: a[1], reverse=True
)
loop_times = " ".join([str((str(k), v)) for k, v in d[:5]])
if len(d) > 5:
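Here the pattern is C413 (an unnecessary list()/reversed() call around sorted()): asking sorted() for descending order directly avoids building an intermediate list. One detail worth keeping in mind with this rewrite is tie handling: sorted(..., reverse=True) keeps equal keys in their original order, while reversed(sorted(...)) reverses it; that is irrelevant for the profiling output above but can matter elsewhere. A minimal sketch, not from the codebase:

counts = {"a": 3, "b": 1, "c": 2}

# Flagged (C413): list(reversed(sorted(...))) builds the descending list indirectly
desc_old = list(reversed(sorted(counts.items(), key=lambda kv: kv[1])))
# Preferred: sorted() can produce the descending order itself
desc_new = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
assert desc_old == desc_new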
4 changes: 2 additions & 2 deletions pytensor/link/c/basic.py
@@ -633,11 +633,11 @@ def fetch_variables(self):

# The orphans field is listified to ensure a consistent order.
# list(fgraph.orphans.difference(self.outputs))
self.orphans = list(
self.orphans = [
r
for r in self.variables
if isinstance(r, AtomicVariable) and r not in self.inputs
)
]
# C type constants (pytensor.scalar.ScalarType). They don't request an object
self.consts = []
# Move c type from orphans (pytensor.scalar.ScalarType) to self.consts
2 changes: 1 addition & 1 deletion pytensor/link/c/params_type.py
@@ -810,7 +810,7 @@ def c_support_code(self, **kwargs):
struct_extract_method=struct_extract_method,
)

return list(sorted(list(c_support_code_set))) + [final_struct_code]
return sorted(c_support_code_set) + [final_struct_code]

def c_code_cache_version(self):
return ((3,), tuple(t.c_code_cache_version() for t in self.types))
2 changes: 1 addition & 1 deletion pytensor/link/jax/dispatch/elemwise.py
@@ -41,7 +41,7 @@ def careduce(x):
elif scalar_op_name:
scalar_fn_name = scalar_op_name

to_reduce = reversed(sorted(axis))
to_reduce = sorted(axis, reverse=True)

if to_reduce:
# In this case, we need to use the `jax.lax` function (if there
2 changes: 1 addition & 1 deletion pytensor/link/numba/dispatch/elemwise.py
@@ -361,7 +361,7 @@ def careduce_maximum(input):

careduce_fn_name = f"careduce_{scalar_op}"
global_env = {}
to_reduce = reversed(sorted(axes))
to_reduce = sorted(axes, reverse=True)
careduce_lines_src = []
var_name = input_name

2 changes: 1 addition & 1 deletion pytensor/printing.py
@@ -796,7 +796,7 @@ def grad(self, input, output_gradients):
return output_gradients

def R_op(self, inputs, eval_points):
return [x for x in eval_points]
return list(eval_points)

def __setstate__(self, dct):
dct.setdefault("global_fn", _print_fn)
4 changes: 2 additions & 2 deletions pytensor/scan/basic.py
@@ -492,7 +492,7 @@ def wrap_into_list(x):
# wrap sequences in a dictionary if they are not already dictionaries
for i in range(n_seqs):
if not isinstance(seqs[i], dict):
seqs[i] = dict([("input", seqs[i]), ("taps", [0])])
seqs[i] = {"input": seqs[i], "taps": [0]}
elif seqs[i].get("taps", None) is not None:
seqs[i]["taps"] = wrap_into_list(seqs[i]["taps"])
elif seqs[i].get("taps", None) is None:
@@ -504,7 +504,7 @@ def wrap_into_list(x):
if outs_info[i] is not None:
if not isinstance(outs_info[i], dict):
# by default any output has a tap value of -1
outs_info[i] = dict([("initial", outs_info[i]), ("taps", [-1])])
outs_info[i] = {"initial": outs_info[i], "taps": [-1]}
elif (
outs_info[i].get("initial", None) is None
and outs_info[i].get("taps", None) is not None
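The dict([...]) calls above fall under flake8-comprehensions as well (C406, a dict built from a literal list of pairs); a literal is both shorter and cheaper. A minimal sketch, not from the codebase:

# Flagged (C406): building a dict from a literal list of key/value tuples
seq_old = dict([("input", "x"), ("taps", [0])])
# Preferred: a dict literal, as in pytensor/scan/basic.py above
seq_new = {"input": "x", "taps": [0]}
assert seq_old == seq_new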
13 changes: 5 additions & 8 deletions pytensor/scan/op.py
@@ -1718,12 +1718,9 @@ def perform(self, node, inputs, output_storage, params=None):
arg.shape[0]
for arg in inputs[self.seqs_arg_offset : self.shared_arg_offset]
]
store_steps += [
arg
for arg in inputs[
self.nit_sot_arg_offset : self.nit_sot_arg_offset + info.n_nit_sot
]
]
store_steps += list(
inputs[self.nit_sot_arg_offset : self.nit_sot_arg_offset + info.n_nit_sot]
)

# 2.1 Create storage space for outputs
for idx in range(self.n_outs):
@@ -2270,7 +2267,7 @@ def infer_shape(self, fgraph, node, input_shapes):
)

offset = 1 + info.n_seqs
scan_outs = [x for x in input_shapes[offset : offset + n_outs]]
scan_outs = list(input_shapes[offset : offset + n_outs])
offset += n_outs
outs_shape_n = info.n_mit_mot_outs + info.n_mit_sot + info.n_sit_sot
for x in range(info.n_nit_sot):
@@ -2301,7 +2298,7 @@ def infer_shape(self, fgraph, node, input_shapes):
shp.append(v_shp_i[0])
scan_outs.append(tuple(shp))

scan_outs += [x for x in input_shapes[offset : offset + info.n_shared_outs]]
scan_outs += list(input_shapes[offset : offset + info.n_shared_outs])
# if we are dealing with a repeat-until, then we do not know the
# leading dimension so we replace it for every entry with Shape_i
if info.as_while:
2 changes: 1 addition & 1 deletion pytensor/scan/rewriting.py
@@ -388,7 +388,7 @@ def add_to_replace(y):
if out in local_fgraph_outs_set:
x = node.outputs[local_fgraph_outs_map[out]]
y = replace_with_out[idx]
y_shape = [shp for shp in y.shape]
y_shape = list(y.shape)
replace_with[x] = at.alloc(y, node.inputs[0], *y_shape)

# We need to add one extra dimension to the outputs
2 changes: 1 addition & 1 deletion pytensor/tensor/random/op.py
@@ -283,7 +283,7 @@ def infer_shape(self, fgraph, node, input_shapes):

shape = self._infer_shape(size, dist_params, param_shapes=param_shapes)

return [None, [s for s in shape]]
return [None, list(shape)]

def __call__(self, *args, size=None, name=None, rng=None, dtype=None, **kwargs):
res = super().__call__(rng, size, dtype, *args, **kwargs)
6 changes: 3 additions & 3 deletions pytensor/tensor/rewriting/math.py
@@ -1555,11 +1555,11 @@ def local_sum_prod_div_dimshuffle(fgraph, node):
)

if len(compatible_dims) > 0:
optimized_dimshuffle_order = list(
optimized_dimshuffle_order = [
ax
for i, ax in enumerate(dimshuffle_order)
if (i not in axis) or (ax != "x")
)
]

# Removing leading 'x' (since it will be done automatically)
while (
@@ -1644,7 +1644,7 @@ def local_op_of_op(fgraph, node):
return [op_type(None, dtype=out_dtype)(node_inps.owner.inputs[0])]

# figure out which axes were in the original sum
newaxis = list(tuple(node_inps.owner.op.axis))
newaxis = list(node_inps.owner.op.axis)
for i in node.op.axis:
new_i = i
for ii in node_inps.owner.op.axis:
6 changes: 3 additions & 3 deletions pytensor/tensor/shape.py
@@ -810,7 +810,7 @@ def shape_padleft(t, n_ones=1):
"""
_t = at.as_tensor_variable(t)

pattern = ["x"] * n_ones + [i for i in range(_t.type.ndim)]
pattern = ["x"] * n_ones + list(range(_t.type.ndim))
return _t.dimshuffle(pattern)


@@ -826,7 +826,7 @@ def shape_padright(t, n_ones=1):
"""
_t = at.as_tensor_variable(t)

pattern = [i for i in range(_t.type.ndim)] + ["x"] * n_ones
pattern = list(range(_t.type.ndim)) + ["x"] * n_ones
return _t.dimshuffle(pattern)


@@ -861,7 +861,7 @@ def shape_padaxis(t, axis):
if axis < 0:
axis += ndim

pattern = [i for i in range(_t.type.ndim)]
pattern = list(range(_t.type.ndim))
pattern.insert(axis, "x")
return _t.dimshuffle(pattern)

2 changes: 1 addition & 1 deletion pytensor/tensor/subtensor.py
@@ -2604,7 +2604,7 @@ def infer_shape(self, fgraph, node, ishapes):
ishapes[0], index_shapes, indices_are_shapes=True
)
assert node.outputs[0].ndim == len(res_shape)
return [[s for s in res_shape]]
return [list(res_shape)]

def perform(self, node, inputs, out_):
(out,) = out_
3 changes: 1 addition & 2 deletions setup.cfg
@@ -1,7 +1,6 @@
[flake8]
select = C,E,F,W
ignore = E203,E231,E501,E741,W503,W504,C901
max-line-length = 88
ignore = E203,E231,E501,E741,W503,W504,C408,C901
per-file-ignores =
**/__init__.py:F401,E402,F403
pytensor/tensor/linalg.py:F401,F403
4 changes: 2 additions & 2 deletions tests/graph/test_features.py
@@ -73,7 +73,7 @@ def inputs():

assert hasattr(g, "get_nodes")
for type, num in ((add, 3), (sigmoid, 3), (dot, 2)):
if len([t for t in g.get_nodes(type)]) != num:
if len(list(g.get_nodes(type))) != num:
raise Exception("Expected: %i times %s" % (num, type))
new_e0 = add(y, z)
assert e0.owner in g.get_nodes(dot)
@@ -82,7 +82,7 @@ def inputs():
assert e0.owner not in g.get_nodes(dot)
assert new_e0.owner in g.get_nodes(add)
for type, num in ((add, 4), (sigmoid, 3), (dot, 1)):
if len([t for t in g.get_nodes(type)]) != num:
if len(list(g.get_nodes(type))) != num:
raise Exception("Expected: %i times %s" % (num, type))


2 changes: 1 addition & 1 deletion tests/graph/test_op.py
@@ -87,7 +87,7 @@ def test_sanity_0(self):
r1, r2 = MyType(1)(), MyType(2)()
node = MyOp.make_node(r1, r2)
# Are the inputs what I provided?
assert [x for x in node.inputs] == [r1, r2]
assert list(node.inputs) == [r1, r2]
# Are the outputs what I expect?
assert [x.type for x in node.outputs] == [MyType(3)]
assert node.outputs[0].owner is node and node.outputs[0].index == 0
2 changes: 1 addition & 1 deletion tests/tensor/rewriting/test_elemwise.py
@@ -1123,7 +1123,7 @@ def test_add_mul_fusion_inplace(self):
out = dot(x, y) + x + y + z

f = function([x, y, z], out, mode=self.mode)
topo = [n for n in f.maker.fgraph.toposort()]
topo = list(f.maker.fgraph.toposort())
assert len(topo) == 2
assert topo[-1].op.inplace_pattern

6 changes: 3 additions & 3 deletions tests/tensor/rewriting/test_math.py
@@ -3994,9 +3994,9 @@ def test_is_1pexp(self):
exp_op = exp
assert is_1pexp(1 + exp_op(x), False) == (False, x)
assert is_1pexp(exp_op(x) + 1, False) == (False, x)
for neg_, exp_arg in map(
lambda x: is_1pexp(x, only_process_constants=False),
[(1 + exp_op(-x)), (exp_op(-x) + 1)],
for neg_, exp_arg in (
is_1pexp(x, only_process_constants=False)
for x in [(1 + exp_op(-x)), (exp_op(-x) + 1)]
):
assert not neg_ and is_same_graph(exp_arg, -x)
assert is_1pexp(1 - exp_op(x), False) is None
4 changes: 2 additions & 2 deletions tests/tensor/rewriting/test_subtensor.py
@@ -2004,7 +2004,7 @@ def test_local_subtensor_SpecifyShape_lift(x, s, idx, x_val, s_val):
y_val_fn = function(
[x] + list(s), y, on_unused_input="ignore", mode=no_rewrites_mode
)
y_val = y_val_fn(*([x_val] + [s_ for s_ in s_val]))
y_val = y_val_fn(*([x_val] + list(s_val)))

# This optimization should appear in the canonicalizations
y_opt = rewrite_graph(y, clone=False)
@@ -2017,7 +2017,7 @@ def test_local_subtensor_SpecifyShape_lift(x, s, idx, x_val, s_val):
assert isinstance(y_opt.owner.op, SpecifyShape)

y_opt_fn = function([x] + list(s), y_opt, on_unused_input="ignore")
y_opt_val = y_opt_fn(*([x_val] + [s_ for s_ in s_val]))
y_opt_val = y_opt_fn(*([x_val] + list(s_val)))

assert np.allclose(y_val, y_opt_val)

6 changes: 3 additions & 3 deletions tests/tensor/test_blas.py
@@ -2589,10 +2589,10 @@ def test_ger(self):
op=batched_dot,
expected=(
lambda xs, ys: np.asarray(
list(
[
x * y if x.ndim == 0 or y.ndim == 0 else np.dot(x, y)
for x, y in zip(xs, ys)
),
],
dtype=aes.upcast(xs.dtype, ys.dtype),
)
),
@@ -2694,7 +2694,7 @@ def check_first_dim(inverted):
assert x.strides[0] == direction * np.dtype(config.floatX).itemsize
assert not (x.flags["C_CONTIGUOUS"] or x.flags["F_CONTIGUOUS"])
result = f(x, w)
ref_result = np.asarray(list(np.dot(u, v) for u, v in zip(x, w)))
ref_result = np.asarray([np.dot(u, v) for u, v in zip(x, w)])
utt.assert_allclose(ref_result, result)

for inverted in (0, 1):
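The test changes above are the C400 pattern again (a generator expression passed straight to list()); a list comprehension says the same thing directly and is what both the batched_dot expectation and the reference result now use. A minimal sketch, not from the test suite:

xs, ws = [1, 2, 3], [4, 5, 6]

# Flagged (C400): list() wrapped around a generator expression
ref_old = list(u * w for u, w in zip(xs, ws))
# Preferred: a plain list comprehension
ref_new = [u * w for u, w in zip(xs, ws)]
assert ref_old == ref_new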
4 changes: 1 addition & 3 deletions tests/tensor/test_complex.py
@@ -15,9 +15,7 @@ def test_basic(self):
x = zvector()
rng = np.random.default_rng(23)
xval = np.asarray(
list(
complex(rng.standard_normal(), rng.standard_normal()) for i in range(10)
)
[complex(rng.standard_normal(), rng.standard_normal()) for i in range(10)]
)
assert np.all(xval.real == pytensor.function([x], real(x))(xval))
assert np.all(xval.imag == pytensor.function([x], imag(x))(xval))