From f894488fd1bb9b3b3463a8f33861dac7f5d72925 Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Mon, 8 Jan 2024 15:06:36 +0100 Subject: [PATCH 01/22] add config --- .pre-commit-config.yaml | 19 +- .pylintrc | 372 ---------------------------------------- pyproject.toml | 23 +-- setup.cfg | 4 - 4 files changed, 19 insertions(+), 399 deletions(-) delete mode 100644 .pylintrc diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9f948062a1e..13c41e25487 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,28 +23,21 @@ repos: - --exclude=scripts/ - --exclude=binder/ - --exclude=versioneer.py -- repo: https://github.com/PyCQA/isort - rev: 5.13.2 - hooks: - - id: isort - name: isort - repo: https://github.com/asottile/pyupgrade rev: v3.15.0 hooks: - id: pyupgrade args: [--py37-plus] +- repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.1.11 + hooks: + - id: ruff + args: ["--fix", "--show-source"] + - id: ruff-format - repo: https://github.com/psf/black rev: 23.12.1 hooks: - - id: black - id: black-jupyter -- repo: https://github.com/PyCQA/pylint - rev: v3.0.3 - hooks: - - id: pylint - args: [--rcfile=.pylintrc] - files: ^pymc/ - exclude: (?x)(pymc/_version.py) - repo: https://github.com/PyCQA/pydocstyle rev: 6.3.0 hooks: diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index 952050ed8f3..00000000000 --- a/.pylintrc +++ /dev/null @@ -1,372 +0,0 @@ -[MASTER] -# Use multiple processes to speed up Pylint. -jobs=1 - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# Allow optimization of some AST trees. This will activate a peephole AST -# optimizer, which will apply various small optimizations. For instance, it can -# be used to obtain the result of joining multiple strings with the addition -# operator. Joining a lot of strings can lead to a maximum recursion error in -# Pylint and this flag can prevent that. It has one side effect, the resulting -# AST will be different than the one from reality. -optimize-ast=no - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -disable=all - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time. See also the "--disable" option for examples. 
-enable=import-self, - reimported, - wildcard-import, - misplaced-future, - relative-import, - deprecated-module, - unpacking-non-sequence, - invalid-all-object, - undefined-all-variable, - used-before-assignment, - cell-var-from-loop, - global-variable-undefined, - dangerous-default-value, - # redefined-builtin, - redefine-in-handler, - unused-import, - unused-wildcard-import, - global-variable-not-assigned, - undefined-loop-variable, - global-statement, - global-at-module-level, - bad-open-mode, - redundant-unittest-assert, - boolean-datetime, - # unused-variable - - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=parseable - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". -files-output=no - -# Tells whether to display a full report or only the messages -reports=no - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -[BASIC] - -# List of builtins function names that should not be used, separated by a comma -bad-functions=map,filter,input - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. 
-name-group=
-
-# Include a hint for the correct naming format with invalid-name
-include-naming-hint=yes
-
-# Regular expression matching correct method names
-method-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Naming hint for method names
-method-name-hint=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression matching correct function names
-function-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Naming hint for function names
-function-name-hint=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression matching correct module names
-module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
-
-# Naming hint for module names
-module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
-
-# Regular expression matching correct attribute names
-attr-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Naming hint for attribute names
-attr-name-hint=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression matching correct class attribute names
-class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
-
-# Naming hint for class attribute names
-class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
-
-# Regular expression matching correct constant names
-const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
-
-# Naming hint for constant names
-const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
-
-# Regular expression matching correct class names
-class-rgx=[A-Z_][a-zA-Z0-9]+$
-
-# Naming hint for class names
-class-name-hint=[A-Z_][a-zA-Z0-9]+$
-
-# Regular expression matching correct argument names
-argument-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Naming hint for argument names
-argument-name-hint=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression matching correct inline iteration names
-inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
-
-# Naming hint for inline iteration names
-inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
-
-# Regular expression matching correct variable names
-variable-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Naming hint for variable names
-variable-name-hint=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression which should only match function or class names that do
-# not require a docstring.
-no-docstring-rgx=^_
-
-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-docstring-min-length=-1
-
-
-[ELIF]
-
-# Maximum number of nested blocks for function / method body
-max-nested-blocks=5
-
-
-[FORMAT]
-
-# Maximum number of characters on a single line.
-max-line-length=100
-
-# Regexp for a line that is allowed to be longer than the limit.
-ignore-long-lines=^\s*(# )?<?https?://\S+>?$
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-# List of optional constructs for which whitespace checking is disabled. `dict-
-# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
-# `trailing-comma` allows a space between comma and closing bracket: (a, ).
-# `empty-line` allows space-only lines.
-no-space-check=trailing-comma,dict-separator
-
-# Maximum number of lines in a module
-max-module-lines=1000
-
-# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
-# tab).
-indent-string='    '
-
-# Number of spaces of indent required inside a hanging or continued line.
-indent-after-paren=4
-
-# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
-expected-line-ending-format=
-
-
-[LOGGING]
-
-# Logging modules to check that the string format arguments are in logging
-# function parameter format
-logging-modules=logging
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,XXX,TODO - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=4 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules= - -# List of classes names for which member attributes should not be checked -# (useful for classes with attributes dynamically set). This supports can work -# with qualified names. -ignored-classes= - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=_$|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). 
-min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=optparse - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception diff --git a/pyproject.toml b/pyproject.toml index 75a2e1f3274..3178390c54c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,21 +1,24 @@ [tool.pytest.ini_options] minversion = "6.0" -xfail_strict=true -addopts = [ - "--color=yes", -] +xfail_strict = true +addopts = ["--color=yes"] -[tool.black] +[tool.ruff] line-length = 100 +[tool.ruff.lint] +ignore-init-module-imports = true +ignore = [ + "E731", # Do not assign a lambda expression, use a def + "F841", # Local variable name is assigned to but never used +] + +[tool.ruff.extend-per-file-ignores] +"__init__.py" = ["E402", "F401", "F403"] + [tool.coverage.report] exclude_lines = [ "pragma: nocover", "raise NotImplementedError", "if TYPE_CHECKING:", ] - -[tool.nbqa.mutate] -isort = 1 -black = 1 -pyupgrade = 1 diff --git a/setup.cfg b/setup.cfg index d57a6141b4f..5fec12d5d98 100644 --- a/setup.cfg +++ b/setup.cfg @@ -4,10 +4,6 @@ testpaths = tests [coverage:run] omit = *examples* -[isort] -lines_between_types = 1 -profile = black - [versioneer] VCS = git style = pep440 From d3ab381ec0c2d2930baac236cf4b024e0ceb21af Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Mon, 8 Jan 2024 15:12:28 +0100 Subject: [PATCH 02/22] pymc small fixes --- .pre-commit-config.yaml | 4 --- pymc/backends/arviz.py | 21 +++++++++++---- pymc/distributions/bound.py | 4 +-- pymc/distributions/continuous.py | 32 ++++++++++++++++++++--- pymc/gp/hsgp_approx.py | 4 +-- pymc/logprob/__init__.py | 2 -- pymc/logprob/rewriting.py | 11 ++++++-- pymc/model/transform/conditioning.py | 4 +-- pymc/sampling/jax.py | 4 +-- pymc/sampling/mcmc.py | 4 ++- pymc/sampling_jax.py | 2 +- pymc/smc/sampling.py | 10 +++++-- pymc/testing.py | 39 +++++++++++++++++++++------- pymc/tuning/scaling.py | 2 +- 14 files changed, 105 insertions(+), 38 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 13c41e25487..a915addf26d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -34,10 +34,6 @@ repos: - id: ruff args: ["--fix", "--show-source"] - id: ruff-format -- repo: https://github.com/psf/black - rev: 23.12.1 - hooks: - - id: black-jupyter - repo: https://github.com/PyCQA/pydocstyle rev: 6.3.0 hooks: diff --git a/pymc/backends/arviz.py b/pymc/backends/arviz.py index c8d85899ca2..1276fe3fdd5 100644 --- a/pymc/backends/arviz.py +++ b/pymc/backends/arviz.py @@ -23,7 +23,6 @@ List, Mapping, Optional, - Sequence, Tuple, Union, ) @@ -100,7 +99,9 @@ def is_data(name, var, model) -> bool: return constant_data -def coords_and_dims_for_inferencedata(model: Model) -> Tuple[Dict[str, Any], Dict[str, Any]]: +def coords_and_dims_for_inferencedata( + model: Model, +) -> Tuple[Dict[str, Any], Dict[str, Any]]: """Parse PyMC model coords and dims format 
to one accepted by InferenceData.""" coords = { cname: np.array(cvals) if isinstance(cvals, tuple) else cvals @@ -240,7 +241,9 @@ def __init__( self.observations = find_observations(self.model) - def split_trace(self) -> Tuple[Union[None, "MultiTrace"], Union[None, "MultiTrace"]]: + def split_trace( + self, + ) -> Tuple[Union[None, "MultiTrace"], Union[None, "MultiTrace"]]: """Split MultiTrace object into posterior and warmup. Returns @@ -342,7 +345,11 @@ def posterior_predictive_to_xarray(self): data = self.posterior_predictive dims = {var_name: self.sample_dims + self.dims.get(var_name, []) for var_name in data} return dict_to_dataset( - data, library=pymc, coords=self.coords, dims=dims, default_dims=self.sample_dims + data, + library=pymc, + coords=self.coords, + dims=dims, + default_dims=self.sample_dims, ) @requires(["predictions"]) @@ -351,7 +358,11 @@ def predictions_to_xarray(self): data = self.predictions dims = {var_name: self.sample_dims + self.dims.get(var_name, []) for var_name in data} return dict_to_dataset( - data, library=pymc, coords=self.coords, dims=dims, default_dims=self.sample_dims + data, + library=pymc, + coords=self.coords, + dims=dims, + default_dims=self.sample_dims, ) def priors_to_xarray(self): diff --git a/pymc/distributions/bound.py b/pymc/distributions/bound.py index 3c714e3aed7..0f4379d97fb 100644 --- a/pymc/distributions/bound.py +++ b/pymc/distributions/bound.py @@ -286,9 +286,9 @@ def _set_values(cls, lower, upper, size, shape, initval): size = shape lower = np.asarray(lower) - lower = floatX(np.where(lower == None, -np.inf, lower)) + lower = floatX(np.where(lower == None, -np.inf, lower)) # noqa E711 upper = np.asarray(upper) - upper = floatX(np.where(upper == None, np.inf, upper)) + upper = floatX(np.where(upper == None, np.inf, upper)) # noqa E711 if initval is None: _size = np.broadcast_shapes(to_tuple(size), np.shape(lower), np.shape(upper)) diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py index 76932db6f6d..5403f186e59 100644 --- a/pymc/distributions/continuous.py +++ b/pymc/distributions/continuous.py @@ -303,6 +303,7 @@ class Uniform(BoundedContinuous): upper : tensor_like of float, default 1 Upper limit. """ + rv_op = uniform bound_args_indices = (3, 4) # Lower, Upper @@ -400,7 +401,9 @@ def logp(value): def logcdf(value): return pt.switch( - pt.eq(value, -np.inf), -np.inf, pt.switch(pt.eq(value, np.inf), 0, pt.log(0.5)) + pt.eq(value, -np.inf), + -np.inf, + pt.switch(pt.eq(value, np.inf), 0, pt.log(0.5)), ) @@ -508,6 +511,7 @@ class Normal(Continuous): with pm.Model(): x = pm.Normal('x', mu=0, tau=1/23) """ + rv_op = normal @classmethod @@ -841,6 +845,7 @@ class HalfNormal(PositiveContinuous): with pm.Model(): x = pm.HalfNormal('x', tau=1/15) """ + rv_op = halfnormal @classmethod @@ -984,6 +989,7 @@ class Wald(PositiveContinuous): .. [Giner2016] Göknur Giner, Gordon K. Smyth (2016) statmod: Probability Calculations for the Inverse Gaussian Distribution """ + rv_op = wald @classmethod @@ -1050,7 +1056,7 @@ def logp(value, mu, lam, alpha): def logcdf(value, mu, lam, alpha): value -= alpha q = value / mu - l = lam * mu + l = lam * mu # noqa E741 r = pt.sqrt(value * lam) a = normal_lcdf(0, 1, (q - 1.0) / r) @@ -1295,6 +1301,7 @@ class Kumaraswamy(UnitContinuous): b : tensor_like of float b > 0. """ + rv_op = kumaraswamy @classmethod @@ -1383,6 +1390,7 @@ class Exponential(PositiveContinuous): scale: tensor_like of float Alternative parameter (scale = 1/lam). 
""" + rv_op = exponential @classmethod @@ -1480,6 +1488,7 @@ class Laplace(Continuous): b : tensor_like of float Scale parameter (b > 0). """ + rv_op = laplace @classmethod @@ -1524,7 +1533,9 @@ def logcdf(value, mu, b): def icdf(value, mu, b): res = pt.switch( - pt.le(value, 0.5), mu + b * np.log(2 * value), mu - b * np.log(2 - 2 * value) + pt.le(value, 0.5), + mu + b * np.log(2 * value), + mu - b * np.log(2 - 2 * value), ) res = check_icdf_value(res, value) return check_icdf_parameters(res, b > 0, msg="b > 0") @@ -1595,6 +1606,7 @@ class AsymmetricLaplace(Continuous): The parametrization in terms of q is useful for quantile regression with q being the quantile of interest. """ + rv_op = asymmetriclaplace @classmethod @@ -1826,6 +1838,7 @@ class StudentT(Continuous): with pm.Model(): x = pm.StudentT('x', nu=15, mu=0, lam=1/23) """ + rv_op = t @classmethod @@ -1922,6 +1935,7 @@ class Pareto(BoundedContinuous): m : tensor_like of float Scale parameter (m > 0). """ + rv_op = pareto bound_args_indices = (4, None) # lower-bounded by `m` @@ -2030,6 +2044,7 @@ class Cauchy(Continuous): beta : tensor_like of float Scale parameter > 0. """ + rv_op = cauchy @classmethod @@ -2110,6 +2125,7 @@ class HalfCauchy(PositiveContinuous): beta : tensor_like of float Scale parameter (beta > 0). """ + rv_op = halfcauchy @classmethod @@ -2213,6 +2229,7 @@ class Gamma(PositiveContinuous): sigma : tensor_like of float, optional Alternative scale parameter (sigma > 0). """ + # gamma is temporarily a deprecation wrapper in PyTensor rv_op = _gamma @@ -2321,6 +2338,7 @@ class InverseGamma(PositiveContinuous): sigma : tensor_like of float, optional Alternative scale parameter (sigma > 0). """ + rv_op = invgamma @classmethod @@ -2635,6 +2653,7 @@ class HalfStudentT(PositiveContinuous): with pm.Model(): x = pm.HalfStudentT('x', lam=4, nu=10) """ + rv_op = halfstudentt @classmethod @@ -2755,6 +2774,7 @@ class ExGaussian(Continuous): Tutorials in Quantitative Methods for Psychology, Vol. 4, No. 1, pp 35-45. """ + rv_op = exgaussian @classmethod @@ -2962,6 +2982,7 @@ class SkewNormal(Continuous): approaching plus/minus infinite we get a half-normal distribution. """ + rv_op = skewnormal @classmethod @@ -3177,6 +3198,7 @@ class Gumbel(Continuous): beta : tensor_like of float Scale parameter (beta > 0). """ + rv_op = gumbel @classmethod @@ -3295,6 +3317,7 @@ class Rice(PositiveContinuous): b = \dfrac{\nu}{\sigma} """ + rv_op = rice @classmethod @@ -3499,6 +3522,7 @@ class LogitNormal(UnitContinuous): Scale parameter (tau > 0). Defaults to 1. """ + rv_op = logit_normal @classmethod @@ -3741,6 +3765,7 @@ class Moyal(Continuous): sigma : tensor_like of float, default 1 Scale parameter (sigma > 0). """ + rv_op = moyal @classmethod @@ -3939,6 +3964,7 @@ class PolyaGamma(PositiveContinuous): from logistic likelihoods.(PhD thesis). 
Retrieved from http://hdl.handle.net/2152/21842 """ + rv_op = polyagamma @classmethod diff --git a/pymc/gp/hsgp_approx.py b/pymc/gp/hsgp_approx.py index bbdf8b9d979..04240c6792f 100644 --- a/pymc/gp/hsgp_approx.py +++ b/pymc/gp/hsgp_approx.py @@ -332,7 +332,7 @@ def prior_linearized(self, Xs: TensorLike): omega = pt.sqrt(eigvals) psd = self.cov_func.power_spectral_density(omega) - i = int(self._drop_first == True) + i = int(self._drop_first == True) # noqa E712 return phi[:, i:], pt.sqrt(psd[i:]) def prior(self, name: str, X: TensorLike, dims: Optional[str] = None): # type: ignore @@ -382,7 +382,7 @@ def _build_conditional(self, Xnew): eigvals = calc_eigenvalues(self.L, self._m, tl=pt) phi = calc_eigenvectors(Xnew - X_mean, self.L, eigvals, self._m, tl=pt) - i = int(self._drop_first == True) + i = int(self._drop_first == True) # noqa E712 if self._parameterization == "noncentered": return self.mean_func(Xnew) + phi[:, i:] @ (beta * sqrt_psd) diff --git a/pymc/logprob/__init__.py b/pymc/logprob/__init__.py index 89e9e3b6430..bed9ee3a9c8 100644 --- a/pymc/logprob/__init__.py +++ b/pymc/logprob/__init__.py @@ -42,7 +42,6 @@ transformed_conditional_logp, ) -# isort: off # Add rewrites to the DBs import pymc.logprob.binary import pymc.logprob.censoring @@ -54,7 +53,6 @@ import pymc.logprob.tensor import pymc.logprob.transforms -# isort: on __all__ = ( "logp", diff --git a/pymc/logprob/rewriting.py b/pymc/logprob/rewriting.py index e4ad5b3912d..77b753a8021 100644 --- a/pymc/logprob/rewriting.py +++ b/pymc/logprob/rewriting.py @@ -145,7 +145,9 @@ def apply(self, fgraph): continue if not any(out in rv_map_feature.needs_measuring for out in node.outputs): continue - for node_rewriter in self.node_tracker.get_trackers(node.op): + for node_rewriter in self.node_tracker.get_trackers( # noqa: F402 + node.op + ): # noqa: F402 node_rewriter_change = self.process_node(fgraph, node, node_rewriter) if not node_rewriter_change: continue @@ -389,7 +391,12 @@ def incsubtensor_rv_replace(fgraph, node): cleanup_ir_rewrites_db.name = "cleanup_ir_rewrites_db" logprob_rewrites_db.register( "cleanup_ir_rewrites", - TopoDB(cleanup_ir_rewrites_db, order="out_to_in", ignore_newtrees=True, failure_callback=None), + TopoDB( + cleanup_ir_rewrites_db, + order="out_to_in", + ignore_newtrees=True, + failure_callback=None, + ), "cleanup", ) diff --git a/pymc/model/transform/conditioning.py b/pymc/model/transform/conditioning.py index fa2b2055a68..0cb2a2f9432 100644 --- a/pymc/model/transform/conditioning.py +++ b/pymc/model/transform/conditioning.py @@ -96,7 +96,7 @@ def observe( valid_model_vars = set(model.free_RVs + model.deterministics) if any(var not in valid_model_vars for var in vars_to_observations): - raise ValueError(f"At least one var is not a free variable or deterministic in the model") + raise ValueError("At least one var is not a free variable or deterministic in the model") fgraph, memo = fgraph_from_model(model) @@ -177,7 +177,7 @@ def do( ) from err if any(var not in model.named_vars.values() for var in do_mapping): - raise ValueError(f"At least one var is not a named variable in the model") + raise ValueError("At least one var is not a named variable in the model") fgraph, memo = fgraph_from_model(model, inlined_views=True) diff --git a/pymc/sampling/jax.py b/pymc/sampling/jax.py index c0d3690b0d3..263ed520281 100644 --- a/pymc/sampling/jax.py +++ b/pymc/sampling/jax.py @@ -504,7 +504,7 @@ def sample_blackjax_nuts( if idata_kwargs.pop("log_likelihood", False): tic5 = datetime.now() - logger.info(f"Computing 
Log Likelihood...") + logger.info("Computing Log Likelihood...") log_likelihood = _get_log_likelihood( model, raw_mcmc_samples, @@ -732,7 +732,7 @@ def sample_numpyro_nuts( if idata_kwargs.pop("log_likelihood", False): tic5 = datetime.now() - logger.info(f"Computing Log Likelihood...") + logger.info("Computing Log Likelihood...") log_likelihood = _get_log_likelihood( model, raw_mcmc_samples, diff --git a/pymc/sampling/mcmc.py b/pymc/sampling/mcmc.py index 1e859fbc81b..748d16fae28 100644 --- a/pymc/sampling/mcmc.py +++ b/pymc/sampling/mcmc.py @@ -1104,7 +1104,9 @@ def _iter_sample( point, stats = step.step(point) trace.record(point, stats) log_warning_stats(stats) - diverging = i > tune and len(stats) > 0 and (stats[0].get("diverging") == True) + diverging = ( + i > tune and len(stats) > 0 and (stats[0].get("diverging") == True) # noqa E712 + ) # noqa E712 if callback is not None: callback( trace=trace, diff --git a/pymc/sampling_jax.py b/pymc/sampling_jax.py index 86ce158e059..6bb0a1631ec 100644 --- a/pymc/sampling_jax.py +++ b/pymc/sampling_jax.py @@ -19,4 +19,4 @@ import warnings warnings.warn("This module is deprecated, use pymc.sampling.jax", DeprecationWarning) -from pymc.sampling.jax import * +from pymc.sampling.jax import * # noqa: E402, F403 diff --git a/pymc/smc/sampling.py b/pymc/smc/sampling.py index 33ffc6547a8..37c0a61e074 100644 --- a/pymc/smc/sampling.py +++ b/pymc/smc/sampling.py @@ -211,7 +211,13 @@ def sample_smc( if cores > 1: results = run_chains_parallel( - chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs, cores + chains, + progressbar, + _sample_smc_int, + params, + random_seed, + kernel_kwargs, + cores, ) else: results = run_chains_sequential( @@ -314,7 +320,7 @@ def _sample_smc_int( **kernel_kwargs, ): """Run one SMC instance.""" - in_out_pickled = type(model) == bytes + in_out_pickled = isinstance(model, bytes) if in_out_pickled: # function was called in multiprocessing context, deserialize first (draws, kernel, start, model) = map( diff --git a/pymc/testing.py b/pymc/testing.py index 918929d2c05..f0307af6fbf 100644 --- a/pymc/testing.py +++ b/pymc/testing.py @@ -52,9 +52,8 @@ # Remove slow rewrite phases .excluding("canonicalize", "specialize") # Include necessary rewrites for proper logp handling - .including("remove_TransformedVariables").register( - (in2out(local_check_parameter_to_ninf_switch), -1) - ) + .including("remove_TransformedVariables") + .register((in2out(local_check_parameter_to_ninf_switch), -1)) ) @@ -212,7 +211,7 @@ def RandomPdMatrix(n): Rdunif = Domain([-np.inf, -1, 0, 1, np.inf], "int64") Rplusunif = Domain([0, 0.5, np.inf]) Rplusdunif = Domain([0, 10, np.inf], "int64") -I = Domain([-np.inf, -3, -2, -1, 0, 1, 2, 3, np.inf], "int64") +I = Domain([-np.inf, -3, -2, -1, 0, 1, 2, 3, np.inf], "int64") # noqa E741 NatSmall = Domain([0, 3, 4, 5, np.inf], "int64") Nat = Domain([0, 1, 2, 3, np.inf], "int64") NatBig = Domain([0, 1, 2, 3, 5000, np.inf], "int64") @@ -270,7 +269,7 @@ def create_dist_from_paramdomains( def find_invalid_scalar_params( - paramdomains: Dict["str", Domain] + paramdomains: Dict["str", Domain], ) -> Dict["str", Tuple[Union[None, float], Union[None, float]]]: """Find invalid parameter values from bounded scalar parameter domains. 
@@ -876,7 +875,9 @@ def get_random_state(self, reset=False): def _instantiate_pymc_rv(self, dist_params=None): params = dist_params if dist_params else self.pymc_dist_params self.pymc_rv = self.pymc_dist.dist( - **params, size=self.size, rng=pytensor.shared(self.get_random_state(reset=True)) + **params, + size=self.size, + rng=pytensor.shared(self.get_random_state(reset=True)), ) def check_pymc_draws_match_reference(self): @@ -900,8 +901,24 @@ def check_pymc_params_match_rv_op(self): def check_rv_size(self): # test sizes - sizes_to_check = self.sizes_to_check or [None, (), 1, (1,), 5, (4, 5), (2, 4, 2)] - sizes_expected = self.sizes_expected or [(), (), (1,), (1,), (5,), (4, 5), (2, 4, 2)] + sizes_to_check = self.sizes_to_check or [ + None, + (), + 1, + (1,), + 5, + (4, 5), + (2, 4, 2), + ] + sizes_expected = self.sizes_expected or [ + (), + (), + (1,), + (1,), + (5,), + (4, 5), + (2, 4, 2), + ] for size, expected in zip(sizes_to_check, sizes_expected): pymc_rv = self.pymc_dist.dist(**self.pymc_dist_params, size=size) expected_symbolic = tuple(pymc_rv.shape.eval()) @@ -919,7 +936,11 @@ def check_rv_size(self): k: p * np.ones(self.repeated_params_shape) for k, p in self.pymc_dist_params.items() } self._instantiate_pymc_rv(params) - sizes_to_check = [None, self.repeated_params_shape, (5, self.repeated_params_shape)] + sizes_to_check = [ + None, + self.repeated_params_shape, + (5, self.repeated_params_shape), + ] sizes_expected = [ (self.repeated_params_shape,), (self.repeated_params_shape,), diff --git a/pymc/tuning/scaling.py b/pymc/tuning/scaling.py index be68a38c1d2..0b286dee6e5 100644 --- a/pymc/tuning/scaling.py +++ b/pymc/tuning/scaling.py @@ -100,7 +100,7 @@ def adjust_precision(tau, scaling_bound=1e-8): return exp(bounded) ** 2 -def bound(a, l, u): +def bound(a, l, u): # noqa E741 return np.maximum(np.minimum(a, u), l) From c7b1a7337b0d0aa8dd0b0b1bf9b0565c4730befa Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Mon, 8 Jan 2024 15:13:05 +0100 Subject: [PATCH 03/22] tests fix imports --- tests/distributions/test_continuous.py | 8 ++------ tests/distributions/test_discrete.py | 2 +- tests/distributions/test_dist_math.py | 2 +- tests/distributions/test_distribution.py | 1 - tests/distributions/test_mixture.py | 1 - tests/distributions/test_transform.py | 10 ++++------ tests/gp/test_hsgp_approx.py | 3 +-- tests/logprob/test_abstract.py | 1 - tests/logprob/test_basic.py | 3 --- tests/logprob/test_composite_logprob.py | 1 - tests/logprob/test_mixture.py | 4 ++-- tests/logprob/test_rewriting.py | 1 - tests/logprob/test_transform_value.py | 2 +- tests/logprob/utils.py | 1 - tests/model/test_core.py | 8 ++++---- tests/sampling/test_mcmc.py | 1 - tests/test_math.py | 3 --- 17 files changed, 16 insertions(+), 36 deletions(-) diff --git a/tests/distributions/test_continuous.py b/tests/distributions/test_continuous.py index 80bbb816c8f..697b09ac59e 100644 --- a/tests/distributions/test_continuous.py +++ b/tests/distributions/test_continuous.py @@ -27,7 +27,7 @@ import pymc as pm -from pymc.distributions.continuous import Normal, Uniform, get_tau_sigma, interpolated +from pymc.distributions.continuous import get_tau_sigma, interpolated from pymc.distributions.dist_math import clipped_beta_rvs from pymc.logprob.basic import icdf, logcdf, logp from pymc.logprob.utils import ParameterValueError @@ -37,7 +37,6 @@ Circ, Domain, R, - Rminusbig, Rplus, Rplusbig, Rplusunif, @@ -52,7 +51,6 @@ seeded_scipy_distribution_builder, select_by_precision, ) -from tests.logprob.utils import create_pytensor_params, 
scipy_logprob_tester try: from polyagamma import polyagamma_cdf, polyagamma_pdf, random_polyagamma @@ -417,9 +415,7 @@ def test_beta_logcdf(self): def test_kumaraswamy(self): # Scipy does not have a built-in Kumaraswamy def scipy_log_pdf(value, a, b): - return ( - np.log(a) + np.log(b) + (a - 1) * np.log(value) + (b - 1) * np.log(1 - value**a) - ) + return np.log(a) + np.log(b) + (a - 1) * np.log(value) + (b - 1) * np.log(1 - value**a) def scipy_log_cdf(value, a, b): return pm.math.log1mexp_numpy(b * np.log1p(-(value**a)), negative_input=True) diff --git a/tests/distributions/test_discrete.py b/tests/distributions/test_discrete.py index ee9ef24b2bd..e5934957407 100644 --- a/tests/distributions/test_discrete.py +++ b/tests/distributions/test_discrete.py @@ -28,7 +28,7 @@ import pymc as pm -from pymc.distributions.discrete import Geometric, _OrderedLogistic, _OrderedProbit +from pymc.distributions.discrete import _OrderedLogistic, _OrderedProbit from pymc.logprob.basic import icdf, logcdf, logp from pymc.logprob.utils import ParameterValueError from pymc.pytensorf import floatX diff --git a/tests/distributions/test_dist_math.py b/tests/distributions/test_dist_math.py index 1f469df7205..be98fccb3dc 100644 --- a/tests/distributions/test_dist_math.py +++ b/tests/distributions/test_dist_math.py @@ -19,7 +19,7 @@ from pytensor import config, function from pytensor.tensor.random.basic import multinomial -from scipy import interpolate, stats +from scipy import interpolate import pymc as pm diff --git a/tests/distributions/test_distribution.py b/tests/distributions/test_distribution.py index 380640891c9..06582ae1797 100644 --- a/tests/distributions/test_distribution.py +++ b/tests/distributions/test_distribution.py @@ -30,7 +30,6 @@ from pymc.distributions import ( Censored, - DiracDelta, Flat, HalfNormal, LogNormal, diff --git a/tests/distributions/test_mixture.py b/tests/distributions/test_mixture.py index 1228cb8680f..8dfa5a8ca8d 100644 --- a/tests/distributions/test_mixture.py +++ b/tests/distributions/test_mixture.py @@ -14,7 +14,6 @@ import warnings -from contextlib import ExitStack as does_not_raise import numpy as np import pytensor diff --git a/tests/distributions/test_transform.py b/tests/distributions/test_transform.py index 502a05349b2..b0187a4ebec 100644 --- a/tests/distributions/test_transform.py +++ b/tests/distributions/test_transform.py @@ -13,8 +13,6 @@ # limitations under the License. -from typing import Union - import numpy as np import pytensor import pytensor.tensor as pt @@ -623,12 +621,12 @@ def test_univariate_transform_multivariate_dist_raises(): with pm.Model() as m: pm.Dirichlet("x", [1, 1, 1], transform=tr.log) - for jacobian in (True, False): + for jacobian_val in (True, False): with pytest.raises( NotImplementedError, match="Univariate transform LogTransform cannot be applied to multivariate", ): - m.logp(jacobian=jacobian) + m.logp(jacobian=jacobian_val) def test_invalid_jacobian_broadcast_raises(): @@ -649,12 +647,12 @@ def log_jac_det(self, value, *inputs): with pm.Model() as m: pm.Uniform("x", shape=(4, 3), transform=buggy_transform) - for jacobian in (True, False): + for jacobian_val in (True, False): with pytest.raises( ValueError, match="are not allowed to broadcast together. 
There is a bug in the implementation of either one", ): - m.logp(jacobian=jacobian) + m.logp(jacobian=jacobian_val) def test_deprecated_ndim_supp_transforms(): diff --git a/tests/gp/test_hsgp_approx.py b/tests/gp/test_hsgp_approx.py index 2d50bda5ff3..0474899dbfd 100644 --- a/tests/gp/test_hsgp_approx.py +++ b/tests/gp/test_hsgp_approx.py @@ -17,7 +17,6 @@ import pytensor import pytensor.tensor as pt import pytest -import scipy as sp from scipy.spatial import distance @@ -198,7 +197,7 @@ def test_prior(self, model, cov_func, X1, parameterization, rng): h0, mmd, critical_value, reject = two_sample_test( samples1, samples2, n_sims=500, alpha=0.01 ) - assert not reject, f"H0 was rejected, even though HSGP and GP priors should match." + assert not reject, "H0 was rejected, even though HSGP and GP priors should match." @pytest.mark.parametrize( "cov_func,parameterization", diff --git a/tests/logprob/test_abstract.py b/tests/logprob/test_abstract.py index 6d8b54e0136..7a0bc61e78f 100644 --- a/tests/logprob/test_abstract.py +++ b/tests/logprob/test_abstract.py @@ -42,7 +42,6 @@ import scipy.stats.distributions as sp from pytensor.scalar import Exp, exp -from pytensor.tensor.random.basic import NormalRV import pymc as pm diff --git a/tests/logprob/test_basic.py b/tests/logprob/test_basic.py index 397ef16d450..165a436a427 100644 --- a/tests/logprob/test_basic.py +++ b/tests/logprob/test_basic.py @@ -47,10 +47,7 @@ from pytensor.tensor.subtensor import ( AdvancedIncSubtensor, AdvancedIncSubtensor1, - AdvancedSubtensor, - AdvancedSubtensor1, IncSubtensor, - Subtensor, ) import pymc as pm diff --git a/tests/logprob/test_composite_logprob.py b/tests/logprob/test_composite_logprob.py index d94e87bfc73..e4cdfc7dc3e 100644 --- a/tests/logprob/test_composite_logprob.py +++ b/tests/logprob/test_composite_logprob.py @@ -43,7 +43,6 @@ from pymc import draw, logp from pymc.logprob.abstract import MeasurableVariable from pymc.logprob.basic import conditional_logp -from pymc.logprob.censoring import MeasurableClip from pymc.logprob.rewriting import construct_ir_fgraph from pymc.testing import assert_no_rvs diff --git a/tests/logprob/test_mixture.py b/tests/logprob/test_mixture.py index 5b4b079dddf..b3e5c5656e3 100644 --- a/tests/logprob/test_mixture.py +++ b/tests/logprob/test_mixture.py @@ -41,7 +41,7 @@ import scipy.stats.distributions as sp from pytensor import function -from pytensor.graph.basic import Variable, equal_computations +from pytensor.graph.basic import Variable from pytensor.ifelse import ifelse from pytensor.tensor.random.basic import CategoricalRV from pytensor.tensor.shape import shape_tuple @@ -54,7 +54,7 @@ from pymc.logprob.abstract import MeasurableVariable from pymc.logprob.basic import conditional_logp, logp -from pymc.logprob.mixture import MeasurableSwitchMixture, MixtureRV, expand_indices +from pymc.logprob.mixture import MeasurableSwitchMixture, expand_indices from pymc.logprob.rewriting import construct_ir_fgraph from pymc.logprob.utils import dirac_delta from pymc.testing import assert_no_rvs diff --git a/tests/logprob/test_rewriting.py b/tests/logprob/test_rewriting.py index 49b76be697f..66c28b102de 100644 --- a/tests/logprob/test_rewriting.py +++ b/tests/logprob/test_rewriting.py @@ -35,7 +35,6 @@ # SOFTWARE. 
import numpy as np -import pytensor import pytensor.tensor as pt import pytest import scipy.stats.distributions as sp diff --git a/tests/logprob/test_transform_value.py b/tests/logprob/test_transform_value.py index 2c425d00615..0976a239443 100644 --- a/tests/logprob/test_transform_value.py +++ b/tests/logprob/test_transform_value.py @@ -550,7 +550,7 @@ def scan_step(prev_innov): innov = [] prev_innov = init for i in range(4): - next_innov = pt.random.beta(prev_innov * 10, (1 - prev_innov) * 10, name=f"innov[i]") + next_innov = pt.random.beta(prev_innov * 10, (1 - prev_innov) * 10, name="innov[i]") innov.append(next_innov) prev_innov = next_innov innov = pt.stack(innov) diff --git a/tests/logprob/utils.py b/tests/logprob/utils.py index ca333dac99a..ac0d1acabc8 100644 --- a/tests/logprob/utils.py +++ b/tests/logprob/utils.py @@ -34,7 +34,6 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -from typing import Optional import numpy as np diff --git a/tests/model/test_core.py b/tests/model/test_core.py index c5f1eb3592f..7bb8b28738d 100644 --- a/tests/model/test_core.py +++ b/tests/model/test_core.py @@ -17,7 +17,7 @@ import unittest import warnings -from unittest.mock import MagicMock, patch +from unittest.mock import patch import arviz as az import cloudpickle @@ -32,7 +32,7 @@ import scipy.stats as st from pytensor.graph import graph_inputs -from pytensor.raise_op import Assert, assert_op +from pytensor.raise_op import Assert from pytensor.tensor import TensorVariable from pytensor.tensor.random.op import RandomVariable from pytensor.tensor.sharedvar import ScalarSharedVariable @@ -1418,8 +1418,8 @@ def test_missing_multivariate_separable(self): a=[1, 2, 3], observed=np.array([[0.3, 0.3, 0.4], [np.nan, np.nan, np.nan]]), ) - assert (m_miss["x_unobserved"].owner.op, pm.Dirichlet) - assert (m_miss["x_observed"].owner.op, pm.Dirichlet) + assert isinstance(m_miss["x_unobserved"].owner.op, pm.Dirichlet) + assert isinstance(m_miss["x_observed"].owner.op, pm.Dirichlet) with pm.Model() as m_unobs: x = pm.Dirichlet("x", a=[1, 2, 3], shape=(1, 3)) diff --git a/tests/sampling/test_mcmc.py b/tests/sampling/test_mcmc.py index de51f7ee488..589788c9d32 100644 --- a/tests/sampling/test_mcmc.py +++ b/tests/sampling/test_mcmc.py @@ -16,7 +16,6 @@ import warnings from contextlib import ExitStack as does_not_raise -from copy import copy import numpy as np import numpy.testing as npt diff --git a/tests/test_math.py b/tests/test_math.py index 11482b62263..544bf4ce93e 100644 --- a/tests/test_math.py +++ b/tests/test_math.py @@ -24,19 +24,16 @@ LogDet, cartesian, expand_packed_triangular, - invlogit, invprobit, kron_dot, kron_solve_lower, kronecker, log1mexp, log1mexp_numpy, - log_softmax, logdet, logdiffexp, logdiffexp_numpy, probit, - softmax, ) from pymc.pytensorf import floatX from tests.helpers import verify_grad From 9981ca154ba03a88deaa96d16b119de6183017e5 Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Mon, 8 Jan 2024 15:13:57 +0100 Subject: [PATCH 04/22] fix scripts --- scripts/check_all_tests_are_covered.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/check_all_tests_are_covered.py b/scripts/check_all_tests_are_covered.py index c9cddeb9395..6717554da96 100644 --- a/scripts/check_all_tests_are_covered.py +++ b/scripts/check_all_tests_are_covered.py @@ -69,14 +69,14 @@ def from_yaml(): for os_, floatX, subset in itertools.product( matrix["os"], matrix["floatx"], matrix["test-subset"] ): - lines = [l for l in subset.split("\n") if 
l] + lines = [k for k in subset.split("\n") if k] if "windows" in os_: # Windows jobs need \ in line breaks within the test-subset! # The following checks that these trailing \ are present in # all items except the last. if lines and lines[-1].endswith(" \\"): raise Exception( - f"Last entry '{line}' in Windows test subset should end WITHOUT ' \\'." + f"Last entry '{lines}' in Windows test subset should end WITHOUT ' \\'." ) for line in lines[:-1]: if not line.endswith(" \\"): From c50bdf8c2e84c61953b892b8b80ea724bf1746b4 Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Mon, 8 Jan 2024 15:22:01 +0100 Subject: [PATCH 05/22] ruff formatter changes --- pymc/distributions/discrete.py | 8 ++++++++ pymc/distributions/multivariate.py | 16 +++++++++++++--- pymc/model/core.py | 6 ++---- pymc/ode/ode.py | 1 + pymc/printing.py | 4 +--- pymc/step_methods/metropolis.py | 6 +++--- pymc/tuning/starting.py | 2 +- pymc/variational/approximations.py | 13 ++++--------- pymc/variational/operators.py | 1 + pymc/variational/opvi.py | 6 ++++-- versioneer.py | 4 +--- 11 files changed, 39 insertions(+), 28 deletions(-) diff --git a/pymc/distributions/discrete.py b/pymc/distributions/discrete.py index f95b4374d47..877119350b0 100644 --- a/pymc/distributions/discrete.py +++ b/pymc/distributions/discrete.py @@ -112,6 +112,7 @@ class Binomial(Discrete): logit_p : tensor_like of float Alternative log odds for the probability of success. """ + rv_op = binomial @classmethod @@ -334,6 +335,7 @@ class Bernoulli(Discrete): logit_p : tensor_like of float Alternative log odds for the probability of success. """ + rv_op = bernoulli @classmethod @@ -450,6 +452,7 @@ def DiscreteWeibull(q, b, x): Shape parameter (beta > 0). """ + rv_op = discrete_weibull @classmethod @@ -539,6 +542,7 @@ class Poisson(Discrete): The Poisson distribution can be derived as a limiting case of the binomial distribution. """ + rv_op = poisson @classmethod @@ -662,6 +666,7 @@ def NegBinom(a, m, x): n : tensor_like of float Alternative number of target success trials (n > 0) """ + rv_op = nbinom @classmethod @@ -1108,6 +1113,7 @@ class Categorical(Discrete): logit_p : float Alternative log odds for the probability of success. """ + rv_op = categorical @classmethod @@ -1183,6 +1189,7 @@ class _OrderedLogistic(Categorical): Underlying class for ordered logistic distributions. See docs for the OrderedLogistic wrapper class for more details on how to use it in models. """ + rv_op = categorical @classmethod @@ -1289,6 +1296,7 @@ class _OrderedProbit(Categorical): Underlying class for ordered probit distributions. See docs for the OrderedProbit wrapper class for more details on how to use it in models. """ + rv_op = categorical @classmethod diff --git a/pymc/distributions/multivariate.py b/pymc/distributions/multivariate.py index 1e5a9567a4e..570c13988e9 100644 --- a/pymc/distributions/multivariate.py +++ b/pymc/distributions/multivariate.py @@ -235,6 +235,7 @@ class MvNormal(Continuous): vals_raw = pm.Normal('vals_raw', mu=0, sigma=1, shape=(5, 3)) vals = pm.Deterministic('vals', pt.dot(chol, vals_raw.T).T) """ + rv_op = multivariate_normal @classmethod @@ -355,6 +356,7 @@ class MvStudentT(Continuous): lower : bool, default=True Whether the cholesky fatcor is given as a lower triangular matrix. """ + rv_op = mv_studentt @classmethod @@ -436,6 +438,7 @@ class Dirichlet(SimplexContinuous): Concentration parameters (a > 0). The number of categories is given by the length of the last axis. 
""" + rv_op = dirichlet @classmethod @@ -515,6 +518,7 @@ class Multinomial(Discrete): categories is given by the length of the last axis. Elements are expected to sum to 1 along the last axis. """ + rv_op = multinomial @classmethod @@ -662,6 +666,7 @@ class DirichletMultinomial(Discrete): Dirichlet concentration parameters (a > 0). The number of categories is given by the length of the last axis. """ + rv_op = dirichlet_multinomial @classmethod @@ -716,6 +721,7 @@ class _OrderedMultinomial(Multinomial): Underlying class for ordered multinomial distributions. See docs for the OrderedMultinomial wrapper class for more details on how to use it in models. """ + rv_op = multinomial @classmethod @@ -940,6 +946,7 @@ class Wishart(Continuous): This distribution is unusable in a PyMC model. You should instead use LKJCholeskyCov or LKJCorr. """ + rv_op = wishart @classmethod @@ -1763,6 +1770,7 @@ class MatrixNormal(Continuous): vals = pm.MatrixNormal('vals', mu=mu, colchol=colchol, rowcov=rowcov, observed=data) """ + rv_op = matrixnormal @classmethod @@ -1977,6 +1985,7 @@ class KroneckerNormal(Continuous): ---------- .. [1] Saatchi, Y. (2011). "Scalable inference for structured Gaussian process models" """ + rv_op = kroneckernormal @classmethod @@ -2183,6 +2192,7 @@ class CAR(Continuous): "Generalized Hierarchical Multivariate CAR Models for Areal Data" Biometrics, Vol. 61, No. 4 (Dec., 2005), pp. 950-961 """ + rv_op = car @classmethod @@ -2400,9 +2410,7 @@ def moment(rv, size, W, node1, node2, N, sigma, zero_sum_stdev): return pt.zeros(N) def logp(value, W, node1, node2, N, sigma, zero_sum_stdev): - pairwise_difference = (-1 / (2 * sigma**2)) * pt.sum( - pt.square(value[node1] - value[node2]) - ) + pairwise_difference = (-1 / (2 * sigma**2)) * pt.sum(pt.square(value[node1] - value[node2])) zero_sum = ( -0.5 * pt.pow(pt.sum(value) / (zero_sum_stdev * N), 2) - pt.log(pt.sqrt(2.0 * np.pi)) @@ -2498,6 +2506,7 @@ class StickBreakingWeights(SimplexContinuous): .. [2] Müller, P., Quintana, F. A., Jara, A., & Hanson, T. (2015). Bayesian nonparametric data analysis. New York: Springer. """ + rv_op = stickbreakingweights @classmethod @@ -2641,6 +2650,7 @@ class ZeroSumNormal(Distribution): # the zero sum axes will be the last two v = pm.ZeroSumNormal("v", shape=(3, 4, 5), n_zerosum_axes=2) """ + rv_type = ZeroSumNormalRV def __new__( diff --git a/pymc/model/core.py b/pymc/model/core.py index c45f3f55052..6ee6d491a5c 100644 --- a/pymc/model/core.py +++ b/pymc/model/core.py @@ -138,9 +138,7 @@ def __exit__(self, typ, value, traceback): # pylint: disable=unused-argument # FIXME: is there a more elegant way to automatically add methods to the class that # are instance methods instead of class methods? 
- def __init__( - cls, name, bases, nmspc, context_class: Optional[Type] = None, **kwargs - ): # pylint: disable=unused-argument + def __init__(cls, name, bases, nmspc, context_class: Optional[Type] = None, **kwargs): # pylint: disable=unused-argument """Add ``__enter__`` and ``__exit__`` methods to the new class automatically.""" if context_class is not None: cls._context_class = context_class @@ -1740,7 +1738,7 @@ def debug_parameters(rv): done = {} used_ids = {} for i, out in enumerate(rv_inputs.maker.fgraph.outputs): - print_(f"{i}: ", end=""), + (print_(f"{i}: ", end=""),) # Don't print useless deepcopys if out.owner and isinstance(out.owner.op, DeepCopyOp): out = out.owner.inputs[0] diff --git a/pymc/ode/ode.py b/pymc/ode/ode.py index a5e3741305e..600f30632ef 100644 --- a/pymc/ode/ode.py +++ b/pymc/ode/ode.py @@ -67,6 +67,7 @@ def odefunc(y, t, p): ode_model = DifferentialEquation(func=odefunc, times=times, n_states=1, n_theta=1, t0=0) """ + _itypes = [ TensorType(floatX, (False,)), # y0 as 1D floatX vector TensorType(floatX, (False,)), # theta as 1D floatX vector diff --git a/pymc/printing.py b/pymc/printing.py index ffc943aa108..9fe7d056cfe 100644 --- a/pymc/printing.py +++ b/pymc/printing.py @@ -123,9 +123,7 @@ def str_for_model(model: Model, formatting: str = "plain", include_params: bool \begin{{array}}{{rcl}} {} \end{{array}} - $$""".format( - "\\\\".join(var_reprs) - ) + $$""".format("\\\\".join(var_reprs)) else: # align vars on their ~ names = [s[: s.index("~") - 1] for s in var_reprs] diff --git a/pymc/step_methods/metropolis.py b/pymc/step_methods/metropolis.py index 1adb462d946..e080cdd098c 100644 --- a/pymc/step_methods/metropolis.py +++ b/pymc/step_methods/metropolis.py @@ -134,7 +134,7 @@ def __init__( tune_interval=100, model=None, mode=None, - **kwargs + **kwargs, ): """Create an instance of a Metropolis stepper @@ -771,7 +771,7 @@ def __init__( tune_interval=100, model=None, mode=None, - **kwargs + **kwargs, ): model = pm.modelcontext(model) initial_values = model.initial_point() @@ -915,7 +915,7 @@ def __init__( tune_drop_fraction: float = 0.9, model=None, mode=None, - **kwargs + **kwargs, ): model = pm.modelcontext(model) initial_values = model.initial_point() diff --git a/pymc/tuning/starting.py b/pymc/tuning/starting.py index 6a4d33894c1..ad5f554aed2 100644 --- a/pymc/tuning/starting.py +++ b/pymc/tuning/starting.py @@ -52,7 +52,7 @@ def find_MAP( model=None, *args, seed: Optional[int] = None, - **kwargs + **kwargs, ): """Finds the local maximum a posteriori point given a model. 
diff --git a/pymc/variational/approximations.py b/pymc/variational/approximations.py
index 00df445997f..feb0a3a925f 100644
--- a/pymc/variational/approximations.py
+++ b/pymc/variational/approximations.py
@@ -46,6 +46,7 @@ class MeanFieldGroup(Group):
     that latent space variables are uncorrelated that is the main drawback
     of the method
     """
+
     __param_spec__ = dict(mu=("d",), rho=("d",))
     short_name = "mean_field"
     alias_names = frozenset(["mf"])
@@ -350,27 +351,21 @@ def __dir__(self):
 
 class MeanField(SingleGroupApproximation):
     __doc__ = """**Single Group Mean Field Approximation**
 
-    """ + str(
-        MeanFieldGroup.__doc__
-    )
+    """ + str(MeanFieldGroup.__doc__)
 
     _group_class = MeanFieldGroup
 
 
 class FullRank(SingleGroupApproximation):
     __doc__ = """**Single Group Full Rank Approximation**
 
-    """ + str(
-        FullRankGroup.__doc__
-    )
+    """ + str(FullRankGroup.__doc__)
 
     _group_class = FullRankGroup
 
 
 class Empirical(SingleGroupApproximation):
     __doc__ = """**Single Group Full Rank Approximation**
 
-    """ + str(
-        EmpiricalGroup.__doc__
-    )
+    """ + str(EmpiricalGroup.__doc__)
 
     _group_class = EmpiricalGroup
 
     def __init__(self, trace=None, size=None, **kwargs):
diff --git a/pymc/variational/operators.py b/pymc/variational/operators.py
index 1122a704b8d..f6ef0957234 100644
--- a/pymc/variational/operators.py
+++ b/pymc/variational/operators.py
@@ -130,6 +130,7 @@ class KSD(Operator):
     Stein Variational Gradient Descent: A General Purpose Bayesian Inference Algorithm
     arXiv:1608.04471
     """
+
     has_test_function = True
     returns_loss = False
     require_logq = False
diff --git a/pymc/variational/opvi.py b/pymc/variational/opvi.py
index cf98c985a5f..bd1874ffe8f 100644
--- a/pymc/variational/opvi.py
+++ b/pymc/variational/opvi.py
@@ -663,6 +663,7 @@ class Group(WithMemoization):
     -   Kingma, D. P., & Welling, M. (2014). `Auto-Encoding Variational
         Bayes. stat, 1050, 1. <https://arxiv.org/abs/1312.6114>`_
     """
+
     # needs to be defined in init
     shared_params = None
     symbolic_initial = None
@@ -709,8 +710,9 @@ def group_for_params(cls, params):
     def group_for_short_name(cls, name):
         if name.lower() not in cls.__name_registry:
             raise KeyError(
-                "No such group: {!r}, "
-                "only the following are supported\n\n{}".format(name, cls.__name_registry)
+                "No such group: {!r}, " "only the following are supported\n\n{}".format(
+                    name, cls.__name_registry
+                )
             )
         return cls.__name_registry[name.lower()]
diff --git a/versioneer.py b/versioneer.py
index a560e685fb9..c2b9d28bc04 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -432,9 +432,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=
     return stdout, process.returncode
 
 
-LONG_VERSION_PY[
-    "git"
-] = r'''
+LONG_VERSION_PY["git"] = r'''
 # This file helps to compute a version number in source trees obtained from
 # git-archive tarball (such as those provided by githubs download-from-tag
 # feature).
Distribution tarballs (built by setup.py sdist) and build From 8f54b96e1c66dfd8f4486aaf828b64aa3aa1590d Mon Sep 17 00:00:00 2001 From: Juan Orduz Date: Mon, 8 Jan 2024 16:48:06 +0100 Subject: [PATCH 06/22] Update pymc/model/core.py Co-authored-by: Ricardo Vieira <28983449+ricardoV94@users.noreply.github.com> --- pymc/model/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymc/model/core.py b/pymc/model/core.py index 6ee6d491a5c..0281387ad94 100644 --- a/pymc/model/core.py +++ b/pymc/model/core.py @@ -1738,7 +1738,7 @@ def debug_parameters(rv): done = {} used_ids = {} for i, out in enumerate(rv_inputs.maker.fgraph.outputs): - (print_(f"{i}: ", end=""),) + print_(f"{i}: ", end="") # Don't print useless deepcopys if out.owner and isinstance(out.owner.op, DeepCopyOp): out = out.owner.inputs[0] From bf843a4dfef3df67fce106b59bdf4bc3cf26850b Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Mon, 8 Jan 2024 18:31:09 +0100 Subject: [PATCH 07/22] fix line length --- .pre-commit-config.yaml | 1 + pymc/backends/arviz.py | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a915addf26d..00fe8e811ed 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -34,6 +34,7 @@ repos: - id: ruff args: ["--fix", "--show-source"] - id: ruff-format + args: ["--line-length=100"] - repo: https://github.com/PyCQA/pydocstyle rev: 6.3.0 hooks: diff --git a/pymc/backends/arviz.py b/pymc/backends/arviz.py index 1276fe3fdd5..5b8ccd3de65 100644 --- a/pymc/backends/arviz.py +++ b/pymc/backends/arviz.py @@ -241,9 +241,7 @@ def __init__( self.observations = find_observations(self.model) - def split_trace( - self, - ) -> Tuple[Union[None, "MultiTrace"], Union[None, "MultiTrace"]]: + def split_trace(self) -> Tuple[Union[None, "MultiTrace"], Union[None, "MultiTrace"]]: """Split MultiTrace object into posterior and warmup. 
Returns From 02422792b52bff19879480c749681117fffcc301 Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Mon, 8 Jan 2024 20:57:36 +0100 Subject: [PATCH 08/22] revert line length part 1 --- pymc/backends/arviz.py | 16 +++------------- pymc/distributions/continuous.py | 12 ++++-------- pymc/gp/hsgp_approx.py | 4 ++-- pymc/logprob/__init__.py | 2 ++ pymc/logprob/rewriting.py | 11 ++--------- pymc/sampling/mcmc.py | 4 +--- pymc/smc/sampling.py | 8 +------- pymc/testing.py | 32 +++++--------------------------- 8 files changed, 20 insertions(+), 69 deletions(-) diff --git a/pymc/backends/arviz.py b/pymc/backends/arviz.py index 5b8ccd3de65..b683fcf6f67 100644 --- a/pymc/backends/arviz.py +++ b/pymc/backends/arviz.py @@ -99,9 +99,7 @@ def is_data(name, var, model) -> bool: return constant_data -def coords_and_dims_for_inferencedata( - model: Model, -) -> Tuple[Dict[str, Any], Dict[str, Any]]: +def coords_and_dims_for_inferencedata(model: Model) -> Tuple[Dict[str, Any], Dict[str, Any]]: """Parse PyMC model coords and dims format to one accepted by InferenceData.""" coords = { cname: np.array(cvals) if isinstance(cvals, tuple) else cvals @@ -343,11 +341,7 @@ def posterior_predictive_to_xarray(self): data = self.posterior_predictive dims = {var_name: self.sample_dims + self.dims.get(var_name, []) for var_name in data} return dict_to_dataset( - data, - library=pymc, - coords=self.coords, - dims=dims, - default_dims=self.sample_dims, + data, library=pymc, coords=self.coords, dims=dims, default_dims=self.sample_dims ) @requires(["predictions"]) @@ -356,11 +350,7 @@ def predictions_to_xarray(self): data = self.predictions dims = {var_name: self.sample_dims + self.dims.get(var_name, []) for var_name in data} return dict_to_dataset( - data, - library=pymc, - coords=self.coords, - dims=dims, - default_dims=self.sample_dims, + data, library=pymc, coords=self.coords, dims=dims, default_dims=self.sample_dims ) def priors_to_xarray(self): diff --git a/pymc/distributions/continuous.py b/pymc/distributions/continuous.py index 5403f186e59..c80b0105c56 100644 --- a/pymc/distributions/continuous.py +++ b/pymc/distributions/continuous.py @@ -401,9 +401,7 @@ def logp(value): def logcdf(value): return pt.switch( - pt.eq(value, -np.inf), - -np.inf, - pt.switch(pt.eq(value, np.inf), 0, pt.log(0.5)), + pt.eq(value, -np.inf), -np.inf, pt.switch(pt.eq(value, np.inf), 0, pt.log(0.5)) ) @@ -1056,11 +1054,11 @@ def logp(value, mu, lam, alpha): def logcdf(value, mu, lam, alpha): value -= alpha q = value / mu - l = lam * mu # noqa E741 + ell = lam * mu r = pt.sqrt(value * lam) a = normal_lcdf(0, 1, (q - 1.0) / r) - b = 2.0 / l + normal_lcdf(0, 1, -(q + 1.0) / r) + b = 2.0 / ell + normal_lcdf(0, 1, -(q + 1.0) / r) logcdf = pt.switch( pt.le(value, 0), @@ -1533,9 +1531,7 @@ def logcdf(value, mu, b): def icdf(value, mu, b): res = pt.switch( - pt.le(value, 0.5), - mu + b * np.log(2 * value), - mu - b * np.log(2 - 2 * value), + pt.le(value, 0.5), mu + b * np.log(2 * value), mu - b * np.log(2 - 2 * value) ) res = check_icdf_value(res, value) return check_icdf_parameters(res, b > 0, msg="b > 0") diff --git a/pymc/gp/hsgp_approx.py b/pymc/gp/hsgp_approx.py index 04240c6792f..40231b027c9 100644 --- a/pymc/gp/hsgp_approx.py +++ b/pymc/gp/hsgp_approx.py @@ -332,7 +332,7 @@ def prior_linearized(self, Xs: TensorLike): omega = pt.sqrt(eigvals) psd = self.cov_func.power_spectral_density(omega) - i = int(self._drop_first == True) # noqa E712 + i = int(self._drop_first is True) return phi[:, i:], pt.sqrt(psd[i:]) def prior(self, name: str, X: 
TensorLike, dims: Optional[str] = None): # type: ignore @@ -382,7 +382,7 @@ def _build_conditional(self, Xnew): eigvals = calc_eigenvalues(self.L, self._m, tl=pt) phi = calc_eigenvectors(Xnew - X_mean, self.L, eigvals, self._m, tl=pt) - i = int(self._drop_first == True) # noqa E712 + i = int(self._drop_first is True) if self._parameterization == "noncentered": return self.mean_func(Xnew) + phi[:, i:] @ (beta * sqrt_psd) diff --git a/pymc/logprob/__init__.py b/pymc/logprob/__init__.py index bed9ee3a9c8..89e9e3b6430 100644 --- a/pymc/logprob/__init__.py +++ b/pymc/logprob/__init__.py @@ -42,6 +42,7 @@ transformed_conditional_logp, ) +# isort: off # Add rewrites to the DBs import pymc.logprob.binary import pymc.logprob.censoring @@ -53,6 +54,7 @@ import pymc.logprob.tensor import pymc.logprob.transforms +# isort: on __all__ = ( "logp", diff --git a/pymc/logprob/rewriting.py b/pymc/logprob/rewriting.py index 77b753a8021..b79815fb96c 100644 --- a/pymc/logprob/rewriting.py +++ b/pymc/logprob/rewriting.py @@ -145,9 +145,7 @@ def apply(self, fgraph): continue if not any(out in rv_map_feature.needs_measuring for out in node.outputs): continue - for node_rewriter in self.node_tracker.get_trackers( # noqa: F402 - node.op - ): # noqa: F402 + for node_rewriter in self.node_tracker.get_trackers(node.op): # noqa: F402 node_rewriter_change = self.process_node(fgraph, node, node_rewriter) if not node_rewriter_change: continue @@ -391,12 +389,7 @@ def incsubtensor_rv_replace(fgraph, node): cleanup_ir_rewrites_db.name = "cleanup_ir_rewrites_db" logprob_rewrites_db.register( "cleanup_ir_rewrites", - TopoDB( - cleanup_ir_rewrites_db, - order="out_to_in", - ignore_newtrees=True, - failure_callback=None, - ), + TopoDB(cleanup_ir_rewrites_db, order="out_to_in", ignore_newtrees=True, failure_callback=None), "cleanup", ) diff --git a/pymc/sampling/mcmc.py b/pymc/sampling/mcmc.py index 748d16fae28..b2c68da9899 100644 --- a/pymc/sampling/mcmc.py +++ b/pymc/sampling/mcmc.py @@ -1104,9 +1104,7 @@ def _iter_sample( point, stats = step.step(point) trace.record(point, stats) log_warning_stats(stats) - diverging = ( - i > tune and len(stats) > 0 and (stats[0].get("diverging") == True) # noqa E712 - ) # noqa E712 + diverging = i > tune and len(stats) > 0 and (stats[0].get("diverging") is True) if callback is not None: callback( trace=trace, diff --git a/pymc/smc/sampling.py b/pymc/smc/sampling.py index 37c0a61e074..65a682a61f6 100644 --- a/pymc/smc/sampling.py +++ b/pymc/smc/sampling.py @@ -211,13 +211,7 @@ def sample_smc( if cores > 1: results = run_chains_parallel( - chains, - progressbar, - _sample_smc_int, - params, - random_seed, - kernel_kwargs, - cores, + chains, progressbar, _sample_smc_int, params, random_seed, kernel_kwargs, cores ) else: results = run_chains_sequential( diff --git a/pymc/testing.py b/pymc/testing.py index f0307af6fbf..fc8938fea6c 100644 --- a/pymc/testing.py +++ b/pymc/testing.py @@ -211,7 +211,7 @@ def RandomPdMatrix(n): Rdunif = Domain([-np.inf, -1, 0, 1, np.inf], "int64") Rplusunif = Domain([0, 0.5, np.inf]) Rplusdunif = Domain([0, 10, np.inf], "int64") -I = Domain([-np.inf, -3, -2, -1, 0, 1, 2, 3, np.inf], "int64") # noqa E741 +I = Domain([-np.inf, -3, -2, -1, 0, 1, 2, 3, np.inf], "int64") # noqa: E741 NatSmall = Domain([0, 3, 4, 5, np.inf], "int64") Nat = Domain([0, 1, 2, 3, np.inf], "int64") NatBig = Domain([0, 1, 2, 3, 5000, np.inf], "int64") @@ -875,9 +875,7 @@ def get_random_state(self, reset=False): def _instantiate_pymc_rv(self, dist_params=None): params = dist_params if dist_params 
else self.pymc_dist_params self.pymc_rv = self.pymc_dist.dist( - **params, - size=self.size, - rng=pytensor.shared(self.get_random_state(reset=True)), + **params, size=self.size, rng=pytensor.shared(self.get_random_state(reset=True)) ) def check_pymc_draws_match_reference(self): @@ -901,24 +899,8 @@ def check_pymc_params_match_rv_op(self): def check_rv_size(self): # test sizes - sizes_to_check = self.sizes_to_check or [ - None, - (), - 1, - (1,), - 5, - (4, 5), - (2, 4, 2), - ] - sizes_expected = self.sizes_expected or [ - (), - (), - (1,), - (1,), - (5,), - (4, 5), - (2, 4, 2), - ] + sizes_to_check = self.sizes_to_check or [None, (), 1, (1,), 5, (4, 5), (2, 4, 2)] + sizes_expected = self.sizes_expected or [(), (), (1,), (1,), (5,), (4, 5), (2, 4, 2)] for size, expected in zip(sizes_to_check, sizes_expected): pymc_rv = self.pymc_dist.dist(**self.pymc_dist_params, size=size) expected_symbolic = tuple(pymc_rv.shape.eval()) @@ -936,11 +918,7 @@ def check_rv_size(self): k: p * np.ones(self.repeated_params_shape) for k, p in self.pymc_dist_params.items() } self._instantiate_pymc_rv(params) - sizes_to_check = [ - None, - self.repeated_params_shape, - (5, self.repeated_params_shape), - ] + sizes_to_check = [None, self.repeated_params_shape, (5, self.repeated_params_shape)] sizes_expected = [ (self.repeated_params_shape,), (self.repeated_params_shape,), From 10b5bb130ab8027b9564810200687874119982a1 Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Mon, 8 Jan 2024 21:03:22 +0100 Subject: [PATCH 09/22] undo : --- pymc/testing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymc/testing.py b/pymc/testing.py index fc8938fea6c..a227db06cc3 100644 --- a/pymc/testing.py +++ b/pymc/testing.py @@ -211,7 +211,7 @@ def RandomPdMatrix(n): Rdunif = Domain([-np.inf, -1, 0, 1, np.inf], "int64") Rplusunif = Domain([0, 0.5, np.inf]) Rplusdunif = Domain([0, 10, np.inf], "int64") -I = Domain([-np.inf, -3, -2, -1, 0, 1, 2, 3, np.inf], "int64") # noqa: E741 +I = Domain([-np.inf, -3, -2, -1, 0, 1, 2, 3, np.inf], "int64") # noqa E741 NatSmall = Domain([0, 3, 4, 5, np.inf], "int64") Nat = Domain([0, 1, 2, 3, np.inf], "int64") NatBig = Domain([0, 1, 2, 3, 5000, np.inf], "int64") From 60a375b3e3c94b5a50f73284a7051708d0d5a474 Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Mon, 8 Jan 2024 21:13:54 +0100 Subject: [PATCH 10/22] remove pylint and isort mentions --- docs/source/conf.py | 3 +-- pymc/__init__.py | 1 - pymc/backends/arviz.py | 8 ++++---- pymc/backends/base.py | 1 - pymc/gp/util.py | 2 +- pymc/logprob/__init__.py | 2 -- pymc/logprob/rewriting.py | 2 +- pymc/math.py | 6 ++---- pymc/model/core.py | 6 +++--- pymc/sampling_jax.py | 2 -- pymc/stats/log_likelihood.py | 2 -- tests/backends/test_arviz.py | 13 +++++-------- tests/distributions/test_continuous.py | 2 -- tests/distributions/test_multivariate.py | 4 ---- tests/distributions/test_timeseries.py | 2 -- tests/logprob/test_transform_value.py | 2 +- tests/model/test_core.py | 2 +- tests/sampling/test_forward.py | 2 +- versioneer.py | 5 ----- 19 files changed, 20 insertions(+), 47 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 6ed887214e1..9ef1d527ac3 100755 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,6 +1,5 @@ """ Sphinx configuration file. 
- isort:skip_file """ #!/usr/bin/env python3 # @@ -19,7 +18,7 @@ import os from pathlib import Path -import pymc # isort:skip +import pymc # -- General configuration ------------------------------------------------ diff --git a/pymc/__init__.py b/pymc/__init__.py index 66d61e8abf3..83d147a3a95 100644 --- a/pymc/__init__.py +++ b/pymc/__init__.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# pylint: disable=wildcard-import import logging diff --git a/pymc/backends/arviz.py b/pymc/backends/arviz.py index b683fcf6f67..ef9925f28b3 100644 --- a/pymc/backends/arviz.py +++ b/pymc/backends/arviz.py @@ -15,7 +15,7 @@ import logging import warnings -from typing import ( # pylint: disable=unused-import +from typing import ( TYPE_CHECKING, Any, Dict, @@ -41,14 +41,14 @@ from pymc.util import get_default_varnames if TYPE_CHECKING: - from pymc.backends.base import MultiTrace # pylint: disable=invalid-name + from pymc.backends.base import MultiTrace ___all__ = [""] _log = logging.getLogger(__name__) # random variable object ... -Var = Any # pylint: disable=invalid-name +Var = Any def find_observations(model: "Model") -> Dict[str, Var]: @@ -163,7 +163,7 @@ def insert(self, k: str, v, idx: int): self.trace_dict[k][idx, :] = v -class InferenceDataConverter: # pylint: disable=too-many-instance-attributes +class InferenceDataConverter: """Encapsulate InferenceData specific logic.""" model: Optional[Model] = None diff --git a/pymc/backends/base.py b/pymc/backends/base.py index cf3dcfc227a..933e193cdf5 100644 --- a/pymc/backends/base.py +++ b/pymc/backends/base.py @@ -197,7 +197,6 @@ def _set_sampler_vars(self, sampler_vars): self.sampler_vars = sampler_vars - # pylint: disable=unused-argument def setup(self, draws, chain, sampler_vars=None) -> None: """Perform chain-specific setup. 
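
(Where one of the pragmas removed in this patch still needs a per-line suppression under ruff, the pylint message name maps onto a pyflakes code: for example, "# pylint: disable=unused-import" becomes "# noqa F401" and "# pylint: disable=wildcard-import" becomes "# noqa F403" — the same codes this series later lists under the __init__.py per-file ignores. Pragmas with no counterpart in the enabled rule set, such as the unused-argument disable just above, are dropped outright.)
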
diff --git a/pymc/gp/util.py b/pymc/gp/util.py index 5d91d3cb5a5..39eb3b6780e 100644 --- a/pymc/gp/util.py +++ b/pymc/gp/util.py @@ -27,7 +27,7 @@ from pymc.model import modelcontext from pymc.pytensorf import compile_pymc -_ = Distribution # keep both pylint and black happy +_ = Distribution JITTER_DEFAULT = 1e-6 diff --git a/pymc/logprob/__init__.py b/pymc/logprob/__init__.py index 89e9e3b6430..bed9ee3a9c8 100644 --- a/pymc/logprob/__init__.py +++ b/pymc/logprob/__init__.py @@ -42,7 +42,6 @@ transformed_conditional_logp, ) -# isort: off # Add rewrites to the DBs import pymc.logprob.binary import pymc.logprob.censoring @@ -54,7 +53,6 @@ import pymc.logprob.tensor import pymc.logprob.transforms -# isort: on __all__ = ( "logp", diff --git a/pymc/logprob/rewriting.py b/pymc/logprob/rewriting.py index b79815fb96c..2ed641f3b18 100644 --- a/pymc/logprob/rewriting.py +++ b/pymc/logprob/rewriting.py @@ -145,7 +145,7 @@ def apply(self, fgraph): continue if not any(out in rv_map_feature.needs_measuring for out in node.outputs): continue - for node_rewriter in self.node_tracker.get_trackers(node.op): # noqa: F402 + for node_rewriter in self.node_tracker.get_trackers(node.op): # noqa F402 node_rewriter_change = self.process_node(fgraph, node, node_rewriter) if not node_rewriter_change: continue diff --git a/pymc/math.py b/pymc/math.py index 6bbd4ec4ef2..ddfcaa44d45 100644 --- a/pymc/math.py +++ b/pymc/math.py @@ -21,14 +21,13 @@ import pytensor import pytensor.sparse import pytensor.tensor as pt -import pytensor.tensor.slinalg # pylint: disable=unused-import +import pytensor.tensor.slinalg import scipy as sp -import scipy.sparse # pylint: disable=unused-import +import scipy.sparse from pytensor.graph.basic import Apply from pytensor.graph.op import Op -# pylint: disable=unused-import from pytensor.tensor import ( abs, and_, @@ -99,7 +98,6 @@ from pymc.pytensorf import floatX, ix_, largest_common_dtype -# pylint: enable=unused-import __all__ = [ "abs", diff --git a/pymc/model/core.py b/pymc/model/core.py index 0281387ad94..01f35c45b78 100644 --- a/pymc/model/core.py +++ b/pymc/model/core.py @@ -109,7 +109,7 @@ class ContextMeta(type): the `with` statement. """ - def __new__(cls, name, bases, dct, **kwargs): # pylint: disable=unused-argument + def __new__(cls, name, bases, dct, **kwargs): """Add __enter__ and __exit__ methods to the class.""" def __enter__(self): @@ -121,7 +121,7 @@ def __enter__(self): self._config_context.__enter__() return self - def __exit__(self, typ, value, traceback): # pylint: disable=unused-argument + def __exit__(self, typ, value, traceback): self.__class__.context_class.get_contexts().pop() # self._pytensor_config is set in Model.__new__ if self._config_context: @@ -138,7 +138,7 @@ def __exit__(self, typ, value, traceback): # pylint: disable=unused-argument # FIXME: is there a more elegant way to automatically add methods to the class that # are instance methods instead of class methods? 
- def __init__(cls, name, bases, nmspc, context_class: Optional[Type] = None, **kwargs): # pylint: disable=unused-argument + def __init__(cls, name, bases, nmspc, context_class: Optional[Type] = None, **kwargs): """Add ``__enter__`` and ``__exit__`` methods to the new class automatically.""" if context_class is not None: cls._context_class = context_class diff --git a/pymc/sampling_jax.py b/pymc/sampling_jax.py index 6bb0a1631ec..21b5961a9c9 100644 --- a/pymc/sampling_jax.py +++ b/pymc/sampling_jax.py @@ -14,8 +14,6 @@ # This file exists only for backward-compatibility with imports like # `import pymc.sampling_jax` or `from pymc import sampling_jax`. -# pylint: disable=wildcard-import -# pylint: disable=unused-wildcard-import import warnings warnings.warn("This module is deprecated, use pymc.sampling.jax", DeprecationWarning) diff --git a/pymc/stats/log_likelihood.py b/pymc/stats/log_likelihood.py index c164b5a4f8e..a581b249662 100644 --- a/pymc/stats/log_likelihood.py +++ b/pymc/stats/log_likelihood.py @@ -69,7 +69,6 @@ def compute_log_likelihood( raise ValueError(f"var_names must refer to observed_RVs in the model. Got: {var_names}") # We need to temporarily disable transforms, because the InferenceData only keeps the untransformed values - # pylint: disable=used-before-assignment try: original_rvs_to_values = model.rvs_to_values original_rvs_to_transforms = model.rvs_to_transforms @@ -89,7 +88,6 @@ def compute_log_likelihood( finally: model.rvs_to_values = original_rvs_to_values model.rvs_to_transforms = original_rvs_to_transforms - # pylint: enable=used-before-assignment # Ignore Deterministics posterior_values = posterior[[rv.name for rv in model.free_RVs]] diff --git a/tests/backends/test_arviz.py b/tests/backends/test_arviz.py index 32ca2edce9c..e332d7742ad 100644 --- a/tests/backends/test_arviz.py +++ b/tests/backends/test_arviz.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# pylint: disable=no-member, invalid-name, redefined-outer-name, protected-access, too-many-public-methods import warnings from typing import Dict, Tuple @@ -359,9 +358,7 @@ def test_mv_missing_data_model(self): with model: mu = pm.Normal("mu", 0, 1, size=2) sd_dist = pm.HalfNormal.dist(1.0, size=2) - # pylint: disable=unpacking-non-sequence chol, *_ = pm.LKJCholeskyCov("chol_cov", n=2, eta=1, sd_dist=sd_dist) - # pylint: enable=unpacking-non-sequence with pytest.warns(ImputationWarning): y = pm.MvNormal("y", mu=mu, chol=chol, observed=data) inference_data = pm.sample( @@ -443,7 +440,7 @@ def test_constant_data(self, use_context): y = pm.MutableData("y", [1.0, 2.0, 3.0]) beta_sigma = pm.MutableData("beta_sigma", 1) beta = pm.Normal("beta", 0, beta_sigma) - obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable + obs = pm.Normal("obs", x * beta, 1, observed=y) trace = pm.sample(100, chains=2, tune=100, return_inferencedata=False) if use_context: inference_data = to_inference_data(trace=trace, log_likelihood=True) @@ -466,7 +463,7 @@ def test_predictions_constant_data(self): x = pm.ConstantData("x", [1.0, 2.0, 3.0]) y = pm.MutableData("y", [1.0, 2.0, 3.0]) beta = pm.Normal("beta", 0, 1) - obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable + obs = pm.Normal("obs", x * beta, 1, observed=y) trace = pm.sample(100, tune=100, return_inferencedata=False) inference_data = to_inference_data(trace) @@ -478,7 +475,7 @@ def test_predictions_constant_data(self): x = pm.MutableData("x", [1.0, 2.0]) y = pm.ConstantData("y", [1.0, 2.0]) beta = pm.Normal("beta", 0, 1) - obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable + obs = pm.Normal("obs", x * beta, 1, observed=y) predictive_trace = pm.sample_posterior_predictive( inference_data, return_inferencedata=False ) @@ -506,7 +503,7 @@ def test_no_trace(self): x = pm.ConstantData("x", [1.0, 2.0, 3.0]) y = pm.MutableData("y", [1.0, 2.0, 3.0]) beta = pm.Normal("beta", 0, 1) - obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable + obs = pm.Normal("obs", x * beta, 1, observed=y) idata = pm.sample(100, tune=100) prior = pm.sample_prior_predictive(return_inferencedata=False) posterior_predictive = pm.sample_posterior_predictive(idata, return_inferencedata=False) @@ -540,7 +537,7 @@ def test_priors_separation(self, use_context): x = pm.MutableData("x", [1.0, 2.0, 3.0]) y = pm.ConstantData("y", [1.0, 2.0, 3.0]) beta = pm.Normal("beta", 0, 1) - obs = pm.Normal("obs", x * beta, 1, observed=y) # pylint: disable=unused-variable + obs = pm.Normal("obs", x * beta, 1, observed=y) prior = pm.sample_prior_predictive(return_inferencedata=False) test_dict = { diff --git a/tests/distributions/test_continuous.py b/tests/distributions/test_continuous.py index 697b09ac59e..9cc441929ea 100644 --- a/tests/distributions/test_continuous.py +++ b/tests/distributions/test_continuous.py @@ -888,7 +888,6 @@ def test_moyal_icdf(self): def test_interpolated(self): for mu in R.vals: for sigma in Rplus.vals: - # pylint: disable=cell-var-from-loop xmin = mu - 5 * sigma xmax = mu + 5 * sigma @@ -2371,7 +2370,6 @@ def interpolated_rng_fn(self, size, mu, sigma, rng): def check_draws(self): for mu in R.vals: for sigma in Rplus.vals: - # pylint: disable=cell-var-from-loop rng = self.get_random_state() def ref_rand(size): diff --git a/tests/distributions/test_multivariate.py b/tests/distributions/test_multivariate.py index 3bee558124e..90c0c4a7ba0 100644 --- 
a/tests/distributions/test_multivariate.py +++ b/tests/distributions/test_multivariate.py @@ -1443,11 +1443,9 @@ def test_with_chol_rv(self): with pm.Model() as model: mu = pm.Normal("mu", 0.0, 1.0, size=3) sd_dist = pm.Exponential.dist(1.0, size=3) - # pylint: disable=unpacking-non-sequence chol, _, _ = pm.LKJCholeskyCov( "chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True ) - # pylint: enable=unpacking-non-sequence mv = pm.MvNormal("mv", mu, chol=chol, size=4) prior = pm.sample_prior_predictive(samples=10, return_inferencedata=False) @@ -1459,11 +1457,9 @@ def test_with_cov_rv( with pm.Model() as model: mu = pm.Normal("mu", 0.0, 1.0, shape=3) sd_dist = pm.Exponential.dist(1.0, shape=3) - # pylint: disable=unpacking-non-sequence chol, corr, stds = pm.LKJCholeskyCov( "chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True ) - # pylint: enable=unpacking-non-sequence mv = pm.MvNormal("mv", mu, cov=pm.math.dot(chol, chol.T), size=4) prior = pm.sample_prior_predictive(samples=10, return_inferencedata=False) diff --git a/tests/distributions/test_timeseries.py b/tests/distributions/test_timeseries.py index 7e9ecbbdd3f..5cc03c2633d 100644 --- a/tests/distributions/test_timeseries.py +++ b/tests/distributions/test_timeseries.py @@ -429,11 +429,9 @@ def test_mvgaussian_with_chol_cov_rv(self, param): with pm.Model() as model: mu = Normal("mu", 0.0, 1.0, shape=3) sd_dist = Exponential.dist(1.0, shape=3) - # pylint: disable=unpacking-non-sequence chol, corr, stds = LKJCholeskyCov( "chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True ) - # pylint: enable=unpacking-non-sequence with pytest.warns(UserWarning, match="Initial distribution not specified"): if param == "chol": mv = MvGaussianRandomWalk("mv", mu, chol=chol, shape=(10, 7, 3)) diff --git a/tests/logprob/test_transform_value.py b/tests/logprob/test_transform_value.py index 0976a239443..52cbe0a0062 100644 --- a/tests/logprob/test_transform_value.py +++ b/tests/logprob/test_transform_value.py @@ -257,7 +257,7 @@ def test_default_value_transform_logprob(pt_dist, dist_params, sp_dist, size): if a_val.ndim > 0: def jacobian_estimate_novec(value): - dim_diff = a_val.ndim - value.ndim # pylint: disable=cell-var-from-loop + dim_diff = a_val.ndim - value.ndim if dim_diff > 0: # Make sure the dimensions match the expected input # dimensions for the compiled backward transform function diff --git a/tests/model/test_core.py b/tests/model/test_core.py index 7bb8b28738d..d7c3f031a03 100644 --- a/tests/model/test_core.py +++ b/tests/model/test_core.py @@ -416,7 +416,7 @@ def test_multiple_observed_rv(): y2_data = np.random.randn(100) with pm.Model() as model: mu = pm.Normal("mu") - x = pm.CustomDist( # pylint: disable=unused-variable + x = pm.CustomDist( "x", mu, logp=lambda value, mu: pm.Normal.logp(value, mu, 1.0), observed=0.1 ) assert not model["x"] == model["mu"] diff --git a/tests/sampling/test_forward.py b/tests/sampling/test_forward.py index d719952c74e..b6b85709758 100644 --- a/tests/sampling/test_forward.py +++ b/tests/sampling/test_forward.py @@ -294,7 +294,7 @@ def test_lkj_cholesky_cov(self): with pm.Model() as model: mu = np.zeros(3) sd_dist = pm.Exponential.dist(1.0, size=3) - chol, corr, stds = pm.LKJCholeskyCov( # pylint: disable=unpacking-non-sequence + chol, corr, stds = pm.LKJCholeskyCov( "chol_packed", n=3, eta=2, sd_dist=sd_dist, compute_corr=True ) chol_packed = model["chol_packed"] diff --git a/versioneer.py b/versioneer.py index c2b9d28bc04..cf8adcc9ec8 100644 --- a/versioneer.py +++ b/versioneer.py @@ -273,11 +273,6 @@ 
 [travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
 
 """
-# pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring
-# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements
-# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error
-# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with
-# pylint:disable=attribute-defined-outside-init,too-many-arguments
 
 import configparser
 import errno

From 88402a7602ea2c12d15c3b2d784c42f1937b2e1e Mon Sep 17 00:00:00 2001
From: juanitorduz
Date: Mon, 8 Jan 2024 21:25:50 +0100
Subject: [PATCH 11/22] remove setup.cfg

---
 pyproject.toml | 12 +++++++++++-
 setup.cfg      | 12 ------------
 2 files changed, 11 insertions(+), 13 deletions(-)
 delete mode 100644 setup.cfg

diff --git a/pyproject.toml b/pyproject.toml
index 3178390c54c..5eaf1a5f326 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,4 +1,5 @@
 [tool.pytest.ini_options]
+testpaths = ["tests"]
 minversion = "6.0"
 xfail_strict = true
 addopts = ["--color=yes"]
@@ -9,7 +10,6 @@ line-length = 100
 
 [tool.ruff.lint]
 ignore-init-module-imports = true
 ignore = [
-  "E731", # Do not assign a lambda expression, use a def
   "F841", # Local variable name is assigned to but never used
 ]
@@ -22,3 +22,13 @@ exclude_lines = [
   "raise NotImplementedError",
   "if TYPE_CHECKING:",
 ]
+
+[tool.coverage.run]
+omit = ["*examples*"]
+
+[tool.versioneer]
+VCS = "git"
+style = "pep440"
+versionfile_source = "pymc/_version.py"
+versionfile_build = "pymc/_version.py"
+tag_prefix = "v"
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 5fec12d5d98..00000000000
--- a/setup.cfg
+++ /dev/null
@@ -1,12 +0,0 @@
-[tool:pytest]
-testpaths = tests
-
-[coverage:run]
-omit = *examples*
-
-[versioneer]
-VCS = git
-style = pep440
-versionfile_source = pymc/_version.py
-versionfile_build = pymc/_version.py
-tag_prefix = v

From 2302db53190e220ddfc49f12d260212625d6913b Mon Sep 17 00:00:00 2001
From: juanitorduz
Date: Mon, 8 Jan 2024 21:26:19 +0100
Subject: [PATCH 12/22] remove global ignore

---
 pymc/tuning/starting.py                  |  4 ++--
 tests/distributions/test_continuous.py   | 10 +++++-----
 tests/distributions/test_discrete.py     |  2 +-
 tests/distributions/test_distribution.py |  4 ++--
 tests/distributions/test_multivariate.py |  8 ++++----
 tests/distributions/test_timeseries.py   |  2 +-
 6 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/pymc/tuning/starting.py b/pymc/tuning/starting.py
index ad5f554aed2..c8ceff55cf9 100644
--- a/pymc/tuning/starting.py
+++ b/pymc/tuning/starting.py
@@ -132,7 +132,7 @@ def find_MAP(
     # TODO: If the mapping is fixed, we can simply create graphs for the
     # mapping and avoid all this bijection overhead
     compiled_logp_func = DictToArrayBijection.mapf(model.compile_logp(jacobian=False), start)
-    logp_func = lambda x: compiled_logp_func(RaveledVars(x, x0.point_map_info))
+    logp_func = lambda x: compiled_logp_func(RaveledVars(x, x0.point_map_info))  # noqa E731
 
     rvs = [model.values_to_rvs[vars_dict[name]] for name, _, _ in x0.point_map_info]
 
     try:
@@ -141,7 +141,7 @@ def find_MAP(
         compiled_dlogp_func = DictToArrayBijection.mapf(
             model.compile_dlogp(rvs, jacobian=False), start
         )
-        dlogp_func = lambda x: compiled_dlogp_func(RaveledVars(x, x0.point_map_info))
+        dlogp_func = lambda x: compiled_dlogp_func(RaveledVars(x, x0.point_map_info))  # noqa E731
         compute_gradient = True
     except (AttributeError, NotImplementedError, tg.NullTypeGradError):
         compute_gradient = False
diff --git
a/tests/distributions/test_continuous.py b/tests/distributions/test_continuous.py index 9cc441929ea..fd7c3320d26 100644 --- a/tests/distributions/test_continuous.py +++ b/tests/distributions/test_continuous.py @@ -1846,7 +1846,7 @@ def halfstudentt_rng_fn(self, df, loc, scale, size, rng): pymc_dist_params = {"nu": 5.0, "sigma": 2.0} expected_rv_op_params = {"nu": 5.0, "sigma": 2.0} reference_dist_params = {"df": 5.0, "loc": 0, "scale": 2.0} - reference_dist = lambda self: ft.partial(self.halfstudentt_rng_fn, rng=self.get_random_state()) + reference_dist = lambda self: ft.partial(self.halfstudentt_rng_fn, rng=self.get_random_state()) # noqa E731 checks_to_run = [ "check_pymc_params_match_rv_op", "check_pymc_draws_match_reference", @@ -2069,7 +2069,7 @@ def logit_normal_rng_fn(self, rng, size, loc, scale): pymc_dist_params = {"mu": 5.0, "sigma": 10.0} expected_rv_op_params = {"mu": 5.0, "sigma": 10.0} reference_dist_params = {"loc": 5.0, "scale": 10.0} - reference_dist = lambda self: ft.partial(self.logit_normal_rng_fn, rng=self.get_random_state()) + reference_dist = lambda self: ft.partial(self.logit_normal_rng_fn, rng=self.get_random_state()) # noqa E731 checks_to_run = [ "check_pymc_params_match_rv_op", "check_pymc_draws_match_reference", @@ -2140,7 +2140,7 @@ class TestBeta(BaseTestDistributionRandom): expected_rv_op_params = {"alpha": 2.0, "beta": 5.0} reference_dist_params = {"a": 2.0, "b": 5.0} size = 15 - reference_dist = lambda self: ft.partial(clipped_beta_rvs, random_state=self.get_random_state()) + reference_dist = lambda self: ft.partial(clipped_beta_rvs, random_state=self.get_random_state()) # noqa E731 checks_to_run = [ "check_pymc_params_match_rv_op", "check_pymc_draws_match_reference", @@ -2340,7 +2340,7 @@ def polyagamma_rng_fn(self, size, h, z, rng): pymc_dist_params = {"h": 1.0, "z": 0.0} expected_rv_op_params = {"h": 1.0, "z": 0.0} reference_dist_params = {"h": 1.0, "z": 0.0} - reference_dist = lambda self: ft.partial(self.polyagamma_rng_fn, rng=self.get_random_state()) + reference_dist = lambda self: ft.partial(self.polyagamma_rng_fn, rng=self.get_random_state()) # noqa E731 checks_to_run = [ "check_pymc_params_match_rv_op", "check_pymc_draws_match_reference", @@ -2361,7 +2361,7 @@ def interpolated_rng_fn(self, size, mu, sigma, rng): pymc_dist_params = {"x_points": x_points, "pdf_points": pdf_points} reference_dist_params = {"mu": mu, "sigma": sigma} - reference_dist = lambda self: ft.partial(self.interpolated_rng_fn, rng=self.get_random_state()) + reference_dist = lambda self: ft.partial(self.interpolated_rng_fn, rng=self.get_random_state()) # noqa E731 checks_to_run = [ "check_rv_size", "check_draws", diff --git a/tests/distributions/test_discrete.py b/tests/distributions/test_discrete.py index e5934957407..d4d5fcb673b 100644 --- a/tests/distributions/test_discrete.py +++ b/tests/distributions/test_discrete.py @@ -842,7 +842,7 @@ def discrete_uniform_rng_fn(self, size, lower, upper, rng): pymc_dist_params = {"lower": -1, "upper": 9} expected_rv_op_params = {"lower": -1, "upper": 9} reference_dist_params = {"lower": -1, "upper": 9} - reference_dist = lambda self: ft.partial( + reference_dist = lambda self: ft.partial( # noqa E731 self.discrete_uniform_rng_fn, rng=self.get_random_state() ) checks_to_run = [ diff --git a/tests/distributions/test_distribution.py b/tests/distributions/test_distribution.py index 06582ae1797..9f4beec8df7 100644 --- a/tests/distributions/test_distribution.py +++ b/tests/distributions/test_distribution.py @@ -301,7 +301,7 @@ def logp(value, 
mu): ) def test_custom_dist_default_moment_univariate(self, moment, size, expected): if moment == "custom_moment": - moment = lambda rv, size, *rv_inputs: 5 * pt.ones(size, dtype=rv.dtype) + moment = lambda rv, size, *rv_inputs: 5 * pt.ones(size, dtype=rv.dtype) # noqa E731 with pm.Model() as model: x = CustomDist("x", moment=moment, size=size) assert isinstance(x.owner.op, CustomDistRV) @@ -821,7 +821,7 @@ def diracdelta_rng_fn(self, size, c): pymc_dist_params = {"c": 3} expected_rv_op_params = {"c": 3} reference_dist_params = {"c": 3} - reference_dist = lambda self: self.diracdelta_rng_fn + reference_dist = lambda self: self.diracdelta_rng_fn # noqa E731 checks_to_run = [ "check_pymc_params_match_rv_op", "check_pymc_draws_match_reference", diff --git a/tests/distributions/test_multivariate.py b/tests/distributions/test_multivariate.py index 90c0c4a7ba0..b85fb65cda5 100644 --- a/tests/distributions/test_multivariate.py +++ b/tests/distributions/test_multivariate.py @@ -1778,7 +1778,7 @@ def mvstudentt_rng_fn(self, size, nu, mu, scale, rng): "mu": np.array([1.0, 2.0]), "scale": np.array([[2.0, 0.0], [0.0, 3.5]]), } - reference_dist = lambda self: ft.partial(self.mvstudentt_rng_fn, rng=self.get_random_state()) + reference_dist = lambda self: ft.partial(self.mvstudentt_rng_fn, rng=self.get_random_state()) # noqa E731 checks_to_run = [ "check_pymc_params_match_rv_op", "check_pymc_draws_match_reference", @@ -1981,7 +1981,7 @@ def wishart_rng_fn(self, size, nu, V, rng): (1, 3, 3), (4, 5, 3, 3), ] - reference_dist = lambda self: ft.partial(self.wishart_rng_fn, rng=self.get_random_state()) + reference_dist = lambda self: ft.partial(self.wishart_rng_fn, rng=self.get_random_state()) # noqa E731 checks_to_run = [ "check_rv_size", "check_pymc_params_match_rv_op", @@ -2110,7 +2110,7 @@ def kronecker_rng_fn(self, size, mu, covs=None, sigma=None, rng=None): sizes_to_check = [None, (), 1, (1,), 5, (4, 5), (2, 4, 2)] sizes_expected = [(N,), (N,), (1, N), (1, N), (5, N), (4, 5, N), (2, 4, 2, N)] - reference_dist = lambda self: ft.partial(self.kronecker_rng_fn, rng=self.get_random_state()) + reference_dist = lambda self: ft.partial(self.kronecker_rng_fn, rng=self.get_random_state()) # noqa E731 checks_to_run = [ "check_pymc_draws_match_reference", "check_rv_size", @@ -2366,7 +2366,7 @@ def test_mvnormal_no_cholesky_in_model_logp(): data = np.ones((batch_size, n)) pm.MvNormal("y", mu=mu, chol=pt.broadcast_to(chol, (batch_size, n, n)), observed=data) - contains_cholesky_op = lambda fgraph: any( + contains_cholesky_op = lambda fgraph: any( # noqa E731 isinstance(node.op, Cholesky) for node in fgraph.apply_nodes ) diff --git a/tests/distributions/test_timeseries.py b/tests/distributions/test_timeseries.py index 5cc03c2633d..69b5ce46be5 100644 --- a/tests/distributions/test_timeseries.py +++ b/tests/distributions/test_timeseries.py @@ -952,7 +952,7 @@ def _gen_sde_path(sde, pars, dt, n, x0): xs.append(xs[-1] + f * dt + np.sqrt(dt) * g * wt[i]) return np.array(xs) - sde = lambda x, lam: (lam * x, sig2) + sde = lambda x, lam: (lam * x, sig2) # noqa E731 x = floatX(_gen_sde_path(sde, (lam,), dt, N, 5.0)) z = x + numpy_rng.standard_normal(size=x.size) * sig2 # build model From 3130413219bcbf44213b28050f7c405d88dc8a2f Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Mon, 8 Jan 2024 21:34:58 +0100 Subject: [PATCH 13/22] undo remove setup.cfg --- pyproject.toml | 7 ------- setup.cfg | 6 ++++++ 2 files changed, 6 insertions(+), 7 deletions(-) create mode 100644 setup.cfg diff --git a/pyproject.toml b/pyproject.toml 
index 5eaf1a5f326..3178c81040a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,10 +25,3 @@ exclude_lines = [ [tool.coverage.run] omit = ["*examples*"] - -[tool.versioneer] -VCS = "git" -style = "pep440" -versionfile_source = "pymc/_version.py" -versionfile_build = "pymc/_version.py" -tag_prefix = "v" diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000000..c015cd4a343 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,6 @@ +[versioneer] +VCS = git +style = pep440 +versionfile_source = pymc/_version.py +versionfile_build = pymc/_version.py +tag_prefix = v From e6d3240c770cff1ca6610c068f3c1540b0c48492 Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Tue, 9 Jan 2024 10:50:30 +0100 Subject: [PATCH 14/22] add explanation __init__ ignore --- pyproject.toml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 3178c81040a..71c071e3f0f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,11 @@ ignore = [ ] [tool.ruff.extend-per-file-ignores] -"__init__.py" = ["E402", "F401", "F403"] +"__init__.py" = [ + "E402", # Module level import not at top of file + "F401", # Module imported but unused + "F403", # 'from module import *' used; unable to detect undefined names +] [tool.coverage.report] exclude_lines = [ From 6819090f9d2e65d80b6a257d63ea8bdea4900689 Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Tue, 9 Jan 2024 20:22:48 +0100 Subject: [PATCH 15/22] add isort --- pyproject.toml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 71c071e3f0f..33d78a798fe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,17 +8,29 @@ addopts = ["--color=yes"] line-length = 100 [tool.ruff.lint] +select = ["E4", "E7", "E9", "F", "I"] ignore-init-module-imports = true ignore = [ "F841", # Local variable name is assigned to but never used ] +[tool.ruff.lint.isort] +lines-between-types = 1 + [tool.ruff.extend-per-file-ignores] "__init__.py" = [ - "E402", # Module level import not at top of file "F401", # Module imported but unused "F403", # 'from module import *' used; unable to detect undefined names ] +"pymc/__init__.py" = [ + "E402", # Module level import not at top of file +] +"pymc/stats/__init__.py" = [ + "E402", # Module level import not at top of file +] +"pymc/logprob/__init__.py" = [ + "I001", # Import block is un-sorted or un-formatted +] [tool.coverage.report] exclude_lines = [ From c58501bed6eec60ded25671e28e627897d6bdc14 Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Tue, 9 Jan 2024 20:23:56 +0100 Subject: [PATCH 16/22] fix isort --- docs/source/conf.py | 1 + pymc/distributions/multivariate.py | 3 +-- pymc/distributions/transforms.py | 1 + pymc/math.py | 2 -- tests/distributions/test_mixture.py | 1 - 5 files changed, 3 insertions(+), 5 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 9ef1d527ac3..4dba6d9f7ba 100755 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -16,6 +16,7 @@ # serve to show the default. 
import os + from pathlib import Path import pymc diff --git a/pymc/distributions/multivariate.py b/pymc/distributions/multivariate.py index 570c13988e9..03ea4f3a5da 100644 --- a/pymc/distributions/multivariate.py +++ b/pymc/distributions/multivariate.py @@ -30,9 +30,8 @@ from pytensor.raise_op import Assert from pytensor.sparse.basic import sp_sum from pytensor.tensor import TensorConstant, gammaln, sigmoid -from pytensor.tensor.linalg import cholesky, det, eigh +from pytensor.tensor.linalg import cholesky, det, eigh, solve_triangular, trace from pytensor.tensor.linalg import inv as matrix_inverse -from pytensor.tensor.linalg import solve_triangular, trace from pytensor.tensor.random.basic import dirichlet, multinomial, multivariate_normal from pytensor.tensor.random.op import RandomVariable from pytensor.tensor.random.utils import ( diff --git a/pymc/distributions/transforms.py b/pymc/distributions/transforms.py index 3e2a5a06914..7c0d97100b3 100644 --- a/pymc/distributions/transforms.py +++ b/pymc/distributions/transforms.py @@ -18,6 +18,7 @@ import numpy as np import pytensor.tensor as pt + # ignore mypy error because it somehow considers that # "numpy.core.numeric has no attribute normalize_axis_tuple" from numpy.core.numeric import normalize_axis_tuple # type: ignore diff --git a/pymc/math.py b/pymc/math.py index ddfcaa44d45..7c9ceaa9ecb 100644 --- a/pymc/math.py +++ b/pymc/math.py @@ -27,7 +27,6 @@ from pytensor.graph.basic import Apply from pytensor.graph.op import Op - from pytensor.tensor import ( abs, and_, @@ -98,7 +97,6 @@ from pymc.pytensorf import floatX, ix_, largest_common_dtype - __all__ = [ "abs", "and_", diff --git a/tests/distributions/test_mixture.py b/tests/distributions/test_mixture.py index 8dfa5a8ca8d..be3e1bcbdf6 100644 --- a/tests/distributions/test_mixture.py +++ b/tests/distributions/test_mixture.py @@ -14,7 +14,6 @@ import warnings - import numpy as np import pytensor import pytest From 03d5ece8b15cd2aae8bf02b1b2a6c81f0194dd76 Mon Sep 17 00:00:00 2001 From: Juan Orduz Date: Thu, 11 Jan 2024 12:36:52 +0100 Subject: [PATCH 17/22] Update .pre-commit-config.yaml Co-authored-by: Ben Mares --- .pre-commit-config.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 00fe8e811ed..f665620df11 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,11 +23,6 @@ repos: - --exclude=scripts/ - --exclude=binder/ - --exclude=versioneer.py -- repo: https://github.com/asottile/pyupgrade - rev: v3.15.0 - hooks: - - id: pyupgrade - args: [--py37-plus] - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.1.11 hooks: From 92937f3e3c898a7c90b04b04341d4f1b75e275cf Mon Sep 17 00:00:00 2001 From: Juan Orduz Date: Thu, 11 Jan 2024 12:37:15 +0100 Subject: [PATCH 18/22] Update pyproject.toml Co-authored-by: Ben Mares --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 33d78a798fe..253520aaf94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ addopts = ["--color=yes"] line-length = 100 [tool.ruff.lint] -select = ["E4", "E7", "E9", "F", "I"] +select = ["E4", "E7", "E9", "F", "I", "UP"] ignore-init-module-imports = true ignore = [ "F841", # Local variable name is assigned to but never used From 2b3a34a0527b4258f6f18f0187e9116edf412f0e Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Thu, 11 Jan 2024 13:08:37 +0100 Subject: [PATCH 19/22] fix UP --- docs/source/contributing/python_style.md | 2 +- pymc/_version.py | 5 +---- 
pymc/gp/util.py | 2 +- pymc/sampling/population.py | 2 +- pymc/step_methods/arraystep.py | 6 ++---- pymc/step_methods/hmc/integration.py | 4 ++-- pymc/variational/inference.py | 10 ++-------- pymc/variational/opvi.py | 16 ++++++---------- pymc/variational/updates.py | 4 +--- versioneer.py | 8 ++------ 10 files changed, 19 insertions(+), 40 deletions(-) diff --git a/docs/source/contributing/python_style.md b/docs/source/contributing/python_style.md index def33f05314..5bb73d503ca 100644 --- a/docs/source/contributing/python_style.md +++ b/docs/source/contributing/python_style.md @@ -33,7 +33,7 @@ git commit -m "wip lol" --no-verify To skip one particular hook, you can set the `SKIP` environment variable. E.g. (on Linux): ```bash -SKIP=pyupgrade git commit -m "" +SKIP=ruff git commit -m "" ``` You can manually run all `pre-commit` hooks on all files with diff --git a/pymc/_version.py b/pymc/_version.py index 24c8e98bec7..ebbf5a3a2e7 100644 --- a/pymc/_version.py +++ b/pymc/_version.py @@ -153,10 +153,7 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): root = os.path.dirname(root) # up a level if verbose: - print( - "Tried directories %s but none started with prefix %s" - % (str(rootdirs), parentdir_prefix) - ) + print(f"Tried directories {str(rootdirs)} but none started with prefix {parentdir_prefix}") raise NotThisMethod("rootdir doesn't start with parentdir_prefix") diff --git a/pymc/gp/util.py b/pymc/gp/util.py index 39eb3b6780e..3f829ab002b 100644 --- a/pymc/gp/util.py +++ b/pymc/gp/util.py @@ -120,7 +120,7 @@ def kmeans_inducing_points(n_inducing, X, **kmeans_kwargs): "To use K-means initialization, " "please provide X as a type that " "can be cast to np.ndarray, instead " - "of {}".format(type(X)) + f"of {type(X)}" ) scaling = np.std(X, 0) # if std of a column is very small (zero), don't normalize that column diff --git a/pymc/sampling/population.py b/pymc/sampling/population.py index 6dbe5cf9084..5ed98c0a2fe 100644 --- a/pymc/sampling/population.py +++ b/pymc/sampling/population.py @@ -135,7 +135,7 @@ def warn_population_size( if has_demcmc and chains <= initial_point_model_size: warnings.warn( "DEMetropolis should be used with more chains than dimensions! " - "(The model has {} dimensions.)".format(initial_point_model_size), + f"(The model has {initial_point_model_size} dimensions.)", UserWarning, stacklevel=2, ) diff --git a/pymc/step_methods/arraystep.py b/pymc/step_methods/arraystep.py index 4c945ab51b4..35f1443d814 100644 --- a/pymc/step_methods/arraystep.py +++ b/pymc/step_methods/arraystep.py @@ -146,10 +146,8 @@ def link_population(self, population, chain_index): self.other_chains = [c for c in range(len(population)) if c != chain_index] if not len(self.other_chains) > 1: raise ValueError( - "Population is just {} + {}. " - "This is too small and the error should have been raised earlier.".format( - self.this_chain, self.other_chains - ) + f"Population is just {self.this_chain} + {self.other_chains}. " + "This is too small and the error should have been raised earlier." ) return diff --git a/pymc/step_methods/hmc/integration.py b/pymc/step_methods/hmc/integration.py index 8ca72ecc4f5..c8defa2e819 100644 --- a/pymc/step_methods/hmc/integration.py +++ b/pymc/step_methods/hmc/integration.py @@ -44,8 +44,8 @@ def __init__(self, potential: QuadPotential, logp_dlogp_func): self._dtype = self._logp_dlogp_func.dtype if self._potential.dtype != self._dtype: raise ValueError( - "dtypes of potential (%s) and logp function (%s)" - "don't match." 
% (self._potential.dtype, self._dtype) + f"dtypes of potential ({self._potential.dtype}) and logp function ({self._dtype})" + "don't match." ) def compute_state(self, q: RaveledVars, p: RaveledVars): diff --git a/pymc/variational/inference.py b/pymc/variational/inference.py index ce3bd02e298..6ee5815d145 100644 --- a/pymc/variational/inference.py +++ b/pymc/variational/inference.py @@ -244,17 +244,11 @@ def _infmean(input_array): if isinstance(e, StopIteration): logger.info(str(e)) if n < 10: - logger.info( - "Interrupted at {:,d} [{:.0f}%]: Loss = {:,.5g}".format( - i, 100 * i // n, scores[i] - ) - ) + logger.info(f"Interrupted at {i:,d} [{100 * i // n:.0f}%]: Loss = {scores[i]:,.5g}") else: avg_loss = _infmean(scores[min(0, i - 1000) : i + 1]) logger.info( - "Interrupted at {:,d} [{:.0f}%]: Average Loss = {:,.5g}".format( - i, 100 * i // n, avg_loss - ) + f"Interrupted at {i:,d} [{100 * i // n:.0f}%]: Average Loss = {avg_loss:,.5g}" ) else: if n == 0: diff --git a/pymc/variational/opvi.py b/pymc/variational/opvi.py index bd1874ffe8f..ae2e20ac876 100644 --- a/pymc/variational/opvi.py +++ b/pymc/variational/opvi.py @@ -458,8 +458,8 @@ def __init__(self, approx): self.approx = approx if self.require_logq and not approx.has_logq: raise ExplicitInferenceError( - "%s requires logq, but %s does not implement it" - "please change inference method" % (self, approx) + f"{self} requires logq, but {approx} does not implement it" + "please change inference method" ) inputs = property(lambda self: self.approx.inputs) @@ -510,9 +510,7 @@ def __call__(self, f=None): return self.objective_class(self, f) def __str__(self): # pragma: no cover - return "%(op)s[%(ap)s]" % dict( - op=self.__class__.__name__, ap=self.approx.__class__.__name__ - ) + return f"{self.__class__.__name__}[{self.approx.__class__.__name__}]" def collect_shared_to_list(params): @@ -701,8 +699,8 @@ def register(cls, sbcls): def group_for_params(cls, params): if frozenset(params) not in cls.__param_registry: raise KeyError( - "No such group for the following params: {!r}, " - "only the following are supported\n\n{}".format(params, cls.__param_registry) + f"No such group for the following params: {params!r}, " + f"only the following are supported\n\n{cls.__param_registry}" ) return cls.__param_registry[frozenset(params)] @@ -801,9 +799,7 @@ def _check_user_params(self, **kwargs): if givens != needed: raise ParametrizationError( "Passed parameters do not have a needed set of keys, " - "they should be equal, got {givens}, needed {needed}".format( - givens=givens, needed=needed - ) + f"they should be equal, got {givens}, needed {needed}" ) self._user_params = dict() spec = self.get_param_spec_for(d=self.ddim, **kwargs.pop("spec_kw", {})) diff --git a/pymc/variational/updates.py b/pymc/variational/updates.py index fa6e52b0e1f..4f46970d131 100644 --- a/pymc/variational/updates.py +++ b/pymc/variational/updates.py @@ -1005,9 +1005,7 @@ def norm_constraint(tensor_var, max_norm, norm_axes=None, epsilon=1e-7): elif ndim in [3, 4, 5]: # Conv{1,2,3}DLayer sum_over = tuple(range(1, ndim)) else: - raise ValueError( - "Unsupported tensor dimensionality {}." "Must specify `norm_axes`".format(ndim) - ) + raise ValueError(f"Unsupported tensor dimensionality {ndim}." 
"Must specify `norm_axes`") dtype = np.dtype(pytensor.config.floatX).type norms = pt.sqrt(pt.sum(pt.sqr(tensor_var), axis=sum_over, keepdims=True)) diff --git a/versioneer.py b/versioneer.py index cf8adcc9ec8..9c8f7b060f3 100644 --- a/versioneer.py +++ b/versioneer.py @@ -325,8 +325,7 @@ def get_root(): vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print( - "Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(my_path), versioneer_py) + f"Warning: build in {os.path.dirname(my_path)} is using versioneer.py from {versioneer_py}" ) except NameError: pass @@ -1384,10 +1383,7 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): root = os.path.dirname(root) # up a level if verbose: - print( - "Tried directories %s but none started with prefix %s" - % (str(rootdirs), parentdir_prefix) - ) + print(f"Tried directories {str(rootdirs)} but none started with prefix {parentdir_prefix}") raise NotThisMethod("rootdir doesn't start with parentdir_prefix") From fb3e290ad140d6436c99a1d15aad3ebf447a27f8 Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Thu, 11 Jan 2024 13:26:23 +0100 Subject: [PATCH 20/22] add D --- .pre-commit-config.yaml | 8 -------- pymc/testing.py | 3 ++- pymc/variational/opvi.py | 2 ++ pyproject.toml | 26 +++++++++++++++++++++++++- 4 files changed, 29 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f665620df11..93ddab110f4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,14 +30,6 @@ repos: args: ["--fix", "--show-source"] - id: ruff-format args: ["--line-length=100"] -- repo: https://github.com/PyCQA/pydocstyle - rev: 6.3.0 - hooks: - - id: pydocstyle - args: - - --ignore=D100,D101,D102,D103,D104,D105,D107,D200,D202,D203,D204,D205,D209,D212,D213,D301,D400,D401,D403,D413,D415,D417 - files: ^pymc/ - exclude: ^pymc/tests/ - repo: https://github.com/MarcoGorelli/madforhooks rev: 0.4.1 hooks: diff --git a/pymc/testing.py b/pymc/testing.py index a227db06cc3..ba3ff1f285c 100644 --- a/pymc/testing.py +++ b/pymc/testing.py @@ -61,11 +61,12 @@ def product(domains, n_samples=-1): """Get an iterator over a product of domains. Args: + ---- domains: a dictionary of (name, object) pairs, where the objects must be "domain-like", as in, have a `.vals` property n_samples: int, maximum samples to return. 
-1 to return whole product - Returns + Returns: ------- list of the cartesian product of the domains """ diff --git a/pymc/variational/opvi.py b/pymc/variational/opvi.py index ae2e20ac876..98b1247b12f 100644 --- a/pymc/variational/opvi.py +++ b/pymc/variational/opvi.py @@ -815,6 +815,7 @@ def _initial_type(self, name): ---------- name: str name for tensor + Returns ------- tensor @@ -828,6 +829,7 @@ def _input_type(self, name): ---------- name: str name for tensor + Returns ------- tensor diff --git a/pyproject.toml b/pyproject.toml index 253520aaf94..2a853c716b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,10 +8,32 @@ addopts = ["--color=yes"] line-length = 100 [tool.ruff.lint] -select = ["E4", "E7", "E9", "F", "I", "UP"] +select = ["D", "E4", "E7", "E9", "F", "I", "UP"] ignore-init-module-imports = true ignore = [ "F841", # Local variable name is assigned to but never used + "D100", + "D101", + "D102", + "D103", + "D104", + "D105", + "D107", + "D200", + "D202", + "D203", + "D204", + "D205", + "D209", + "D212", + "D213", + "D301", + "D400", + "D401", + "D403", + "D413", + "D415", + "D417", ] [tool.ruff.lint.isort] @@ -22,6 +44,7 @@ lines-between-types = 1 "F401", # Module imported but unused "F403", # 'from module import *' used; unable to detect undefined names ] +"docs/source/*" = ["D"] "pymc/__init__.py" = [ "E402", # Module level import not at top of file ] @@ -31,6 +54,7 @@ lines-between-types = 1 "pymc/logprob/__init__.py" = [ "I001", # Import block is un-sorted or un-formatted ] +"tests/*" = ["D"] [tool.coverage.report] exclude_lines = [ From 05c9c68edb3b42d5cc5a60a391d11c588651a508 Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Thu, 11 Jan 2024 13:30:39 +0100 Subject: [PATCH 21/22] add W --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2a853c716b8..1a331694754 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ addopts = ["--color=yes"] line-length = 100 [tool.ruff.lint] -select = ["D", "E4", "E7", "E9", "F", "I", "UP"] +select = ["D", "E4", "E7", "E9", "F", "I", "UP", "W"] ignore-init-module-imports = true ignore = [ "F841", # Local variable name is assigned to but never used From ad003e1b3581d45fa3f93b65599cdcfbfbe583df Mon Sep 17 00:00:00 2001 From: juanitorduz Date: Thu, 11 Jan 2024 14:24:36 +0100 Subject: [PATCH 22/22] add E and ignore E501 --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 1a331694754..c95d5532b44 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,9 +8,10 @@ addopts = ["--color=yes"] line-length = 100 [tool.ruff.lint] -select = ["D", "E4", "E7", "E9", "F", "I", "UP", "W"] +select = ["D", "E", "F", "I", "UP", "W"] ignore-init-module-imports = true ignore = [ + "E501", "F841", # Local variable name is assigned to but never used "D100", "D101",
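
For reference, this is the net ruff configuration the series converges on, consolidated into one sketch. It is a reconstruction obtained by applying patches 15-22 in order: the final hunk above is cut off mid-list, so the remaining D codes are carried over unchanged from patch 20, and the ignore array is regrouped for brevity.

    [tool.ruff]
    line-length = 100

    [tool.ruff.lint]
    select = ["D", "E", "F", "I", "UP", "W"]
    ignore-init-module-imports = true
    ignore = [
      "E501", # Line too long
      "F841", # Local variable name is assigned to but never used
      # pydocstyle codes previously passed to the pydocstyle hook via --ignore:
      "D100", "D101", "D102", "D103", "D104", "D105", "D107",
      "D200", "D202", "D203", "D204", "D205", "D209", "D212", "D213",
      "D301", "D400", "D401", "D403", "D413", "D415", "D417",
    ]

    [tool.ruff.lint.isort]
    lines-between-types = 1

    [tool.ruff.extend-per-file-ignores]
    "__init__.py" = ["F401", "F403"]
    "docs/source/*" = ["D"]
    "tests/*" = ["D"]
    "pymc/__init__.py" = ["E402"]
    "pymc/stats/__init__.py" = ["E402"]
    "pymc/logprob/__init__.py" = ["I001"]

With the hooks from patch 17 in place, running pre-commit run --all-files exercises exactly this configuration through ruff and ruff-format, replacing the separate isort, pyupgrade, pylint, and pydocstyle hooks removed earlier in the series.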