diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 1a12bdb..c4fd13e 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -2,9 +2,9 @@ name: Python package
 
 on:
   push:
-    branches: [ "master" ]
   pull_request:
-    branches: [ "master" ]
+  schedule:
+    - cron: '0 12 1 * *' # 12:00, first day of the month
 
 jobs:
   build:
@@ -15,7 +15,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-latest, macos-latest, windows-latest]
-        python-version: ["3.8", "3.11"]
+        python-version: ["3.9", "3.12"]
 
     steps:
     - uses: actions/checkout@v3
diff --git a/README.md b/README.md
index 26eeb92..9ffaaa7 100644
--- a/README.md
+++ b/README.md
@@ -80,7 +80,7 @@ When using this package please cite:
 
 ### Contributor
 
-* Lewis Blake
+* Lewis Blake,
 
 ### Contributions
 Contributions via pull requests are welcome.
diff --git a/setup.py b/setup.py
index 73306ad..a66cbd0 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
 
 setup(
     name="optimparallel",
-    version="0.1.2",
+    version="0.1.3",
     description="A parallel version of the L-BFGS-B optimizer of scipy.optimize.minimize().",
     py_modules=["optimparallel"],
     package_dir={"": "src"},
@@ -15,11 +15,10 @@
     long_description_content_type="text/markdown",
     classifiers=[
         "Development Status :: 3 - Alpha",
-        "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.5",
-        "Programming Language :: Python :: 3.6",
-        "Programming Language :: Python :: 3.7",
-        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.11",
+        "Programming Language :: Python :: 3.12",
         "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
         "Operating System :: OS Independent",
         "Topic :: Scientific/Engineering :: Mathematics",
diff --git a/src/optimparallel.py b/src/optimparallel.py
index 3e66146..3a945a1 100644
--- a/src/optimparallel.py
+++ b/src/optimparallel.py
@@ -13,14 +13,18 @@
 - optimparallel: same as `minimize_parallel()`
 - fmin_l_bfgs_b_parallel: parallel version of `scipy.optimize.fmin_l_bfgs_b()`
 """
+from __future__ import annotations
 import warnings
 import concurrent.futures
 import functools
 import itertools
 import numpy as np
-from scipy.optimize import minimize
+from scipy.optimize import minimize, Bounds
 import time
+from typing import Any, Callable
+from numpy.typing import ArrayLike
+
 
 __all__ = [
     "minimize_parallel",
@@ -33,15 +37,15 @@
 class EvalParallel:
     def __init__(
        self,
-        fun,
-        jac=None,
-        args=(),
-        eps=1e-8,
+        fun: Callable,
+        jac: Callable | None = None,
+        args: tuple[Any, ...] = (),
+        eps: float = 1e-8,
         executor=concurrent.futures.ProcessPoolExecutor(),
-        forward=True,
-        loginfo=False,
-        verbose=False,
-        n=1,
+        forward: bool = True,
+        loginfo: bool = False,
+        verbose: bool = False,
+        n: int = 1,
     ):
         self.fun_in = fun
         self.jac_in = jac
@@ -64,7 +68,9 @@ def __init__(
 
     # static helper methods are used for parallel execution with map()
     @staticmethod
-    def _eval_approx_args(args, eps_at, fun, x, eps):
+    def _eval_approx_args(
+        args: tuple[Any, ...], eps_at: int, fun: Callable, x: ArrayLike, eps: float
+    ):
         # 'fun' has additional 'args'
         if eps_at == 0:
             x_ = x
@@ -77,7 +83,7 @@ def _eval_approx_args(args, eps_at, fun, x, eps):
         return fun(x_, *args)
 
     @staticmethod
-    def _eval_approx(eps_at, fun, x, eps):
+    def _eval_approx(eps_at: int, fun: Callable, x: ArrayLike, eps: float):
         # 'fun' has no additional 'args'
         if eps_at == 0:
             x_ = x
@@ -90,20 +96,26 @@ def _eval_approx(eps_at, fun, x, eps):
         return fun(x_)
 
     @staticmethod
-    def _eval_fun_jac_args(args, which, fun, jac, x):
+    def _eval_fun_jac_args(
+        args: tuple[Any, ...],
+        which: int,
+        fun: Callable,
+        jac: Callable,
+        x: ArrayLike,
+    ):
         # 'fun' and 'jac' have additional 'args'
         if which == 0:
             return fun(x, *args)
         return np.array(jac(x, *args))
 
     @staticmethod
-    def _eval_fun_jac(which, fun, jac, x):
+    def _eval_fun_jac(which: int, fun: Callable, jac: Callable, x: ArrayLike):
         # 'fun' and 'jac' have no additional 'args'
         if which == 0:
             return fun(x)
         return np.array(jac(x))
 
-    def eval_parallel(self, x):
+    def eval_parallel(self, x: ArrayLike):
         # function to evaluate 'fun' and 'jac' in parallel
         # - if 'jac' is None, the gradient is computed numerically
         # - if 'forward' is True, the numerical gradient uses the
@@ -139,10 +151,10 @@ def eval_parallel(self, x):
             ret = np.array(list(ret))
             self.fun_val = ret[0]
             if self.forward:
-                self.jac_val = (ret[1: (len(x) + 1)] - self.fun_val) / self.eps
+                self.jac_val = (ret[1 : (len(x) + 1)] - self.fun_val) / self.eps
             else:
                 self.jac_val = (
-                    ret[1: (len(x) + 1)] - ret[(len(x) + 1): 2 * len(x) + 1]
+                    ret[1 : (len(x) + 1)] - ret[(len(x) + 1) : 2 * len(x) + 1]
                 ) / (2 * self.eps)
 
         # 'jac' function is not None
@@ -175,7 +187,7 @@ def eval_parallel(self, x):
                 self.info["jac"].append(self.jac_val[0])
         return None
 
-    def fun(self, x):
+    def fun(self, x: ArrayLike):
         self.eval_parallel(x=x)
         if self.verbose:
             print("fun(" + str(x) + ") = " + str(self.fun_val))
@@ -189,15 +201,15 @@ def jac(self, x):
 
 
 def minimize_parallel(
-    fun,
-    x0,
-    args=(),
-    jac=None,
-    bounds=None,
-    tol=None,
-    options=None,
-    callback=None,
-    parallel=None,
+    fun: Callable,
+    x0: ArrayLike,
+    args: tuple[Any, ...] = (),
+    jac: Callable | None = None,
+    bounds: Bounds | None = None,
+    tol: float | None = None,
+    options: dict | None = None,
+    callback: Callable | None = None,
+    parallel: dict | None = None,
 ):
     """
     A parallel version of the L-BFGS-B optimizer of
@@ -368,7 +380,7 @@ def minimize_parallel(
         "verbose": False,
         "loginfo": False,
         "time": False,
-        "executor": None
+        "executor": None,
     }
     if parallel is not None:
         if not isinstance(parallel, dict):
@@ -378,9 +390,9 @@ def minimize_parallel(
    if parallel_used.get("time"):
        time_start = time.time()
 
-    if parallel_used.get('executor') is None:
+    if parallel_used.get("executor") is None:
         parallel_used["executor"] = concurrent.futures.ProcessPoolExecutor(
-                max_workers=parallel_used.get("max_workers", None)
+            max_workers=parallel_used.get("max_workers", None)
         )
 
     with parallel_used.get("executor") as executor:
@@ -426,23 +438,23 @@
 
 
 def fmin_l_bfgs_b_parallel(
-    func,
-    x0,
-    fprime=None,
-    args=(),
-    approx_grad=0,
-    bounds=None,
-    m=10,
-    factr=1e7,
-    pgtol=1e-5,
-    epsilon=1e-8,
-    iprint=-1,
-    maxfun=15000,
-    maxiter=15000,
-    disp=None,
-    callback=None,
-    maxls=20,
-    parallel=None,
+    func: Callable,
+    x0: ArrayLike,
+    fprime: Callable | None = None,
+    args: tuple[Any, ...] = (),
+    approx_grad: bool | None = 0,
+    bounds: Bounds | None = None,
+    m: int | None = 10,
+    factr: float | None = 1e7,
+    pgtol: float | None = 1e-5,
+    epsilon: float | None = 1e-8,
+    iprint: int | None = -1,
+    maxfun: int | None = 15000,
+    maxiter: int | None = 15000,
+    disp: int | None = None,
+    callback: Callable | None = None,
+    maxls: int | None = 20,
+    parallel: dict | None = None,
 ):
     """
     A parallel version of the L-BFGS-B optimizer `fmin_l_bfgs_b()`.
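The sketch below shows how the annotated `minimize_parallel()` signature and the `parallel` option keys visible in the defaults above (`max_workers`, `forward`, `verbose`, `loginfo`, `time`, `executor`) are used from a caller's side. It is not part of the diff: the objective function and starting point are made up for illustration, and it assumes the function returns a SciPy `OptimizeResult`, as the reference to `scipy.optimize.minimize()` in the docstring suggests.

```python
# Usage sketch for minimize_parallel(); objective and x0 are illustrative only.
import numpy as np
from optimparallel import minimize_parallel


def fun(x):
    # simple quadratic with its minimum at (3, 3)
    return np.sum((x - 3) ** 2)


if __name__ == "__main__":
    # The default executor is a ProcessPoolExecutor, so the objective must be
    # picklable and the call should sit under a __main__ guard.
    res = minimize_parallel(
        fun=fun,
        x0=np.array([10.0, 20.0]),
        parallel={"max_workers": 2, "forward": True, "verbose": False},
    )
    print(res.x, res.fun)
```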
diff --git a/test/test_fmin_l_bfgs_b_parallel.py b/test/test_fmin_l_bfgs_b_parallel.py
index 5233fb1..93be12d 100644
--- a/test/test_fmin_l_bfgs_b_parallel.py
+++ b/test/test_fmin_l_bfgs_b_parallel.py
@@ -1,4 +1,5 @@
 # test fmin_l_bfgs_b_parallel()
+from __future__ import annotations
 import pytest
 import itertools
 import numpy as np
@@ -10,15 +11,15 @@
 func, fprime = None, None
 
 
-def func0(x):
+def func0(x: np.typing.ArrayLike):
     return sum((x - 3) ** 2)
 
 
-def fprime0(x):
+def fprime0(x: np.typing.ArrayLike):
     return 2 * (x - 3)
 
 
-def func_arg1(x, a):
+def func_arg1(x: np.typing.ArrayLike, a: np.typing.ArrayLike):
     return sum((x - a) ** 2)
 
 
@@ -34,25 +35,25 @@ def fprime_arg2(x, a, b):
     return 2 * (x - a)
 
 
-def func_upper0(x, ub):
+def func_upper0(x: np.typing.ArrayLike, ub: float):
     if not any(x <= ub):
         raise ValueError("x has to be smaller than upper bound")
     return sum((x - 1) ** 2)
 
 
-def fprime_upper0(x, ub):
+def fprime_upper0(x: np.typing.ArrayLike, ub: float):
     if not any(x <= ub):
         raise ValueError("x has to be smaller than upper bound")
     return 2 * (x - 1)
 
 
-def func_lower0(x, ub):
+def func_lower0(x: np.typing.ArrayLike, ub: float):
     if not any(x >= ub):
         raise ValueError("x has to be larger than lower bound")
     return sum((x - 1) ** 2)
 
 
-def fprime_lower0(x, ub):
+def fprime_lower0(x: np.typing.ArrayLike, ub: float):
     if not any(x >= ub):
         raise ValueError("x has to be larger than lower bound")
     return 2 * (x - 1)
diff --git a/test/test_minimize_parallel.py b/test/test_minimize_parallel.py
index f5c63d6..7b096c6 100644
--- a/test/test_minimize_parallel.py
+++ b/test/test_minimize_parallel.py
@@ -1,4 +1,5 @@
 # test minimize_parallel()
+from __future__ import annotations
 import pytest
 import itertools
 import concurrent.futures
@@ -142,7 +143,6 @@ def check_minimize(
     TRACEBACKHIDE=True,
     ATOL=1e-5,
 ):
-
     """Helper function to minimize_parallel() against minimize()."""
     __tracebackhide__ = TRACEBACKHIDE
 
@@ -158,7 +158,7 @@ def check_minimize(
         "verbose": verbose,
         "executor": None,
     }
-    
+
     options = {
         "disp": disp,
         "maxcor": maxcor,
@@ -184,6 +184,7 @@ def check_minimize(
         ATOL=ATOL,
     )
 
+
 @pytest.mark.parametrize("fun_id", [0])
 @pytest.mark.parametrize("x0", [np.array([1]), np.array([1, 2])])
 @pytest.mark.parametrize("approx_grad", [True, False])
@@ -195,9 +196,22 @@
 @pytest.mark.parametrize("maxiter", [1, 1500])
 @pytest.mark.parametrize("disp", [None])
 @pytest.mark.parametrize("maxls", [20, 1])
-@pytest.mark.parametrize("executor", [None, concurrent.futures.ThreadPoolExecutor(max_workers=2)])
+@pytest.mark.parametrize(
+    "executor", [None, concurrent.futures.ThreadPoolExecutor(max_workers=2)]
+)
 def test_minimize_args0(
-    fun_id, x0, approx_grad, maxcor, ftol, gtol, eps, iprint, maxiter, disp, maxls, executor
+    fun_id,
+    x0,
+    approx_grad,
+    maxcor,
+    ftol,
+    gtol,
+    eps,
+    iprint,
+    maxiter,
+    disp,
+    maxls,
+    executor,
 ):
     check_minimize(
         fun_id=fun_id,
@@ -211,7 +225,7 @@ def test_minimize_args0(
         maxiter=maxiter,
         disp=disp,
         maxls=maxls,
-        executor=executor
+        executor=executor,
     )
 
 
@@ -247,7 +261,9 @@ def test_minimize_time(x0):
 @pytest.mark.parametrize("approx_grad", [True, False])
 @pytest.mark.parametrize("max_workers", [2, None])
 @pytest.mark.parametrize("forward", [True, False])
-@pytest.mark.parametrize("executor", [None, concurrent.futures.ThreadPoolExecutor(max_workers=2)])
+@pytest.mark.parametrize(
+    "executor", [None, concurrent.futures.ThreadPoolExecutor(max_workers=2)]
+)
 def test_minimize_args1(fun_id, x0, args, approx_grad, max_workers, forward, executor):
     check_minimize(
         fun_id=fun_id,
@@ -256,7 +272,7 @@ def test_minimize_args1(fun_id, x0, args, approx_grad, max_workers, forward, exe
         approx_grad=approx_grad,
         max_workers=max_workers,
         forward=forward,
-        executor=executor
+        executor=executor,
     )
 
 
@@ -267,7 +283,9 @@ def test_minimize_args1(fun_id, x0, args, approx_grad, max_workers, forward, exe
 @pytest.mark.parametrize("approx_grad", [True, False])
 @pytest.mark.parametrize("max_workers", [2, None])
 @pytest.mark.parametrize("forward", [True, False])
-@pytest.mark.parametrize("executor", [None, concurrent.futures.ThreadPoolExecutor(max_workers=2)])
+@pytest.mark.parametrize(
+    "executor", [None, concurrent.futures.ThreadPoolExecutor(max_workers=2)]
+)
 def test_minimize_args2(fun_id, x0, args, approx_grad, max_workers, forward, executor):
     check_minimize(
         fun_id=fun_id,
@@ -276,9 +294,10 @@ def test_minimize_args2(fun_id, x0, args, approx_grad, max_workers, forward, exe
         approx_grad=approx_grad,
         max_workers=max_workers,
         forward=forward,
-        executor=executor
+        executor=executor,
     )
 
+
 # test bounds upper -------------------------------
 @pytest.mark.parametrize("fun_id", ["_upper0"])
 @pytest.mark.parametrize("x0", [np.array([-9]), np.array([-9, -99])])
@@ -334,5 +353,3 @@ def test_minimize_tol(fun_id, approx_grad, x0, tol, options):
     fun = globals()["fun" + str(fun_id)]
     jac = None if approx_grad else globals()["jac" + str(fun_id)]
     compare_minimize(x0=x0, tol=tol, options=options)
-
-
diff --git a/tox.ini b/tox.ini
index defa2fc..b405fca 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,10 +6,10 @@
 [tox]
 # skipsdist = true
 envlist =
     # py
-    # py35
-    # py36
-    py37
-    # py38
+    py39
+    py310
+    py311
+    py312
     # pypy
 [testenv]
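For reference, the new `executor` parametrization in the tests corresponds to the call pattern sketched below. This is not part of the diff: the objective is illustrative, and it assumes `minimize_parallel()` returns a SciPy `OptimizeResult`. The context line `with parallel_used.get("executor") as executor:` in the source shows that the function enters the supplied executor's context manager itself, which is why no outer `with` block is used here.

```python
# Sketch of supplying a custom executor via parallel={"executor": ...},
# mirroring the ThreadPoolExecutor used in the new test parametrization.
import concurrent.futures
import numpy as np
from optimparallel import minimize_parallel


def fun(x):
    return np.sum((x - 3) ** 2)


# A thread pool avoids the pickling constraints of the default process pool,
# which is why the tests can construct it at collection time.
executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)

# minimize_parallel() enters the executor's context manager internally,
# so the pool is shut down when the call returns.
res = minimize_parallel(
    fun=fun, x0=np.array([1.0, 2.0]), parallel={"executor": executor}
)
print(res.x)
```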