diff --git a/src/estimagic/optimization/pounders_auxiliary.py b/src/estimagic/optimization/pounders_auxiliary.py
index 2853db70d..3f5b2158e 100644
--- a/src/estimagic/optimization/pounders_auxiliary.py
+++ b/src/estimagic/optimization/pounders_auxiliary.py
@@ -290,10 +290,11 @@ def solve_subproblem(
             "gtol_abs_conjugate_gradient": gtol_abs_conjugate_gradient,
             "gtol_rel_conjugate_gradient": gtol_rel_conjugate_gradient,
         }
-        result = bntr(main_model, lower_bounds, upper_bounds, **options)
+        result = bntr(main_model, lower_bounds, upper_bounds, x_candidate=x0, **options)
     elif solver == "gqtpar":
         result = gqtpar(
             main_model,
+            x_candidate=x0,
             k_easy=k_easy,
             k_hard=k_hard,
             maxiter=maxiter,
diff --git a/src/estimagic/optimization/subsolvers/bntr.py b/src/estimagic/optimization/subsolvers/bntr.py
index 397413b22..e54f9b492 100644
--- a/src/estimagic/optimization/subsolvers/bntr.py
+++ b/src/estimagic/optimization/subsolvers/bntr.py
@@ -26,6 +26,7 @@ def bntr(
     model,
     lower_bounds,
     upper_bounds,
+    x_candidate,
     *,
     conjugate_gradient_method,
     maxiter,
@@ -61,6 +62,7 @@ def bntr(
             for the parameter vector x.
         upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds
             for the parameter vector x.
+        x_candidate (np.ndarray): Initial guess for the solution of the subproblem.
         conjugate_gradient_method (str): Method for computing the conjugate gradient
             step. Available conjugate gradient methods are:
             - "cg"
@@ -105,8 +107,6 @@ def bntr(
         "default_radius": 100.00,
     }
 
-    x_candidate = np.zeros_like(model.linear_terms)
-
     (
         x_candidate,
         f_candidate,
diff --git a/src/estimagic/optimization/subsolvers/bntr_fast.py b/src/estimagic/optimization/subsolvers/bntr_fast.py
index cf93900d8..50f8c4ad6 100644
--- a/src/estimagic/optimization/subsolvers/bntr_fast.py
+++ b/src/estimagic/optimization/subsolvers/bntr_fast.py
@@ -18,6 +18,7 @@ def bntr_fast(
     model,
     lower_bounds,
     upper_bounds,
+    x_candidate,
     *,
     conjugate_gradient_method,
     maxiter,
@@ -56,6 +57,7 @@ def bntr_fast(
             for the parameter vector x.
         upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds
             for the parameter vector x.
+        x_candidate (np.ndarray): Initial guess for the solution of the subproblem.
         conjugate_gradient_method (str): Method for computing the conjugate gradient
             step. Available conjugate gradient methods are:
             - "cg"
@@ -99,6 +101,7 @@ def bntr_fast(
         model_hessian=model_hessian,
         lower_bounds=lower_bounds,
         upper_bounds=upper_bounds,
+        x_candidate=x_candidate,
         conjugate_gradient_method=conjugate_gradient_method,
         maxiter=maxiter,
         maxiter_gradient_descent=maxiter_gradient_descent,
@@ -126,6 +129,7 @@ def _bntr_fast_jitted(
     model_hessian,
     lower_bounds,
     upper_bounds,
+    x_candidate,
     conjugate_gradient_method,
     maxiter,
     maxiter_gradient_descent,
@@ -163,6 +167,7 @@ def _bntr_fast_jitted(
             for the parameter vector x.
         upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds
             for the parameter vector x.
+        x_candidate (np.ndarray): Initial guess for the solution of the subproblem.
         conjugate_gradient_method (str): Method for computing the conjugate gradient
            step. Available conjugate gradient methods are:
            - "cg"
@@ -209,6 +214,7 @@ def _bntr_fast_jitted(
         model_hessian,
         lower_bounds,
         upper_bounds,
+        x_candidate,
         maxiter_gradient_descent,
         gtol_abs,
         gtol_rel,
@@ -341,6 +347,7 @@ def _take_preliminary_gradient_descent_step_and_check_for_solution(
     model_hessian,
     lower_bounds,
     upper_bounds,
+    x_candidate,
     maxiter_gradient_descent,
     gtol_abs,
     gtol_rel,
@@ -357,6 +364,7 @@ def _take_preliminary_gradient_descent_step_and_check_for_solution(
             for the parameter vector x.
         upper_bounds (np.ndarray): 1d array of shape (n,) with upper bounds
             for the parameter vector x.
+        x_candidate (np.ndarray): Initial guess for the solution of the subproblem.
         maxiter_gradient_descent (int): Maximum number of iterations in performing
             gradient descent step
         gtol_abs (float): Convergence tolerance for the absolute gradient norm.
@@ -384,8 +392,6 @@ def _take_preliminary_gradient_descent_step_and_check_for_solution(
     converged = False
     convergence_reason = 0
 
-    x_candidate = np.zeros(len(model_gradient))
-
     criterion_candidate = _evaluate_model_criterion(
         x_candidate, model_gradient, model_hessian
     )
diff --git a/src/estimagic/optimization/subsolvers/gqtpar.py b/src/estimagic/optimization/subsolvers/gqtpar.py
index 170040c24..15b2bd5bb 100644
--- a/src/estimagic/optimization/subsolvers/gqtpar.py
+++ b/src/estimagic/optimization/subsolvers/gqtpar.py
@@ -19,7 +19,7 @@ class DampingFactors(NamedTuple):
     upper_bound: Union[float, None] = None
 
 
-def gqtpar(model, *, k_easy=0.1, k_hard=0.2, maxiter=200):
+def gqtpar(model, x_candidate, *, k_easy=0.1, k_hard=0.2, maxiter=200):
     """Solve the quadratic trust-region subproblem via nearly exact iterative method.
 
     This subproblem solver is mainly based on Conn et al. (2000) "Trust region methods"
@@ -50,11 +50,10 @@ def gqtpar(model, *, k_easy=0.1, k_hard=0.2, maxiter=200):
     See pp. 194-197 in :cite:`Conn2000` for a more detailed description.
 
     Args:
-        main_model (NamedTuple): NamedTuple containing the parameters of the
-            main model, i.e.:
+        model (NamedTuple): NamedTuple containing the parameters of the main model, i.e.
            - ``linear_terms``, a np.ndarray of shape (n,) and
            - ``square_terms``, a np.ndarray of shape (n,n).
-        trustregion_radius (float): Trustregion radius, often referred to as delta.
+        x_candidate (np.ndarray): Initial guess for the solution of the subproblem.
        k_easy (float): topping criterion for the "easy" case.
        k_hard (float): Stopping criterion for the "hard" case.
        maxiter (int): Maximum number of iterations to perform. If reached,
@@ -69,8 +68,6 @@ def gqtpar(model, *, k_easy=0.1, k_hard=0.2, maxiter=200):
     """
     hessian_info = HessianInfo()
 
-    x_candidate = np.zeros_like(model.linear_terms)
-
     # Small floating point number signaling that for vectors smaller
     # than that backward substituition is not reliable.
     # See Golub, G. H., Van Loan, C. F. (2013), "Matrix computations", p.165.
diff --git a/src/estimagic/optimization/subsolvers/gqtpar_fast.py b/src/estimagic/optimization/subsolvers/gqtpar_fast.py
index a7b96f9ad..26e4a8da0 100644
--- a/src/estimagic/optimization/subsolvers/gqtpar_fast.py
+++ b/src/estimagic/optimization/subsolvers/gqtpar_fast.py
@@ -5,7 +5,7 @@
 from scipy.linalg.lapack import dpotrf as compute_cholesky_factorization
 
 
-def gqtpar_fast(model, *, k_easy=0.1, k_hard=0.2, maxiter=200):
+def gqtpar_fast(model, x_candidate, *, k_easy=0.1, k_hard=0.2, maxiter=200):
     """Solve the quadratic trust-region subproblem via nearly exact iterative method.
 
     This subproblem solver is mainly based on Conn et al. (2000) "Trust region methods"
(2000) "Trust region methods" @@ -36,11 +36,10 @@ def gqtpar_fast(model, *, k_easy=0.1, k_hard=0.2, maxiter=200): See pp. 194-197 in :cite:`Conn2000` for a more detailed description. Args: - main_model (NamedTuple): NamedTuple containing the parameters of the - main model, i.e.: + model (NamedTuple): NamedTuple containing the parameters of the main model, i.e. - ``linear_terms``, a np.ndarray of shape (n,) and - ``square_terms``, a np.ndarray of shape (n,n). - trustregion_radius (float): Trustregion radius, often referred to as delta. + x_candidate (np.ndarray): Initial guess for the solution of the subproblem. k_easy (float): topping criterion for the "easy" case. k_hard (float): Stopping criterion for the "hard" case. maxiter (int): Maximum number of iterations to perform. If reached, @@ -56,7 +55,6 @@ def gqtpar_fast(model, *, k_easy=0.1, k_hard=0.2, maxiter=200): hessian_already_factorized = False model_gradient = model.linear_terms model_hessian = model.square_terms - x_candidate = np.zeros(len(model_gradient)) # Small floating point number signaling that for vectors smaller # than that backward substituition is not reliable. diff --git a/src/estimagic/optimization/tranquilo/process_arguments.py b/src/estimagic/optimization/tranquilo/process_arguments.py index 3836a292d..0c75d8cd9 100644 --- a/src/estimagic/optimization/tranquilo/process_arguments.py +++ b/src/estimagic/optimization/tranquilo/process_arguments.py @@ -25,7 +25,6 @@ get_default_radius_options, get_default_sample_size, get_default_search_radius_factor, - get_default_subsolver, update_option_bundle, ) from estimagic.optimization.tranquilo.region import Region @@ -156,13 +155,9 @@ def process_arguments( sample_points = get_sampler(sampler, sampler_options) solve_subproblem = get_subsolver( - solver=get_default_subsolver( - bounds=_bounds, - cube_subsolver=cube_subsolver, - sphere_subsolver=sphere_subsolver, - ), + cube_solver=cube_subsolver, + sphere_solver=sphere_subsolver, user_options=subsolver_options, - bounds=_bounds, ) filter_points = get_sample_filter( diff --git a/src/estimagic/optimization/tranquilo/solve_subproblem.py b/src/estimagic/optimization/tranquilo/solve_subproblem.py index 3ddeb3f15..58839a683 100644 --- a/src/estimagic/optimization/tranquilo/solve_subproblem.py +++ b/src/estimagic/optimization/tranquilo/solve_subproblem.py @@ -1,10 +1,9 @@ -import inspect -import warnings from functools import partial from typing import NamedTuple import numpy as np +from estimagic.optimization.tranquilo.get_component import get_component from estimagic.optimization.subsolvers.bntr import ( bntr, ) @@ -22,15 +21,21 @@ from estimagic.optimization.tranquilo.options import SubsolverOptions -def get_subsolver(solver, user_options=None, bounds=None): +def get_subsolver(sphere_solver, cube_solver, user_options=None): """Get an algorithm-function with partialled options. Args: - solver (str or callable): Name of a subproblem solver or subproblem solver. The - first argument of any subsolver needs to be ``model``. If the solver - supports bounds, the next arguments have to be ``lower_bounds`` and - ``upper_bounds``. Moreover, subsolvers can have any number of additional - keyword arguments. + sphere_solver (str or callable): Name of a subproblem solver or a subproblem + solver, designed to solve the problem in the unit sphere. The first argument + of any subsolver needs to be ``model``. The second argument needs to be + ``x_candidate``, an initial guess for the solution in the unit space. 
+            Moreover, subsolvers can have any number of additional keyword arguments.
+        cube_solver (str or callable): Name of a subproblem solver or a subproblem
+            solver, designed to solve the problem in the unit box. The first argument
+            of any subsolver needs to be ``model``. The second and third arguments have
+            to be ``lower_bounds`` and ``upper_bounds``. The fourth argument needs to be
+            ``x_candidate``, an initial guess for the solution in the unit space.
+            Moreover, subsolvers can have any number of additional keyword arguments.
         user_options (dict): Options for the subproblem solver. The following are
             supported:
             - maxiter (int): Maximum number of iterations to perform when solving the
@@ -57,173 +62,133 @@ def get_subsolver(solver, user_options=None, bounds=None):
              subproblem ("gqtpar").
            - k_hard (float): Stopping criterion for the "hard" case in the trust-region
              subproblem ("gqtpar").
-        bounds (Bounds):
 
     Returns:
         callable: The subsolver.
 
     """
-    user_options = {} if user_options is None else user_options
-
-    built_in_solvers = {
-        "bntr": bntr,
-        "bntr_fast": bntr_fast,
+    built_in_sphere_solvers = {
         "gqtpar": gqtpar,
         "gqtpar_fast": gqtpar_fast,
-        "multistart": solve_multistart,
         "slsqp_sphere": slsqp_sphere,
     }
 
-    if isinstance(solver, str) and solver in built_in_solvers:
-        _solver = built_in_solvers[solver]
-        _solver_name = solver
-    elif callable(solver):
-        _solver = solver
-        _solver_name = getattr(solver, "__name__", "your solver")
-    else:
-        raise ValueError(
-            "Invalid solver: {solver}. Must be one of {list(built_in_solvers)} "
-            "or a callable."
-        )
-
-    all_options = SubsolverOptions()._replace(**user_options)._asdict()
-
-    args = set(inspect.signature(_solver).parameters)
-
-    if "model" not in args:
-        raise ValueError("subproblem solvers need to take 'model' as first argument.")
-
-    valid_bounds = {"lower_bounds", "upper_bounds"}.intersection(args)
-
-    bounds_dict = {"lower_bounds": None, "upper_bounds": None}
-    if bounds is not None and bounds.has_any:
-        for type_ in ["lower", "upper"]:
-            candidate = getattr(bounds, type_)
-            if candidate is not None and np.isfinite(candidate).any():
-                bounds_dict[f"{type_}_bounds"] = candidate
-
-    for name, value in bounds_dict.items():
-        if name not in valid_bounds and value is not None:
-            raise ValueError(
-                f"You have {name} but requested a subproblem solver that does not "
-                "support them. Use bntr or another bounded subproblem solver instead."
-            )
-
-    bounds_dict = {k: v for k, v in bounds_dict.items() if k in valid_bounds}
-
-    not_options = {"model"} | valid_bounds
-    if isinstance(_solver, partial):
-        partialed_in = set(_solver.args).union(set(_solver.keywords))
-        not_options = not_options | partialed_in
-
-    valid_options = args - not_options
-
-    reduced = {key: val for key, val in all_options.items() if key in valid_options}
-
-    ignored = {
-        key: val for key, val in user_options.items() if key not in valid_options
+    built_in_cube_solvers = {
+        "bntr": bntr,
+        "bntr_fast": bntr_fast,
+        "multistart": solve_multistart,
     }
 
-    if ignored:
-        warnings.warn(
-            "The following options were ignored because they are not compatible "
-            f"with {_solver_name}:\n\n {ignored}"
-        )
+    _sphere_subsolver = get_component(
+        name_or_func=sphere_solver,
+        component_name="sphere_solver",
+        func_dict=built_in_sphere_solvers,
+        default_options=SubsolverOptions(),
+        user_options=user_options,
+        mandatory_signature=["model", "x_candidate"],
+    )
+
+    _cube_subsolver = get_component(
+        name_or_func=cube_solver,
+        component_name="cube_solver",
+        func_dict=built_in_cube_solvers,
+        default_options=SubsolverOptions(),
+        user_options=user_options,
+        mandatory_signature=["model", "x_candidate", "lower_bounds", "upper_bounds"],
+    )
 
-    out = partial(
-        _solve_subproblem_template, solver=_solver, bounds=bounds_dict, options=reduced
+    solver = partial(
+        _solve_subproblem_template,
+        sphere_solver=_sphere_subsolver,
+        cube_solver=_cube_subsolver,
     )
-    return out
+    return solver
 
 
 def _solve_subproblem_template(
     model,
     trustregion,
-    solver,
-    bounds,
-    options,
+    sphere_solver,
+    cube_solver,
 ):
     """Solve the quadratic subproblem.
 
     Args:
-        model (NamedTuple): NamedTuple containing the parameters of the fitted surrogate
-            model, i.e. ``linear_terms`` and ``square_terms``. The model is assumed to
-            be defined in terms of centered and scaled parameter vectors.
-        trustregion (NamedTuple): Contains ``center`` (np.ndarray) and ``radius``
-            (float). Used to center bounds.
-        solver (callable): Trust-region subsolver to use. All options must already be
-            partialled in such that the subsolver only depends on ``model``,
-            ``lower_bounds`` and ``upper_bounds``
-        bounds (dict): Dict containing the entries "lower_bounds" and "upper_bounds"
-            Bounds are assumed to be in terms of the original parameter space, i.e. not
-            centered yet.
-        options (dict): Solver specific options.
+        model (ScalarModel): The fitted model of which we want to find the minimum.
+        trustregion (Region): The trustregion on which the model was fitted.
+        sphere_solver (callable): Spherical subproblem solver, designed to solve the
+            problem in the unit sphere. The first argument of any subsolver needs to be
+            ``model``. The second argument needs to be ``x_candidate``, an initial guess
+            for the solution in the unit space. Moreover, subsolvers can have any number
+            of additional keyword arguments.
+        cube_solver (callable): Cubical subproblem solver, designed to solve the problem
+            in the unit box. The first argument of any subsolver needs to be ``model``.
+            The second and third arguments have to be ``lower_bounds`` and
+            ``upper_bounds``. The fourth argument needs to be ``x_candidate``, an
+            initial guess for the solution in the unit space. Moreover, subsolvers can
+            have any number of additional keyword arguments.
 
     Returns:
-        (dict): Result dictionary containing the following entries:
+        SubproblemResult: Namedtuple with the following entries:
            - "x" (np.ndarray): The optimal x in terms of the original parameter space.
- "expected_improvement" (float): The expected improvement at the solution. The sign has already been flipped, i.e. large means more improvement. - "n_iterations" (int): Number of iterations performed before termination. - "success" (bool): Boolean indicating whether a solution has been found before reaching maxiter. + - "x_unit" (np.ndarray): The optimal x in terms of the unit space. + - "shape" (str): Whether the trustregion was a sphere or a cube, which in + turn determines whether the sphere or cube solver was used. """ + old_x_unit = trustregion.map_to_unit(trustregion.center) - _bounds = _get_centered_and_scaled_bounds(bounds, trustregion) + solver = sphere_solver if trustregion.shape == "sphere" else cube_solver - raw_result = solver(model, **_bounds, **options) - - x = trustregion.map_from_unit(raw_result["x"]) - - if "lower_bounds" in bounds: - x = np.clip(x, bounds["lower_bounds"], np.inf) + raw_result = solver( + model=model, + x_candidate=old_x_unit, + # bounds can be passed to both solvers because the functions returned by + # `get_component` ignore redundant arguments. + lower_bounds=-np.ones_like(old_x_unit), + upper_bounds=np.ones_like(old_x_unit), + ) - if "upper_bounds" in bounds: - x = np.clip(x, -np.inf, bounds["upper_bounds"]) + if trustregion.shape == "cube": + raw_result["x"] = np.clip(raw_result["x"], -1.0, 1.0) # make sure expected improvement is calculated accurately in case of clipping and # does not depend on whether the subsolver ignores intercepts or not. - fval_old = model.predict(trustregion.map_to_unit(trustregion.center)) + fval_old = model.predict(old_x_unit) fval_candidate = model.predict(raw_result["x"]) expected_improvement = -(fval_candidate - fval_old) + # in case of negative expected improvement, we return the old point + if expected_improvement >= 0: + success = raw_result["success"] + x_unit = raw_result["x"] + x = trustregion.map_from_unit(raw_result["x"]) + else: + success = False + x_unit = old_x_unit + x = trustregion.center + expected_improvement = 0.0 + result = SubproblemResult( x=x, expected_improvement=expected_improvement, n_iterations=raw_result["n_iterations"], - success=raw_result["success"], - x_unit=raw_result["x"], + success=success, + x_unit=x_unit, shape=trustregion.shape, ) return result -def _get_centered_and_scaled_bounds(bounds, trustregion): - out = {} - n_params = len(trustregion.center) - if "lower_bounds" in bounds: - if bounds["lower_bounds"] is None: - lower_bounds = np.full(n_params, -1) - else: - lower_bounds = trustregion.map_to_unit(bounds["lower_bounds"]) - lower_bounds = np.nan_to_num(lower_bounds, nan=-1, neginf=-1) - out["lower_bounds"] = lower_bounds - - if "upper_bounds" in bounds: - if bounds["upper_bounds"] is None: - upper_bounds = np.ones(n_params) - else: - upper_bounds = trustregion.map_to_unit(bounds["upper_bounds"]) - upper_bounds = np.nan_to_num(upper_bounds, nan=1, posinf=1) - out["upper_bounds"] = upper_bounds - return out - - class SubproblemResult(NamedTuple): """Result of the subproblem solver.""" diff --git a/src/estimagic/optimization/tranquilo/wrapped_subsolvers.py b/src/estimagic/optimization/tranquilo/wrapped_subsolvers.py index 11907e26d..b025bccd5 100644 --- a/src/estimagic/optimization/tranquilo/wrapped_subsolvers.py +++ b/src/estimagic/optimization/tranquilo/wrapped_subsolvers.py @@ -6,10 +6,10 @@ from estimagic.optimization.tiktak import draw_exploration_sample -def solve_multistart(model, lower_bounds, upper_bounds): +def solve_multistart(model, x_candidate, lower_bounds, 
     np.random.seed(12345)
     start_values = draw_exploration_sample(
-        x=np.zeros(len(lower_bounds)),
+        x=x_candidate,
         lower=lower_bounds,
         upper=upper_bounds,
         n_samples=100,
@@ -45,16 +45,13 @@ def crit(x):
     }
 
 
-def slsqp_sphere(model, x0=None, lower_bounds=None, upper_bounds=None):  # noqa: ARG001
+def slsqp_sphere(model, x_candidate):
     crit, grad = get_crit_and_grad(model)
     constraints = get_constraints()
 
-    if x0 is None:
-        x0 = np.zeros(len(model.linear_terms))
-
     res = minimize(
         crit,
-        x0,
+        x_candidate,
         method="slsqp",
         jac=grad,
         constraints=constraints,
diff --git a/tests/optimization/subsolvers/test_bntr_fast.py b/tests/optimization/subsolvers/test_bntr_fast.py
index 8ee14793c..cf24d6216 100644
--- a/tests/optimization/subsolvers/test_bntr_fast.py
+++ b/tests/optimization/subsolvers/test_bntr_fast.py
@@ -201,6 +201,7 @@ def test_take_preliminary_gradient_descent_and_check_for_convergence():
         "model_hessian": model_hessian,
         "lower_bounds": lower_bounds,
         "upper_bounds": upper_bounds,
+        "x_candidate": x_candidate,
         "maxiter_gradient_descent": 5,
         "gtol_abs": 1e-08,
         "gtol_rel": 1e-08,
@@ -503,9 +504,15 @@ def test_minimize_bntr():
         "gtol_abs_conjugate_gradient": 1e-08,
         "gtol_rel_conjugate_gradient": 1e-06,
     }
-    res_orig = bntr(model, lower_bounds, upper_bounds, **options)
+    x0 = np.zeros_like(lower_bounds)
+    res_orig = bntr(model, lower_bounds, upper_bounds, x_candidate=x0, **options)
     res_fast = _bntr_fast_jitted(
-        model.linear_terms, model.square_terms, lower_bounds, upper_bounds, **options
+        model_gradient=model.linear_terms,
+        model_hessian=model.square_terms,
+        lower_bounds=lower_bounds,
+        upper_bounds=upper_bounds,
+        x_candidate=x0,
+        **options,
     )
     # using aaae to get tests run on windows machines.
     aaae(res_orig["x"], res_fast[0])
@@ -530,7 +537,12 @@ def test_minimize_bntr_break_loop_early():
         "gtol_rel_conjugate_gradient": 10,
     }
     res_fast = _bntr_fast_jitted(
-        model.linear_terms, model.square_terms, lower_bounds, upper_bounds, **options
+        model_gradient=model.linear_terms,
+        model_hessian=model.square_terms,
+        lower_bounds=lower_bounds,
+        upper_bounds=upper_bounds,
+        x_candidate=np.zeros_like(lower_bounds),
+        **options,
     )
     # using aaae to get tests run on windows machines.
     aaae(np.zeros(len(model.linear_terms)), res_fast[0])
diff --git a/tests/optimization/test_quadratic_subsolvers.py b/tests/optimization/test_quadratic_subsolvers.py
index ce3db3ddc..f541d4a82 100644
--- a/tests/optimization/test_quadratic_subsolvers.py
+++ b/tests/optimization/test_quadratic_subsolvers.py
@@ -161,7 +161,7 @@
         ),
         np.array([-1.0, -1.0, -1.0]),
         np.ones(3),
-        np.array([1, 1, -1]),
+        np.array([1.0, 1, -1]),
     ),
     (
         np.array([39307.4, 43176.2, 19136.1]),
@@ -466,8 +466,20 @@ def test_bounded_newton_trustregion(
         "gtol_rel_conjugate_gradient": 1e-6,
     }
 
-    result = bntr(main_model, lower_bounds, upper_bounds, **options)
-    result_fast = bntr_fast(main_model, lower_bounds, upper_bounds, **options)
+    result = bntr(
+        main_model,
+        lower_bounds,
+        upper_bounds,
+        x_candidate=np.zeros_like(x_expected),
+        **options
+    )
+    result_fast = bntr_fast(
+        main_model,
+        lower_bounds,
+        upper_bounds,
+        x_candidate=np.zeros_like(x_expected),
+        **options
+    )
 
     aaae(result["x"], x_expected, decimal=5)
     aaae(result_fast["x"], x_expected, decimal=5)
@@ -505,7 +517,7 @@ def test_bounded_newton_trustregion(
 def test_gqtpar_quadratic(linear_terms, square_terms, x_expected, criterion_expected):
     main_model = MainModel(linear_terms=linear_terms, square_terms=square_terms)
 
-    result = gqtpar(main_model)
+    result = gqtpar(main_model, x_candidate=np.zeros_like(x_expected))
 
     aaae(result["x"], x_expected)
     aaae(result["criterion"], criterion_expected)
diff --git a/tests/optimization/tranquilo/test_rho_noise.py b/tests/optimization/tranquilo/test_rho_noise.py
index 4e2a145ff..dd62528b1 100644
--- a/tests/optimization/tranquilo/test_rho_noise.py
+++ b/tests/optimization/tranquilo/test_rho_noise.py
@@ -3,6 +3,7 @@
 from estimagic.optimization.tranquilo.aggregate_models import get_aggregator
 from estimagic.optimization.tranquilo.fit_models import get_fitter
 from estimagic.optimization.tranquilo.region import Region
+from estimagic.optimization.tranquilo.bounds import Bounds
 from estimagic.optimization.tranquilo.rho_noise import simulate_rho_noise
 from estimagic.optimization.tranquilo.solve_subproblem import get_subsolver
 from numpy.testing import assert_array_almost_equal as aaae
@@ -45,7 +46,7 @@ def test_convergence_to_one_if_noise_is_tiny(functype):
     noise_cov = np.eye(n_residuals) * 1e-12
 
-    trustregion = Region(center=np.ones(2) * 0.5, radius=1.0)
+    trustregion = Region(center=np.ones(2) * 0.5, radius=1.0, bounds=Bounds(None, None))
 
     model_fitter = get_fitter(
         fitter="ols",
         model_type=model_type,
@@ -57,7 +58,7 @@ def test_convergence_to_one_if_noise_is_tiny(functype):
         xs, fvecs, weights=None, region=trustregion, old_model=None
     )
 
-    subsolver = get_subsolver(solver="gqtpar")
+    subsolver = get_subsolver(sphere_solver="gqtpar", cube_solver="bntr")
 
     rng = np.random.default_rng(123)
 
diff --git a/tests/optimization/tranquilo/test_solve_subproblem.py b/tests/optimization/tranquilo/test_solve_subproblem.py
index 1acda4bd0..7fda79396 100644
--- a/tests/optimization/tranquilo/test_solve_subproblem.py
+++ b/tests/optimization/tranquilo/test_solve_subproblem.py
@@ -1,8 +1,9 @@
 import numpy as np
 import pytest
 from estimagic.optimization.tranquilo.models import ScalarModel
-from estimagic.optimization.tranquilo.region import Region
 from estimagic.optimization.tranquilo.solve_subproblem import get_subsolver
+from estimagic.optimization.tranquilo.region import Region
+from estimagic.optimization.tranquilo.bounds import Bounds
 from numpy.testing import assert_array_almost_equal as aaae
 
 solvers = ["gqtpar", "gqtpar_fast"]
@@ -32,12 +33,9 @@ def test_without_bounds(solver_name):
         intercept=0, linear_terms=linear_terms, square_terms=quadratic_terms
     )
 
-    trustregion = Region(
-        center=np.zeros(3),
-        radius=1,
-    )
+    trustregion = Region(center=np.zeros(3), radius=1, bounds=Bounds(None, None))
 
-    solve_subproblem = get_subsolver(solver_name)
+    solve_subproblem = get_subsolver(sphere_solver=solver_name, cube_solver="bntr")
 
     calculated = solve_subproblem(
         model=model,